/* * Copyright (c) 2009 The DragonFly Project. All rights reserved. * * This code is derived from software contributed to The DragonFly Project * by Alex Hornung * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name of The DragonFly Project nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific, prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE(M_DEVFS, "devfs", "Device File System (devfs) allocations"); DEVFS_DECLARE_CLONE_BITMAP(ops_id); /* * SYSREF Integration - reference counting, allocation, * sysid and syslink integration. 
*/ static void devfs_cdev_terminate(cdev_t dev); static struct sysref_class cdev_sysref_class = { .name = "cdev", .mtype = M_DEVFS, .proto = SYSREF_PROTO_DEV, .offset = offsetof(struct cdev, si_sysref), .objsize = sizeof(struct cdev), .mag_capacity = 32, .flags = 0, .ops = { .terminate = (sysref_terminate_func_t)devfs_cdev_terminate } }; static struct objcache *devfs_node_cache; static struct objcache *devfs_msg_cache; static struct objcache *devfs_dev_cache; static struct objcache_malloc_args devfs_node_malloc_args = { sizeof(struct devfs_node), M_DEVFS }; struct objcache_malloc_args devfs_msg_malloc_args = { sizeof(struct devfs_msg), M_DEVFS }; struct objcache_malloc_args devfs_dev_malloc_args = { sizeof(struct cdev), M_DEVFS }; static struct devfs_dev_head devfs_dev_list = TAILQ_HEAD_INITIALIZER(devfs_dev_list); static struct devfs_mnt_head devfs_mnt_list = TAILQ_HEAD_INITIALIZER(devfs_mnt_list); static struct devfs_chandler_head devfs_chandler_list = TAILQ_HEAD_INITIALIZER(devfs_chandler_list); static struct devfs_alias_head devfs_alias_list = TAILQ_HEAD_INITIALIZER(devfs_alias_list); struct lock devfs_lock; static struct lwkt_port devfs_dispose_port; static struct lwkt_port devfs_msg_port; static struct thread *td_core; static ino_t d_ino = 0; static __uint32_t msg_id = 0; static struct spinlock ino_lock; static int devfs_debug_enable = 0; static ino_t devfs_fetch_ino(void); static int devfs_gc_dirs(struct devfs_node *); static int devfs_gc_links(struct devfs_node *, struct devfs_node *, size_t); static int devfs_create_all_dev_worker(struct devfs_node *); static int devfs_create_dev_worker(cdev_t, uid_t, gid_t, int); static int devfs_destroy_dev_worker(cdev_t); static int devfs_destroy_subnames_worker(char *); static int devfs_destroy_dev_by_ops_worker(struct dev_ops *, int); static int devfs_propagate_dev(cdev_t, int); static int devfs_unlink_dev(cdev_t dev); static int devfs_chandler_add_worker(char *, d_clone_t *); static int devfs_chandler_del_worker(char *); static void devfs_msg_autofree_reply(lwkt_port_t, lwkt_msg_t); static void devfs_msg_core(void *); static int devfs_find_device_by_name_worker(devfs_msg_t); static int devfs_find_device_by_udev_worker(devfs_msg_t); static int devfs_apply_reset_rules_caller(char *, int); static int devfs_apply_reset_rules_worker(struct devfs_node *, int); static int devfs_scan_callback_worker(devfs_scan_t *); static struct devfs_node *devfs_resolve_or_create_dir(struct devfs_node *, char *, size_t, int); static int devfs_make_alias_worker(struct devfs_alias *); static int devfs_alias_remove(cdev_t); static int devfs_alias_reap(void); static int devfs_alias_propagate(struct devfs_alias *); static int devfs_alias_apply(struct devfs_node *, struct devfs_alias *); static int devfs_alias_check_create(struct devfs_node *); static int devfs_clr_subnames_flag_worker(char *, uint32_t); static int devfs_destroy_subnames_without_flag_worker(char *, uint32_t); /* * devfs_debug() is a SYSCTL and TUNABLE controlled debug output function using kvprintf */ int devfs_debug(int level, char *fmt, ...) { __va_list ap; __va_start(ap, fmt); if (level <= devfs_debug_enable) kvprintf(fmt, ap); __va_end(ap); return 0; } /* * devfs_allocp() Allocates a new devfs node with the specified * parameters. The node is also automatically linked into the topology * if a parent is specified. 
It also calls the rule and alias stuff to * be applied on the new node */ struct devfs_node * devfs_allocp(devfs_nodetype devfsnodetype, char *name, struct devfs_node *parent, struct mount *mp, cdev_t dev) { struct devfs_node *node = NULL; size_t namlen = strlen(name); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocp -1- for %s\n", name?name:"NULL"); node = objcache_get(devfs_node_cache, M_WAITOK); bzero(node, sizeof(*node)); atomic_add_int(&(DEVFS_MNTDATA(mp)->leak_count), 1); node->d_dev = NULL; node->nchildren = 1; node->mp = mp; node->d_dir.d_ino = devfs_fetch_ino(); node->cookie_jar = 2; /* Leave 0 and 1 for '.' and '..', respectively */ /* Access Control members */ node->mode = DEVFS_DEFAULT_MODE; /* files access mode and type */ node->uid = DEVFS_DEFAULT_UID; /* owner user id */ node->gid = DEVFS_DEFAULT_GID; /* owner group id */ switch (devfsnodetype) { case Proot: /* Ensure that we don't recycle the root vnode */ node->flags |= DEVFS_NODE_LINKED; case Pdir: TAILQ_INIT(DEVFS_DENODE_HEAD(node)); node->d_dir.d_type = DT_DIR; node->nchildren = 2; break; case Plink: node->d_dir.d_type = DT_LNK; break; case Preg: node->d_dir.d_type = DT_REG; break; case Pdev: if (dev != NULL) { node->d_dir.d_type = DT_CHR; node->d_dev = dev; node->d_dir.d_ino = dev->si_inode; node->mode = dev->si_perms; /* files access mode and type */ node->uid = dev->si_uid; /* owner user id */ node->gid = dev->si_gid; /* owner group id */ devfs_alias_check_create(node); } break; default: panic("devfs_allocp: unknown node type"); } node->v_node = NULL; node->node_type = devfsnodetype; /* Init the dirent structure of each devfs vnode */ KKASSERT(namlen < 256); node->d_dir.d_namlen = namlen; node->d_dir.d_name = kmalloc(namlen+1, M_DEVFS, M_WAITOK); memcpy(node->d_dir.d_name, name, namlen); node->d_dir.d_name[namlen] = '\0'; /* Initialize the parent node element */ node->parent = parent; /* Apply rules */ devfs_rule_check_apply(node); /* xtime members */ nanotime(&node->atime); node->mtime = node->ctime = node->atime; /* * Associate with parent as last step, clean out namecache * reference. */ devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocp: about to insert node\n"); if ((parent != NULL) && ((parent->node_type == Proot) || (parent->node_type == Pdir))) { devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocp: node inserted %p\n", node); parent->nchildren++; node->cookie = parent->cookie_jar++; node->flags |= DEVFS_NODE_LINKED; TAILQ_INSERT_TAIL(DEVFS_DENODE_HEAD(parent), node, link); /* This forces negative namecache lookups to clear */ ++mp->mnt_namecache_gen; } devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocp -end:2-\n"); return node; } /* * devfs_allocv() allocates a new vnode based on a devfs node. 
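 *
 * The vget() loop below guards against stale associations: vget() returning
 * ENOENT means the vnode was reclaimed while we slept, and the re-check of
 * node->v_node after getnewvnode() catches a racing allocation, in which
 * case the fresh vnode is discarded and the lookup is retried.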
*/ int devfs_allocv(struct vnode **vpp, struct devfs_node *node) { struct vnode *vp; int error = 0; KKASSERT(node); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -1-\n"); try_again: while ((vp = node->v_node) != NULL) { error = vget(vp, LK_EXCLUSIVE); if (error != ENOENT) { *vpp = vp; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv, code path 2...\n"); goto out; } } devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -3-\n"); if ((error = getnewvnode(VT_DEVFS, node->mp, vpp, 0, 0)) != 0) goto out; vp = *vpp; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -4-\n"); if (node->v_node != NULL) { vp->v_type = VBAD; vx_put(vp); goto try_again; } vp->v_data = node; node->v_node = vp; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -5-\n"); switch (node->node_type) { case Proot: vp->v_flag |= VROOT; case Pdir: vp->v_type = VDIR; break; case Plink: vp->v_type = VLNK; break; case Preg: vp->v_type = VREG; break; case Pdev: vp->v_type = VCHR; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -6-\n"); KKASSERT(node->d_dev); if (node->d_dev) { devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -7-\n"); vp->v_uminor = node->d_dev->si_uminor; vp->v_umajor = 0; #if 0 vp->v_rdev = node->d_dev; #endif v_associate_rdev(vp, node->d_dev); vp->v_ops = &node->mp->mnt_vn_spec_ops; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -8-\n"); } else { devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv: type is Pdev but d_dev is not set!!!!\n"); } devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -9-\n"); break; default: panic("devfs_allocv: unknown node type"); } out: devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -10-\n"); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocv -end:11-\n"); return error; } /* * devfs_allocvp allocates both a devfs node (with the given settings) and a vnode * based on the newly created devfs node. */ int devfs_allocvp(struct mount *mp, struct vnode **vpp, devfs_nodetype devfsnodetype, char *name, struct devfs_node *parent, cdev_t dev) { struct devfs_node *node; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocvp -1-\n"); node = devfs_allocp(devfsnodetype, name, parent, mp, dev); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocvp -2-\n"); if (node != NULL) devfs_allocv(vpp, node); else *vpp = NULL; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_allocvp -end:3-\n"); return 0; } /* * Destroy the devfs_node. The node must be unlinked from the topology. * * This function will also destroy any vnode association with the node * and device. * * The cdev_t itself remains intact. */ int devfs_freep(struct devfs_node *node) { struct vnode *vp; KKASSERT(node); KKASSERT(((node->flags & DEVFS_NODE_LINKED) == 0) || (node->node_type == Proot)); KKASSERT((node->flags & DEVFS_DESTROYED) == 0); atomic_subtract_int(&(DEVFS_MNTDATA(node->mp)->leak_count), 1); if (node->symlink_name) { kfree(node->symlink_name, M_DEVFS); node->symlink_name = NULL; } /* * Remove the node from the orphan list if it is still on it. */ if (node->flags & DEVFS_ORPHANED) devfs_tracer_del_orphan(node); /* * Disassociate the vnode from the node. This also prevents the * vnode's reclaim code from double-freeing the node. */ if ((vp = node->v_node) != NULL) { #if 0 vp->v_rdev = NULL; #endif v_release_rdev(vp); vp->v_data = NULL; node->v_node = NULL; } if (node->d_dir.d_name) kfree(node->d_dir.d_name, M_DEVFS); node->flags |= DEVFS_DESTROYED; objcache_put(devfs_node_cache, node); return 0; } /* * Unlink the devfs node from the topology and add it to the orphan list. * The node will later be destroyed by freep. * * Any vnode association, including the v_rdev and v_data, remains intact * until the freep. 
 */
int
devfs_unlinkp(struct devfs_node *node)
{
	struct devfs_node *parent;
	KKASSERT(node);

	devfs_tracer_add_orphan(node);
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_unlinkp for %s\n", node->d_dir.d_name);
	parent = node->parent;

	/*
	 * If the parent is known we can unlink the node out of the topology
	 */
	if (parent) {
		TAILQ_REMOVE(DEVFS_DENODE_HEAD(parent), node, link);
		parent->nchildren--;
		KKASSERT((parent->nchildren >= 0));
		node->flags &= ~DEVFS_NODE_LINKED;
	}
	node->parent = NULL;
	return 0;
}

/*
 * devfs_reaperp() is a recursive function that iterates through the whole
 * topology, unlinking and freeing all devfs nodes.
 */
int
devfs_reaperp(struct devfs_node *node)
{
	struct devfs_node *node1, *node2;

	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "This node is Pdir or Proot; has %d children\n",
			    node->nchildren);
		if (node->nchildren > 2) {
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
					      link, node2) {
				devfs_reaperp(node1);
			}
		}
	}
	devfs_unlinkp(node);
	devfs_freep(node);

	return 0;
}

/*
 * devfs_gc() is the devfs garbage collector. It takes care of unlinking and
 * freeing a node, but also removes empty directories and links that link
 * via the devfs auto-link mechanism to the node being deleted.
 */
int
devfs_gc(struct devfs_node *node)
{
	struct devfs_node *root_node = DEVFS_MNTDATA(node->mp)->root_node;

	devfs_gc_links(root_node, node, node->nlinks);
	devfs_unlinkp(node);
	devfs_gc_dirs(root_node);
	devfs_freep(node);

	return 0;
}

/*
 * devfs_gc_dirs() is a helper function for devfs_gc, unlinking and freeing
 * empty directories.
 */
static int
devfs_gc_dirs(struct devfs_node *node)
{
	struct devfs_node *node1, *node2;

	if ((node->node_type == Proot) || (node->node_type == Pdir)) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "This node is Pdir or Proot; has %d children\n",
			    node->nchildren);
		if (node->nchildren > 2) {
			TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node),
					      link, node2) {
				devfs_gc_dirs(node1);
			}
		}

		if (node->nchildren == 2) {
			devfs_debug(DEVFS_DEBUG_DEBUG,
				    "This node is called %s and it is empty\n",
				    node->d_dir.d_name);
			devfs_unlinkp(node);
			devfs_freep(node);
		}
	}

	return 0;
}

/*
 * devfs_gc_links() is a helper function for devfs_gc, unlinking and freeing
 * auto-linked nodes linking to the node being deleted.
 */
static int
devfs_gc_links(struct devfs_node *node, struct devfs_node *target,
	       size_t nlinks)
{
	struct devfs_node *node1, *node2;

	if (nlinks > 0) {
		if ((node->node_type == Proot) || (node->node_type == Pdir)) {
			devfs_debug(DEVFS_DEBUG_DEBUG,
				    "This node is Pdir or Proot; has %d children\n",
				    node->nchildren);
			if (node->nchildren > 2) {
				TAILQ_FOREACH_MUTABLE(node1,
						      DEVFS_DENODE_HEAD(node),
						      link, node2) {
					nlinks = devfs_gc_links(node1, target,
								nlinks);
				}
			}
		} else if (node->link_target == target) {
			nlinks--;
			devfs_unlinkp(node);
			devfs_freep(node);
		}
	}

	KKASSERT(nlinks >= 0);

	return nlinks;
}

/*
 * devfs_create_dev() is the asynchronous entry point for device creation.
 * It just sends a message with the relevant details to the devfs core.
 *
 * This function will reference the passed device. The reference is owned
 * by devfs and represents all of the device's node associations.
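 *
 * Drivers typically reach this through make_dev() and friends; a minimal
 * direct sketch (hypothetical "mydrv_ops" and "unit"; si_name setup
 * omitted) would look like:
 *
 *	cdev_t dev = devfs_new_cdev(&mydrv_ops, unit);
 *	devfs_create_dev(dev, UID_ROOT, GID_WHEEL, 0600);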
*/ int devfs_create_dev(cdev_t dev, uid_t uid, gid_t gid, int perms) { __uint64_t id; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_dev -1-, name: %s (%p)\n", dev->si_name, dev); reference_dev(dev); id = devfs_msg_send_dev(DEVFS_DEVICE_CREATE, dev, uid, gid, perms); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_dev -end:2- (unique id: %x) / (%p)\n", id, dev); return 0; } /* * devfs_destroy_dev() is the asynchronous entry point for device destruction. * It just sends a message with the relevant details to the devfs core. */ int devfs_destroy_dev(cdev_t dev) { devfs_msg_send_dev(DEVFS_DEVICE_DESTROY, dev, 0, 0, 0); return 0; } /* * devfs_mount_add() is the synchronous entry point for adding a new devfs * mount. It sends a synchronous message with the relevant details to the * devfs core. */ int devfs_mount_add(struct devfs_mnt_data *mnt) { devfs_msg_t msg; msg = devfs_msg_get(); msg->mdv_mnt = mnt; msg = devfs_msg_send_sync(DEVFS_MOUNT_ADD, msg); devfs_msg_put(msg); return 0; } /* * devfs_mount_del() is the synchronous entry point for removing a devfs mount. * It sends a synchronous message with the relevant details to the devfs core. */ int devfs_mount_del(struct devfs_mnt_data *mnt) { devfs_msg_t msg; msg = devfs_msg_get(); msg->mdv_mnt = mnt; msg = devfs_msg_send_sync(DEVFS_MOUNT_DEL, msg); devfs_msg_put(msg); return 0; } /* * devfs_destroy_subnames() is the synchronous entry point for device destruction * by subname. It just sends a message with the relevant details to the devfs core. */ int devfs_destroy_subnames(char *name) { devfs_msg_t msg; msg = devfs_msg_get(); msg->mdv_load = name; msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES, msg); devfs_msg_put(msg); return 0; } int devfs_clr_subnames_flag(char *name, uint32_t flag) { devfs_msg_t msg; msg = devfs_msg_get(); msg->mdv_flags.name = name; msg->mdv_flags.flag = flag; msg = devfs_msg_send_sync(DEVFS_CLR_SUBNAMES_FLAG, msg); devfs_msg_put(msg); return 0; } int devfs_destroy_subnames_without_flag(char *name, uint32_t flag) { devfs_msg_t msg; msg = devfs_msg_get(); msg->mdv_flags.name = name; msg->mdv_flags.flag = flag; msg = devfs_msg_send_sync(DEVFS_DESTROY_SUBNAMES_WO_FLAG, msg); devfs_msg_put(msg); return 0; } /* * devfs_create_all_dev is the asynchronous entry point to trigger device * node creation. It just sends a message with the relevant details to * the devfs core. */ int devfs_create_all_dev(struct devfs_node *root) { devfs_msg_send_generic(DEVFS_CREATE_ALL_DEV, root); return 0; } /* * devfs_destroy_dev_by_ops is the asynchronous entry point to destroy all * devices with a specific set of dev_ops and minor. It just sends a * message with the relevant details to the devfs core. */ int devfs_destroy_dev_by_ops(struct dev_ops *ops, int minor) { devfs_msg_send_ops(DEVFS_DESTROY_DEV_BY_OPS, ops, minor); return 0; } /* * devfs_clone_handler_add is the synchronous entry point to add a new * clone handler. It just sends a message with the relevant details to * the devfs core. */ int devfs_clone_handler_add(char *name, d_clone_t *nhandler) { devfs_msg_t msg; msg = devfs_msg_get(); msg->mdv_chandler.name = name; msg->mdv_chandler.nhandler = nhandler; msg = devfs_msg_send_sync(DEVFS_CHANDLER_ADD, msg); devfs_msg_put(msg); return 0; } /* * devfs_clone_handler_del is the synchronous entry point to remove a * clone handler. It just sends a message with the relevant details to * the devfs core. 
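 *
 * Typical pairing with devfs_clone_handler_add() (hypothetical "mydrv"
 * handler, not defined in this file): register on driver attach with
 *
 *	devfs_clone_handler_add("mydrv", mydrv_clone);
 *
 * and remove it again on detach with
 *
 *	devfs_clone_handler_del("mydrv");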
*/ int devfs_clone_handler_del(char *name) { devfs_msg_t msg; msg = devfs_msg_get(); msg->mdv_chandler.name = name; msg->mdv_chandler.nhandler = NULL; msg = devfs_msg_send_sync(DEVFS_CHANDLER_DEL, msg); devfs_msg_put(msg); return 0; } /* * devfs_find_device_by_name is the synchronous entry point to find a * device given its name. It sends a synchronous message with the * relevant details to the devfs core and returns the answer. */ cdev_t devfs_find_device_by_name(const char *fmt, ...) { cdev_t found = NULL; devfs_msg_t msg; char target[PATH_MAX+1]; __va_list ap; int i; if (fmt == NULL) return NULL; __va_start(ap, fmt); i = kvcprintf(fmt, NULL, target, 10, ap); target[i] = '\0'; __va_end(ap); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_find_device_by_name: %s -1-\n", target); msg = devfs_msg_get(); msg->mdv_name = target; msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_NAME, msg); found = msg->mdv_cdev; devfs_msg_put(msg); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_find_device_by_name found? %s -end:2-\n", (found)?"YES":"NO"); return found; } /* * devfs_find_device_by_udev is the synchronous entry point to find a * device given its udev number. It sends a synchronous message with * the relevant details to the devfs core and returns the answer. */ cdev_t devfs_find_device_by_udev(udev_t udev) { cdev_t found = NULL; devfs_msg_t msg; msg = devfs_msg_get(); msg->mdv_udev = udev; msg = devfs_msg_send_sync(DEVFS_FIND_DEVICE_BY_UDEV, msg); found = msg->mdv_cdev; devfs_msg_put(msg); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_find_device_by_udev found? %s -end:3-\n", ((found) ? found->si_name:"NO")); return found; } /* * devfs_make_alias is the asynchronous entry point to register an alias * for a device. It just sends a message with the relevant details to the * devfs core. */ int devfs_make_alias(char *name, cdev_t dev_target) { struct devfs_alias *alias; alias = kmalloc(sizeof(struct devfs_alias), M_DEVFS, M_WAITOK); memcpy(alias->name, name, strlen(name) + 1); alias->dev_target = dev_target; devfs_msg_send_generic(DEVFS_MAKE_ALIAS, alias); return 0; } /* * devfs_apply_rules is the asynchronous entry point to trigger application * of all rules. It just sends a message with the relevant details to the * devfs core. */ int devfs_apply_rules(char *mntto) { char *new_name; size_t namelen; namelen = strlen(mntto) + 1; new_name = kmalloc(namelen, M_DEVFS, M_WAITOK); memcpy(new_name, mntto, namelen); devfs_msg_send_name(DEVFS_APPLY_RULES, new_name); return 0; } /* * devfs_reset_rules is the asynchronous entry point to trigger reset of all rules. * It just sends a message with the relevant details to the devfs core. */ int devfs_reset_rules(char *mntto) { char *new_name; size_t namelen; namelen = strlen(mntto) + 1; new_name = kmalloc(namelen, M_DEVFS, M_WAITOK); memcpy(new_name, mntto, namelen); devfs_msg_send_name(DEVFS_RESET_RULES, new_name); return 0; } /* * devfs_scan_callback is the asynchronous entry point to call a callback * on all cdevs. * It just sends a message with the relevant details to the devfs core. */ int devfs_scan_callback(devfs_scan_t *callback) { devfs_msg_t msg; /* Make sure that function pointers have the size of a generic pointer (innecessary) */ KKASSERT(sizeof(callback) == sizeof(void *)); msg = devfs_msg_get(); msg->mdv_load = callback; msg = devfs_msg_send_sync(DEVFS_SCAN_CALLBACK, msg); devfs_msg_put(msg); return 0; } /* * Acts as a message drain. Any message that is replied to here gets destroyed and * the memory freed. 
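 *
 * Only asynchronous messages end up here: devfs_msg_send() points replies
 * at devfs_dispose_port, while devfs_msg_send_sync() installs a per-call
 * reply port and its callers free the message with devfs_msg_put().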
*/ static void devfs_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg) { devfs_msg_put((devfs_msg_t)msg); } /* * devfs_msg_get allocates a new devfs msg and returns it. */ devfs_msg_t devfs_msg_get() { return objcache_get(devfs_msg_cache, M_WAITOK); } /* * devfs_msg_put deallocates a given devfs msg. */ int devfs_msg_put(devfs_msg_t msg) { objcache_put(devfs_msg_cache, msg); return 0; } /* * devfs_msg_send is the generic asynchronous message sending facility * for devfs. By default the reply port is the automatic disposal port. */ __uint32_t devfs_msg_send(uint32_t cmd, devfs_msg_t devfs_msg) { lwkt_port_t port = &devfs_msg_port; lwkt_initmsg(&devfs_msg->hdr, &devfs_dispose_port, 0); devfs_msg->hdr.u.ms_result = cmd; devfs_msg->id = atomic_fetchadd_int(&msg_id, 1); lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg); return devfs_msg->id; } /* * devfs_msg_send_sync is the generic synchronous message sending * facility for devfs. It initializes a local reply port and waits * for the core's answer. This answer is then returned. */ devfs_msg_t devfs_msg_send_sync(uint32_t cmd, devfs_msg_t devfs_msg) { struct lwkt_port rep_port; devfs_msg_t msg_incoming; lwkt_port_t port = &devfs_msg_port; lwkt_initport_thread(&rep_port, curthread); lwkt_initmsg(&devfs_msg->hdr, &rep_port, 0); devfs_msg->hdr.u.ms_result = cmd; devfs_msg->id = atomic_fetchadd_int(&msg_id, 1); lwkt_sendmsg(port, (lwkt_msg_t)devfs_msg); msg_incoming = lwkt_waitport(&rep_port, 0); return msg_incoming; } /* * sends a message with a generic argument. */ __uint32_t devfs_msg_send_generic(uint32_t cmd, void *load) { devfs_msg_t devfs_msg = devfs_msg_get(); devfs_msg->mdv_load = load; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_send_generic -1- (%p)\n", load); return devfs_msg_send(cmd, devfs_msg); } /* * sends a message with a name argument. */ __uint32_t devfs_msg_send_name(uint32_t cmd, char *name) { devfs_msg_t devfs_msg = devfs_msg_get(); devfs_msg->mdv_name = name; return devfs_msg_send(cmd, devfs_msg); } /* * sends a message with a mount argument. */ __uint32_t devfs_msg_send_mount(uint32_t cmd, struct devfs_mnt_data *mnt) { devfs_msg_t devfs_msg = devfs_msg_get(); devfs_msg->mdv_mnt = mnt; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_send_mp -1- (%p)\n", mnt); return devfs_msg_send(cmd, devfs_msg); } /* * sends a message with an ops argument. */ __uint32_t devfs_msg_send_ops(uint32_t cmd, struct dev_ops *ops, int minor) { devfs_msg_t devfs_msg = devfs_msg_get(); devfs_msg->mdv_ops.ops = ops; devfs_msg->mdv_ops.minor = minor; return devfs_msg_send(cmd, devfs_msg); } /* * sends a message with a clone handler argument. */ __uint32_t devfs_msg_send_chandler(uint32_t cmd, char *name, d_clone_t handler) { devfs_msg_t devfs_msg = devfs_msg_get(); devfs_msg->mdv_chandler.name = name; devfs_msg->mdv_chandler.nhandler = handler; return devfs_msg_send(cmd, devfs_msg); } /* * sends a message with a device argument. */ __uint32_t devfs_msg_send_dev(uint32_t cmd, cdev_t dev, uid_t uid, gid_t gid, int perms) { devfs_msg_t devfs_msg = devfs_msg_get(); devfs_msg->mdv_dev.dev = dev; devfs_msg->mdv_dev.uid = uid; devfs_msg->mdv_dev.gid = gid; devfs_msg->mdv_dev.perms = perms; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_send_dev -1- (%p)\n", dev); return devfs_msg_send(cmd, devfs_msg); } /* * sends a message with a link argument. 
*/ __uint32_t devfs_msg_send_link(uint32_t cmd, char *name, char *target, struct mount *mp) { devfs_msg_t devfs_msg = devfs_msg_get(); devfs_msg->mdv_link.name = name; devfs_msg->mdv_link.target = target; devfs_msg->mdv_link.mp = mp; return devfs_msg_send(cmd, devfs_msg); } /* * devfs_msg_core is the main devfs thread. It handles all incoming messages * and calls the relevant worker functions. By using messages it's assured * that events occur in the correct order. */ static void devfs_msg_core(void *arg) { uint8_t run = 1; devfs_msg_t msg; cdev_t dev; struct devfs_mnt_data *mnt; struct devfs_node *node; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core -1-\n"); lwkt_initport_thread(&devfs_msg_port, curthread); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core -2-\n"); wakeup(td_core/*devfs_id*/); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core -3-\n"); while (run) { devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core -loop:4-\n"); msg = (devfs_msg_t)lwkt_waitport(&devfs_msg_port, 0); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core, new msg: %x (unique id: %x)\n", (unsigned int)msg->hdr.u.ms_result, msg->id); lockmgr(&devfs_lock, LK_EXCLUSIVE); switch (msg->hdr.u.ms_result) { case DEVFS_DEVICE_CREATE: dev = msg->mdv_dev.dev; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core device create msg %s(%p)\n", dev->si_name, dev); devfs_create_dev_worker(dev, msg->mdv_dev.uid, msg->mdv_dev.gid, msg->mdv_dev.perms); break; case DEVFS_DEVICE_DESTROY: devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core device destroy msg\n"); dev = msg->mdv_dev.dev; devfs_destroy_dev_worker(dev); break; case DEVFS_DESTROY_SUBNAMES: devfs_destroy_subnames_worker(msg->mdv_load); break; case DEVFS_DESTROY_DEV_BY_OPS: devfs_destroy_dev_by_ops_worker(msg->mdv_ops.ops, msg->mdv_ops.minor); break; case DEVFS_CREATE_ALL_DEV: devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core device create ALL msg\n"); node = (struct devfs_node *)msg->mdv_load; devfs_create_all_dev_worker(node); break; case DEVFS_MOUNT_ADD: mnt = msg->mdv_mnt; TAILQ_INSERT_TAIL(&devfs_mnt_list, mnt, link); devfs_create_all_dev_worker(mnt->root_node); break; case DEVFS_MOUNT_DEL: mnt = msg->mdv_mnt; TAILQ_REMOVE(&devfs_mnt_list, mnt, link); devfs_reaperp(mnt->root_node); if (mnt->leak_count) { devfs_debug(DEVFS_DEBUG_SHOW, "Leaked %d devfs_node elements!\n", mnt->leak_count); } break; case DEVFS_CHANDLER_ADD: devfs_chandler_add_worker(msg->mdv_chandler.name, msg->mdv_chandler.nhandler); break; case DEVFS_CHANDLER_DEL: devfs_chandler_del_worker(msg->mdv_chandler.name); break; case DEVFS_FIND_DEVICE_BY_NAME: devfs_find_device_by_name_worker(msg); break; case DEVFS_FIND_DEVICE_BY_UDEV: devfs_find_device_by_udev_worker(msg); break; case DEVFS_MAKE_ALIAS: devfs_make_alias_worker((struct devfs_alias *)msg->mdv_load); break; case DEVFS_APPLY_RULES: devfs_apply_reset_rules_caller(msg->mdv_name, 1); break; case DEVFS_RESET_RULES: devfs_apply_reset_rules_caller(msg->mdv_name, 0); break; case DEVFS_SCAN_CALLBACK: devfs_scan_callback_worker((devfs_scan_t *)msg->mdv_load); break; case DEVFS_CLR_SUBNAMES_FLAG: devfs_clr_subnames_flag_worker(msg->mdv_flags.name, msg->mdv_flags.flag); break; case DEVFS_DESTROY_SUBNAMES_WO_FLAG: devfs_destroy_subnames_without_flag_worker(msg->mdv_flags.name, msg->mdv_flags.flag); break; case DEVFS_TERMINATE_CORE: run = 0; break; case DEVFS_SYNC: break; default: devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_msg_core: unknown message " "received at core\n"); break; } lockmgr(&devfs_lock, LK_RELEASE); lwkt_replymsg((lwkt_msg_t)msg, 0); } wakeup(td_core/*devfs_id*/); lwkt_exit(); 
} /* * Worker function to insert a new dev into the dev list and initialize its * permissions. It also calls devfs_propagate_dev which in turn propagates * the change to all mount points. * * The passed dev is already referenced. This reference is eaten by this * function and represents the dev's linkage into devfs_dev_list. */ static int devfs_create_dev_worker(cdev_t dev, uid_t uid, gid_t gid, int perms) { KKASSERT(dev); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_dev_worker -1- -%s- (%p)\n", dev->si_name, dev); dev->si_uid = uid; dev->si_gid = gid; dev->si_perms = perms; devfs_link_dev(dev); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_dev_worker -2-\n"); devfs_propagate_dev(dev, 1); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_dev_worker -end:3-\n"); return 0; } /* * Worker function to delete a dev from the dev list and free the cdev. * It also calls devfs_propagate_dev which in turn propagates the change * to all mount points. */ static int devfs_destroy_dev_worker(cdev_t dev) { int error; KKASSERT(dev); KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_destroy_dev_worker -1- %s\n", dev->si_name); error = devfs_unlink_dev(dev); devfs_propagate_dev(dev, 0); if (error == 0) release_dev(dev); /* link ref */ release_dev(dev); release_dev(dev); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_destroy_dev_worker -end:5-\n"); return 0; } /* * Worker function to destroy all devices with a certain basename. * Calls devfs_destroy_dev_worker for the actual destruction. */ static int devfs_destroy_subnames_worker(char *name) { cdev_t dev, dev1; size_t len = strlen(name); TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) { if (!strncmp(dev->si_name, name, len)) { if (dev->si_name[len] != '\0') { devfs_destroy_dev_worker(dev); /* release_dev(dev); */ } } } return 0; } static int devfs_clr_subnames_flag_worker(char *name, uint32_t flag) { cdev_t dev, dev1; size_t len = strlen(name); TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) { if (!strncmp(dev->si_name, name, len)) { if (dev->si_name[len] != '\0') { dev->si_flags &= ~flag; } } } return 0; } static int devfs_destroy_subnames_without_flag_worker(char *name, uint32_t flag) { cdev_t dev, dev1; size_t len = strlen(name); TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) { if (!strncmp(dev->si_name, name, len)) { if (dev->si_name[len] != '\0') { if (!(dev->si_flags & flag)) { devfs_destroy_dev_worker(dev); } } } } return 0; } /* * Worker function that creates all device nodes on top of a devfs * root node. */ static int devfs_create_all_dev_worker(struct devfs_node *root) { cdev_t dev; KKASSERT(root); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_all_dev_worker -1-\n"); TAILQ_FOREACH(dev, &devfs_dev_list, link) { devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_all_dev_worker -loop:2- -%s-\n", dev->si_name); devfs_create_device_node(root, dev, NULL, NULL); } devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_all_dev_worker -end:3-\n"); return 0; } /* * Worker function that destroys all devices that match a specific * dev_ops and/or minor. If minor is less than 0, it is not matched * against. It also propagates all changes. 
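 *
 * Caller-side sketch of the matching entry point (hypothetical
 * "mydrv_ops"; a minor of -1 matches every minor):
 *
 *	devfs_destroy_dev_by_ops(&mydrv_ops, -1);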
*/ static int devfs_destroy_dev_by_ops_worker(struct dev_ops *ops, int minor) { cdev_t dev, dev1; KKASSERT(ops); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_destroy_dev_by_ops_worker -1-\n"); TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) { if (dev->si_ops != ops) continue; if ((minor < 0) || (dev->si_uminor == minor)) { devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_destroy_dev_by_ops_worker " "-loop:2- -%s-\n", dev->si_name); devfs_destroy_dev_worker(dev); } } devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_destroy_dev_by_ops_worker -end:3-\n"); return 0; } /* * Worker function that registers a new clone handler in devfs. */ static int devfs_chandler_add_worker(char *name, d_clone_t *nhandler) { struct devfs_clone_handler *chandler = NULL; u_char len = strlen(name); if (len == 0) return 1; TAILQ_FOREACH(chandler, &devfs_chandler_list, link) { if (chandler->namlen == len) { if (!memcmp(chandler->name, name, len)) { /* Clonable basename already exists */ return 1; } } } chandler = kmalloc(sizeof(*chandler), M_DEVFS, M_WAITOK | M_ZERO); memcpy(chandler->name, name, len+1); chandler->namlen = len; chandler->nhandler = nhandler; TAILQ_INSERT_TAIL(&devfs_chandler_list, chandler, link); return 0; } /* * Worker function that removes a given clone handler from the * clone handler list. */ static int devfs_chandler_del_worker(char *name) { struct devfs_clone_handler *chandler, *chandler2; u_char len = strlen(name); if (len == 0) return 1; TAILQ_FOREACH_MUTABLE(chandler, &devfs_chandler_list, link, chandler2) { if (chandler->namlen != len) continue; if (memcmp(chandler->name, name, len)) continue; TAILQ_REMOVE(&devfs_chandler_list, chandler, link); kfree(chandler, M_DEVFS); } return 0; } /* * Worker function that finds a given device name and changes * the message received accordingly so that when replied to, * the answer is returned to the caller. */ static int devfs_find_device_by_name_worker(devfs_msg_t devfs_msg) { struct devfs_alias *alias; cdev_t dev; cdev_t found = NULL; TAILQ_FOREACH(dev, &devfs_dev_list, link) { if (strcmp(devfs_msg->mdv_name, dev->si_name) == 0) { found = dev; break; } } if (found == NULL) { TAILQ_FOREACH(alias, &devfs_alias_list, link) { if (strcmp(devfs_msg->mdv_name, alias->name) == 0) { found = alias->dev_target; break; } } } devfs_msg->mdv_cdev = found; return 0; } /* * Worker function that finds a given device udev and changes * the message received accordingly so that when replied to, * the answer is returned to the caller. */ static int devfs_find_device_by_udev_worker(devfs_msg_t devfs_msg) { cdev_t dev, dev1; cdev_t found = NULL; TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) { if (((udev_t)dev->si_inode) == devfs_msg->mdv_udev) { found = dev; break; } } devfs_msg->mdv_cdev = found; return 0; } /* * Worker function that inserts a given alias into the * alias list, and propagates the alias to all mount * points. */ static int devfs_make_alias_worker(struct devfs_alias *alias) { struct devfs_alias *alias2; size_t len = strlen(alias->name); int found = 0; TAILQ_FOREACH(alias2, &devfs_alias_list, link) { if (!memcmp(alias->name, alias2->name, len)) { /* XXX */ found = 1; break; } } if (!found) { TAILQ_INSERT_TAIL(&devfs_alias_list, alias, link); devfs_alias_propagate(alias); } else { devfs_debug(DEVFS_DEBUG_DEBUG, "Warning: duplicate devfs_make_alias for %s\n", alias->name); kfree(alias, M_DEVFS); } return 0; } /* * Function that removes and frees all aliases. 
*/ static int devfs_alias_reap(void) { struct devfs_alias *alias, *alias2; TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) { TAILQ_REMOVE(&devfs_alias_list, alias, link); kfree(alias, M_DEVFS); } return 0; } /* * Function that removes an alias matching a specific cdev and frees * it accordingly. */ static int devfs_alias_remove(cdev_t dev) { struct devfs_alias *alias, *alias2; TAILQ_FOREACH_MUTABLE(alias, &devfs_alias_list, link, alias2) { if (alias->dev_target == dev) { TAILQ_REMOVE(&devfs_alias_list, alias, link); kfree(alias, M_DEVFS); } } return 0; } /* * This function propagates a new alias to all mount points. */ static int devfs_alias_propagate(struct devfs_alias *alias) { struct devfs_mnt_data *mnt; TAILQ_FOREACH(mnt, &devfs_mnt_list, link) { devfs_alias_apply(mnt->root_node, alias); } return 0; } /* * This function is a recursive function iterating through * all device nodes in the topology and, if applicable, * creating the relevant alias for a device node. */ static int devfs_alias_apply(struct devfs_node *node, struct devfs_alias *alias) { struct devfs_node *node1, *node2; KKASSERT(alias != NULL); if ((node->node_type == Proot) || (node->node_type == Pdir)) { devfs_debug(DEVFS_DEBUG_DEBUG, "This node is Pdir or Proot; has %d children\n", node->nchildren); if (node->nchildren > 2) { TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) { devfs_alias_apply(node1, alias); } } } else { if (node->d_dev == alias->dev_target) devfs_alias_create(alias->name, node); } return 0; } /* * This function checks if any alias possibly is applicable * to the given node. If so, the alias is created. */ static int devfs_alias_check_create(struct devfs_node *node) { struct devfs_alias *alias; TAILQ_FOREACH(alias, &devfs_alias_list, link) { if (node->d_dev == alias->dev_target) devfs_alias_create(alias->name, node); } return 0; } /* * This function creates an alias with a given name * linking to a given devfs node. It also increments * the link count on the target node. */ int devfs_alias_create(char *name_orig, struct devfs_node *target) { struct mount *mp = target->mp; struct devfs_node *parent = DEVFS_MNTDATA(mp)->root_node; struct devfs_node *linknode; char *create_path = NULL; char *name, name_buf[PATH_MAX]; KKASSERT((lockstatus(&devfs_lock, curthread)) == LK_EXCLUSIVE); devfs_resolve_name_path(name_orig, name_buf, &create_path, &name); if (create_path) parent = devfs_resolve_or_create_path(parent, create_path, 1); if (devfs_find_device_node_by_name(parent, name)) { devfs_debug(DEVFS_DEBUG_DEBUG, "Node already exists: %s " "(devfs_make_alias_worker)!\n", name); return 1; } linknode = devfs_allocp(Plink, name, parent, mp, NULL); if (linknode == NULL) return 1; linknode->link_target = target; target->nlinks++; #if 0 linknode->flags |= DEVFS_LINK; #endif return 0; } /* * This function is called by the core and handles mount point * strings. It either calls the relevant worker (devfs_apply_ * reset_rules_worker) on all mountpoints or only a specific * one. 
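 *
 * Illustrative calls into the entry points that land here (assuming a
 * devfs instance mounted on /dev): the wildcard form covers every devfs
 * mount, a path selects a single mount point.
 *
 *	devfs_apply_rules("*");
 *	devfs_reset_rules("/dev");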
*/ static int devfs_apply_reset_rules_caller(char *mountto, int apply) { struct devfs_mnt_data *mnt; size_t len = strlen(mountto); if (mountto[0] != '*') { TAILQ_FOREACH(mnt, &devfs_mnt_list, link) { if ((len == mnt->mntonnamelen) && (!memcmp(mnt->mp->mnt_stat.f_mntonname, mountto, len))) { devfs_apply_reset_rules_worker(mnt->root_node, apply); break; } } } else { TAILQ_FOREACH(mnt, &devfs_mnt_list, link) { devfs_apply_reset_rules_worker(mnt->root_node, apply); } } kfree(mountto, M_DEVFS); return 0; } /* * This worker function applies or resets, depending on the arguments, a rule * to the whole given topology. *RECURSIVE* */ static int devfs_apply_reset_rules_worker(struct devfs_node *node, int apply) { struct devfs_node *node1, *node2; if ((node->node_type == Proot) || (node->node_type == Pdir)) { devfs_debug(DEVFS_DEBUG_DEBUG, "This node is Pdir or Proot; has %d children\n", node->nchildren); if (node->nchildren > 2) { TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) { devfs_apply_reset_rules_worker(node1, apply); } } } if (apply) devfs_rule_check_apply(node); else devfs_rule_reset_node(node); return 0; } /* * This function calls a given callback function for * every dev node in the devfs dev list. */ static int devfs_scan_callback_worker(devfs_scan_t *callback) { cdev_t dev, dev1; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_scan_callback: %p -1-\n", callback); TAILQ_FOREACH_MUTABLE(dev, &devfs_dev_list, link, dev1) { callback(dev); } devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_scan_callback: finished\n"); return 0; } /* * This function tries to resolve a given directory, or if not * found and creation requested, creates the given directory. */ static struct devfs_node * devfs_resolve_or_create_dir(struct devfs_node *parent, char *dir_name, size_t name_len, int create) { struct devfs_node *node, *found = NULL; TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) { if (name_len == node->d_dir.d_namlen) { if (!memcmp(dir_name, node->d_dir.d_name, name_len)) { found = node; break; } } } if ((found == NULL) && (create)) { found = devfs_allocp(Pdir, dir_name, parent, parent->mp, NULL); } return found; } /* * This function tries to resolve a complete path. If creation is requested, * if a given part of the path cannot be resolved (because it doesn't exist), * it is created. */ struct devfs_node * devfs_resolve_or_create_path(struct devfs_node *parent, char *path, int create) { struct devfs_node *node = parent; char buf[PATH_MAX]; size_t idx = 0; if (path == NULL) return parent; for (; *path != '\0' ; path++) { if (*path != '/') { buf[idx++] = *path; } else { buf[idx] = '\0'; node = devfs_resolve_or_create_dir(node, buf, idx, create); if (node == NULL) return NULL; idx = 0; } } buf[idx] = '\0'; return devfs_resolve_or_create_dir(node, buf, idx, create); } /* * Takes a full path and strips it into a directory path and a name. * For a/b/c/foo, it returns foo in namep and a/b/c in pathp. It * requires a working buffer with enough size to keep the whole * fullpath. */ int devfs_resolve_name_path(char *fullpath, char *buf, char **pathp, char **namep) { char *name = NULL; char *path = NULL; size_t len = strlen(fullpath) + 1; int i; KKASSERT((fullpath != NULL) && (buf != NULL) && (pathp != NULL) && (namep != NULL)); memcpy(buf, fullpath, len); for (i = len-1; i>= 0; i--) { if (buf[i] == '/') { buf[i] = '\0'; name = &(buf[i+1]); path = buf; break; } } *pathp = path; if (name) { *namep = name; } else { *namep = buf; } return 0; } /* * This function creates a new devfs node for a given device. 
It can * handle a complete path as device name, and accordingly creates * the path and the final device node. * * The reference count on the passed dev remains unchanged. */ struct devfs_node * devfs_create_device_node(struct devfs_node *root, cdev_t dev, char *dev_name, char *path_fmt, ...) { struct devfs_node *parent, *node = NULL; char *path = NULL; char *name, name_buf[PATH_MAX]; __va_list ap; int i, found; char *create_path = NULL; char *names = "pqrsPQRS"; if (path_fmt != NULL) { path = kmalloc(PATH_MAX+1, M_DEVFS, M_WAITOK); __va_start(ap, path_fmt); i = kvcprintf(path_fmt, NULL, path, 10, ap); path[i] = '\0'; __va_end(ap); } parent = devfs_resolve_or_create_path(root, path, 1); KKASSERT(parent); devfs_resolve_name_path(((dev_name == NULL) && (dev))?(dev->si_name):(dev_name), name_buf, &create_path, &name); if (create_path) parent = devfs_resolve_or_create_path(parent, create_path, 1); if (devfs_find_device_node_by_name(parent, name)) { devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_device_node: " "DEVICE %s ALREADY EXISTS!!! Ignoring creation request.\n", name); goto out; } devfs_debug(DEVFS_DEBUG_DEBUG, "parent->d_dir.d_name=%s\n", parent->d_dir.d_name); node = devfs_allocp(Pdev, name, parent, parent->mp, dev); devfs_debug(DEVFS_DEBUG_DEBUG, "node->d_dir.d_name=%s\n", node->d_dir.d_name); #if 0 /* Ugly unix98 pty magic, to hide pty master (ptm) devices and their directory */ if ((dev) && (strlen(dev->si_name) >= 4) && (!memcmp(dev->si_name, "ptm/", 4))) { node->parent->flags |= DEVFS_HIDDEN; node->flags |= DEVFS_HIDDEN; } #endif devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_device_node: marker A\n"); /* Ugly pty magic, to tag pty devices as such and hide them if needed */ if ((strlen(name) >= 3) && (!memcmp(name, "pty", 3))) node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_device_node: marker B\n"); if ((strlen(name) >= 3) && (!memcmp(name, "tty", 3))) { found = 0; for (i = 0; i < strlen(names); i++) { if (name[3] == names[i]) { found = 1; break; } } if (found) node->flags |= (DEVFS_PTY | DEVFS_INVISIBLE); } devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_create_device_node: marker C\n"); out: if (path_fmt != NULL) kfree(path, M_DEVFS); return node; } /* * This function finds a given device node in the topology with a given * cdev. */ struct devfs_node * devfs_find_device_node(struct devfs_node *node, cdev_t target) { struct devfs_node *node1, *node2, *found = NULL; if ((node->node_type == Proot) || (node->node_type == Pdir)) { devfs_debug(DEVFS_DEBUG_DEBUG, "This node is Pdir or Proot; has %d children\n", node->nchildren); if (node->nchildren > 2) { TAILQ_FOREACH_MUTABLE(node1, DEVFS_DENODE_HEAD(node), link, node2) { if ((found = devfs_find_device_node(node1, target))) return found; } } } else if (node->node_type == Pdev) { if (node->d_dev == target) return node; } return NULL; } /* * This function finds a device node in the topology by its * name and returns it. */ struct devfs_node * devfs_find_device_node_by_name(struct devfs_node *parent, char *target) { struct devfs_node *node, *found = NULL; size_t len = strlen(target); TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(parent), link) { if ((len == node->d_dir.d_namlen) && (!memcmp(node->d_dir.d_name, target, len))) { found = node; break; } } return found; } /* * This function takes a cdev and removes its devfs node in the * given topology. The cdev remains intact. 
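 *
 * devfs_propagate_dev() below pairs this with devfs_create_device_node()
 * when a device detaches, doing, per mount point:
 *
 *	devfs_alias_remove(dev);
 *	devfs_destroy_device_node(mnt->root_node, dev);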
*/ int devfs_destroy_device_node(struct devfs_node *root, cdev_t target) { struct devfs_node *node, *parent; char *name, name_buf[PATH_MAX]; char *create_path = NULL; KKASSERT(target); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_destroy_device_node\n"); memcpy(name_buf, target->si_name, strlen(target->si_name)+1); devfs_resolve_name_path(target->si_name, name_buf, &create_path, &name); devfs_debug(DEVFS_DEBUG_DEBUG, "create_path: %s\n", create_path); devfs_debug(DEVFS_DEBUG_DEBUG, "name: %s\n", name); if (create_path) parent = devfs_resolve_or_create_path(root, create_path, 0); else parent = root; devfs_debug(DEVFS_DEBUG_DEBUG, "-> marker <-\n"); if (parent == NULL) return 1; devfs_debug(DEVFS_DEBUG_DEBUG, "->d_dir.d_name=%s\n", parent->d_dir.d_name); node = devfs_find_device_node_by_name(parent, name); devfs_debug(DEVFS_DEBUG_DEBUG, "->d_dir.d_name=%s\n", ((node) ? (node->d_dir.d_name) : "SHIT!")); if (node) devfs_gc(node); return 0; } /* * Just set perms and ownership for given node. */ int devfs_set_perms(struct devfs_node *node, uid_t uid, gid_t gid, u_short mode, u_long flags) { node->mode = mode; /* files access mode and type */ node->uid = uid; /* owner user id */ node->gid = gid; /* owner group id */ return 0; } /* * Propagates a device attach/detach to all mount * points. Also takes care of automatic alias removal * for a deleted cdev. */ static int devfs_propagate_dev(cdev_t dev, int attach) { struct devfs_mnt_data *mnt; devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_propagate_dev -1-\n"); TAILQ_FOREACH(mnt, &devfs_mnt_list, link) { devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_propagate_dev -loop:2-\n"); if (attach) { /* Device is being attached */ devfs_create_device_node(mnt->root_node, dev, NULL, NULL ); } else { /* Device is being detached */ devfs_alias_remove(dev); devfs_destroy_device_node(mnt->root_node, dev); } } devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_propagate_dev -end:3-\n"); return 0; } /* * devfs_node_to_path takes a node and a buffer of a size of * at least PATH_MAX, resolves the full path from the root * node and writes it in a humanly-readable format into the * buffer. * If DEVFS_STASH_DEPTH is less than the directory level up * to the root node, only the last DEVFS_STASH_DEPTH levels * of the path are resolved. */ int devfs_node_to_path(struct devfs_node *node, char *buffer) { #define DEVFS_STASH_DEPTH 32 struct devfs_node *node_stash[DEVFS_STASH_DEPTH]; int i, offset; memset(buffer, 0, PATH_MAX); for (i = 0; (i < DEVFS_STASH_DEPTH) && (node->node_type != Proot); i++) { node_stash[i] = node; node = node->parent; } i--; for (offset = 0; i >= 0; i--) { memcpy(buffer+offset, node_stash[i]->d_dir.d_name, node_stash[i]->d_dir.d_namlen); offset += node_stash[i]->d_dir.d_namlen; if (i > 0) { *(buffer+offset) = '/'; offset++; } } #undef DEVFS_STASH_DEPTH return 0; } /* * devfs_clone either returns a basename from a complete name by * returning the length of the name without trailing digits, or, * if clone != 0, calls the device's clone handler to get a new * device, which in turn is returned in devp. 
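 *
 * Basename lookup sketch (assumes a "mydrv" clone handler was registered
 * via devfs_clone_handler_add(); values are illustrative only):
 *
 *	size_t len = strlen("mydrv0");
 *	devfs_clone("mydrv0", &len, NULL, 0, NULL);
 *
 * On success (return value 0) len has been reduced to strlen("mydrv"),
 * the registered clonable basename.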
 */
int
devfs_clone(char *name, size_t *namlenp, cdev_t *devp, int clone,
	    struct ucred *cred)
{
	struct devfs_clone_handler *chandler;
	struct dev_clone_args ap;
	size_t len;
	int error = 1;

	KKASSERT(namlenp);
	len = *namlenp;

	if (!clone) {
		for (; (len > 0) && (DEVFS_ISDIGIT(name[len-1])); len--)
			;
	}

	TAILQ_FOREACH(chandler, &devfs_chandler_list, link) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "len=%d, chandler->namlen=%d\n",
			    len, chandler->namlen);
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "name=%s, chandler->name=%s\n",
			    name, chandler->name);
		if ((chandler->namlen == len) &&
		    (!memcmp(chandler->name, name, len)) &&
		    (chandler->nhandler)) {
			devfs_debug(DEVFS_DEBUG_DEBUG,
				    "devfs_clone: found clone handler for the "
				    "base name at %p\n", chandler->nhandler);
			if (clone) {
				ap.a_dev = NULL;
				ap.a_name = name;
				ap.a_namelen = len;
				ap.a_cred = cred;
				error = (chandler->nhandler)(&ap);
				KKASSERT(devp);
				*devp = ap.a_dev;
			} else {
				*namlenp = len;
				error = 0;
			}
			break;
		}
	}

	return error;
}

/*
 * Registers a new orphan in the orphan list.
 */
void
devfs_tracer_add_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);
	orphan = kmalloc(sizeof(struct devfs_orphan), M_DEVFS, M_WAITOK);
	orphan->node = node;

	KKASSERT((node->flags & DEVFS_ORPHANED) == 0);
	node->flags |= DEVFS_ORPHANED;
	TAILQ_INSERT_TAIL(DEVFS_ORPHANLIST(node->mp), orphan, link);
}

/*
 * Removes an orphan from the orphan list.
 */
void
devfs_tracer_del_orphan(struct devfs_node *node)
{
	struct devfs_orphan *orphan;

	KKASSERT(node);

	TAILQ_FOREACH(orphan, DEVFS_ORPHANLIST(node->mp), link) {
		if (orphan->node == node) {
			node->flags &= ~DEVFS_ORPHANED;
			TAILQ_REMOVE(DEVFS_ORPHANLIST(node->mp), orphan, link);
			kfree(orphan, M_DEVFS);
			break;
		}
	}
}

/*
 * Counts the orphans in the orphan list, and if cleanup
 * is specified, also frees the orphan and removes it from
 * the list.
 */
size_t
devfs_tracer_orphan_count(struct mount *mp, int cleanup)
{
	struct devfs_orphan *orphan, *orphan2;
	size_t count = 0;

	TAILQ_FOREACH_MUTABLE(orphan, DEVFS_ORPHANLIST(mp), link, orphan2) {
		count++;
		if (cleanup) {
			TAILQ_REMOVE(DEVFS_ORPHANLIST(mp), orphan, link);
			orphan->node->flags &= ~DEVFS_ORPHANED;
			devfs_freep(orphan->node);
			kfree(orphan, M_DEVFS);
		}
	}

	return count;
}

/*
 * Fetch an ino_t from the global d_ino by increasing it
 * while spinlocked.
 */
static ino_t
devfs_fetch_ino(void)
{
	ino_t ret;

	spin_lock_wr(&ino_lock);
	ret = d_ino++;
	spin_unlock_wr(&ino_lock);

	return ret;
}

/*
 * Allocates a new cdev and initializes its most basic
 * fields.
 */
cdev_t
devfs_new_cdev(struct dev_ops *ops, int minor)
{
	cdev_t dev = sysref_alloc(&cdev_sysref_class);
	sysref_activate(&dev->si_sysref);
	reference_dev(dev);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "new_cdev: clearing first %d bytes\n",
		    offsetof(struct cdev, si_sysref));
	memset(dev, 0, offsetof(struct cdev, si_sysref));

	dev->si_uid = 0;
	dev->si_gid = 0;
	dev->si_perms = 0;
	dev->si_drv1 = NULL;
	dev->si_drv2 = NULL;
	dev->si_lastread = 0;		/* time_second */
	dev->si_lastwrite = 0;		/* time_second */

	dev->si_ops = ops;
	dev->si_flags = 0;
	dev->si_umajor = 0;
	dev->si_uminor = minor;
	dev->si_inode = makeudev(devfs_reference_ops(ops), minor);

	return dev;
}

static void
devfs_cdev_terminate(cdev_t dev)
{
	int locked = 0;

	/*
	 * Check if it is locked already.
	 * If not, we acquire the devfs lock.
	 */
	if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_cdev_terminate: Taking care of dev->si_name=%s\n",
		    dev->si_name);

	/* Propagate destruction, just in case */
	devfs_propagate_dev(dev, 0);

	/* If we acquired the lock, we also get rid of it */
	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	devfs_release_ops(dev->si_ops);

	/* Finally destroy the device */
	sysref_put(&dev->si_sysref);
}

/*
 * Links a given cdev into the dev list.
 */
int
devfs_link_dev(cdev_t dev)
{
	KKASSERT((dev->si_flags & SI_DEVFS_LINKED) == 0);
	dev->si_flags |= SI_DEVFS_LINKED;
	TAILQ_INSERT_TAIL(&devfs_dev_list, dev, link);

	return 0;
}

/*
 * Removes a given cdev from the dev list. The caller is responsible for
 * releasing the reference on the device associated with the linkage.
 *
 * Returns EALREADY if the dev has already been unlinked.
 */
static int
devfs_unlink_dev(cdev_t dev)
{
	if ((dev->si_flags & SI_DEVFS_LINKED)) {
		TAILQ_REMOVE(&devfs_dev_list, dev, link);
		dev->si_flags &= ~SI_DEVFS_LINKED;
		return (0);
	}
	return (EALREADY);
}

int
devfs_node_is_accessible(struct devfs_node *node)
{
	if ((node) && (!(node->flags & DEVFS_HIDDEN)))
		return 1;
	else
		return 0;
}

int
devfs_reference_ops(struct dev_ops *ops)
{
	int unit;

	if (ops->head.refs == 0) {
		ops->head.id = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ops_id), 255);
		if (ops->head.id == -1) {
			/* Ran out of unique ids */
			kprintf("devfs_reference_ops: WARNING: ran out of unique ids\n");
		}
	}
	unit = ops->head.id;
	++ops->head.refs;

	return unit;
}

void
devfs_release_ops(struct dev_ops *ops)
{
	--ops->head.refs;

	if (ops->head.refs == 0) {
		devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ops_id), ops->head.id);
	}
}

void
devfs_config(void *arg)
{
	devfs_msg_t msg;

	msg = devfs_msg_get();
	kprintf("devfs_config: sync'ing up\n");
	msg = devfs_msg_send_sync(DEVFS_SYNC, msg);
	devfs_msg_put(msg);
}

/*
 * Called on init of devfs; creates the objcaches and
 * spawns off the devfs core thread. Also initializes
 * locks.
 */
static void
devfs_init(void)
{
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init() called\n");

	/* Create objcaches for nodes, msgs and devs */
	devfs_node_cache = objcache_create("devfs-node-cache", 0, 0,
					   NULL, NULL, NULL,
					   objcache_malloc_alloc,
					   objcache_malloc_free,
					   &devfs_node_malloc_args);

	devfs_msg_cache = objcache_create("devfs-msg-cache", 0, 0,
					  NULL, NULL, NULL,
					  objcache_malloc_alloc,
					  objcache_malloc_free,
					  &devfs_msg_malloc_args);

	devfs_dev_cache = objcache_create("devfs-dev-cache", 0, 0,
					  NULL, NULL, NULL,
					  objcache_malloc_alloc,
					  objcache_malloc_free,
					  &devfs_dev_malloc_args);

	devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(ops_id));
#if 0
	devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(ops_id), 0);
#endif

	/* Initialize the reply-only port which acts as a message drain */
	lwkt_initport_replyonly(&devfs_dispose_port, devfs_msg_autofree_reply);

	/* Initialize *THE* devfs lock */
	lockinit(&devfs_lock, "devfs_core lock", 0, 0);

	lwkt_create(devfs_msg_core, /*args*/NULL, &td_core, NULL,
		    0, 0, "devfs_msg_core");

	tsleep(td_core/*devfs_id*/, 0, "devfsc", 0);

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_init finished\n");
}

/*
 * Called on unload of devfs; takes care of destroying the core
 * and the objcaches. Also removes aliases that are no longer needed.
*/ static void devfs_uninit(void) { devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_uninit() called\n"); devfs_msg_send(DEVFS_TERMINATE_CORE, NULL); tsleep(td_core/*devfs_id*/, 0, "devfsc", 0); tsleep(td_core/*devfs_id*/, 0, "devfsc", 10000); devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(ops_id)); /* Destroy the objcaches */ objcache_destroy(devfs_msg_cache); objcache_destroy(devfs_node_cache); objcache_destroy(devfs_dev_cache); devfs_alias_reap(); } /* * This is a sysctl handler to assist userland devname(3) to * find the device name for a given udev. */ static int devfs_sysctl_devname_helper(SYSCTL_HANDLER_ARGS) { udev_t udev; cdev_t found; int error; if ((error = SYSCTL_IN(req, &udev, sizeof(udev_t)))) return (error); devfs_debug(DEVFS_DEBUG_DEBUG, "devfs sysctl, received udev: %d\n", udev); if (udev == NOUDEV) return(EINVAL); if ((found = devfs_find_device_by_udev(udev)) == NULL) return(ENOENT); return(SYSCTL_OUT(req, found->si_name, strlen(found->si_name) + 1)); } SYSCTL_PROC(_kern, OID_AUTO, devname, CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY, NULL, 0, devfs_sysctl_devname_helper, "", "helper for devname(3)"); static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs"); TUNABLE_INT("vfs.devfs.debug", &devfs_debug_enable); SYSCTL_INT(_vfs_devfs, OID_AUTO, debug, CTLFLAG_RW, &devfs_debug_enable, 0, "Enable DevFS debugging"); SYSINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, devfs_init, NULL); SYSUNINIT(vfs_devfs_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, devfs_uninit, NULL);