From: Venkatesh Srinivas
Date: Wed, 6 Apr 2011 02:02:10 +0000 (-0700)
Subject: kernel -- Remove unused vfs rangelocks from vnodes.
X-Git-Tag: v2.11.0~88
X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/commitdiff_plain/9013609878f45104b5e2df0e90c0549fd7ed2f0c

kernel -- Remove unused vfs rangelocks from vnodes.
---

diff --git a/sys/conf/files b/sys/conf/files
index f4527da68a..95bcdc797e 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1021,7 +1021,6 @@ kern/vfs_subr.c	standard
 kern/vfs_vm.c	standard
 kern/vfs_lock.c	standard
 kern/vfs_mount.c	standard
-kern/vfs_rangelock.c	standard
 kern/vfs_sync.c	standard
 kern/vfs_synth.c	standard
 kern/vfs_syscalls.c	standard
diff --git a/sys/kern/vfs_rangelock.c b/sys/kern/vfs_rangelock.c
deleted file mode 100644
index 06d452d438..0000000000
--- a/sys/kern/vfs_rangelock.c
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
- *
- * This code is derived from software contributed to The DragonFly Project
- * by Matthew Dillon <dillon@backplane.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- * 3. Neither the name of The DragonFly Project nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific, prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
- * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $DragonFly: src/sys/kern/vfs_rangelock.c,v 1.2 2006/12/23 00:35:04 swildner Exp $
- */
-/*
- * This module implements hard range locks for files and directories.  It is
- * not to be confused with the UNIX advisory lock mechanism.  This module
- * will allow the kernel and VFS to break large I/O requests into smaller
- * pieces without losing atomicity guarantees and, eventually, this module
- * will be responsible for providing hooks for remote cache coherency
- * protocols as well.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-
-#include
-#include
-#include
-#include
-#include
-
-static void vrange_lock_overlapped(struct vnode *vp,
-			struct vrangelock *vr, struct vrangelock *scan);
-static int vrange_lock_conflicted(struct vnode *vp, struct vrangelock *vr);
-
-/*
- * Lock a range within a vnode.
- *
- * The lock list is sorted by vr_offset.
- */ -void -vrange_lock(struct vnode *vp, struct vrangelock *vr) -{ - struct vrangelock *scan; - off_t eoff; - - eoff = vr->vr_offset + vr->vr_length; - - KKASSERT((vr->vr_flags & RNGL_ONLIST) == 0); - vr->vr_flags |= RNGL_ONLIST; - - TAILQ_FOREACH(scan, &vp->v_range.vh_list, vr_node) { - /* - * If the new element is entirely in front of the scan element - * we are done. If it is entirely beyond the scan element we - * loop. Otherwise an overlap has occured. - */ - if (eoff <= scan->vr_offset) { - TAILQ_INSERT_BEFORE(scan, vr, vr_node); - return; - } - if (vr->vr_offset >= scan->vr_offset + scan->vr_length) - continue; - vrange_lock_overlapped(vp, vr, scan); - } - TAILQ_INSERT_TAIL(&vp->v_range.vh_list, vr, vr_node); -} - -/* - * An overlap occured. The request is still inserted sorted based on - * vr_offset but we must also scan the conflict space and block while - * conflicts exist. - */ -static void -vrange_lock_overlapped(struct vnode *vp, - struct vrangelock *vr, struct vrangelock *scan) -{ - int conflicted = 0; - int inserted = 0; - int warned = 0; - off_t eoff; - - eoff = vr->vr_offset + vr->vr_length; - - while (scan->vr_offset < eoff) { - if ((vr->vr_flags & scan->vr_flags & RNGL_SHARED) == 0) { - scan->vr_flags |= RNGL_CHECK; - vr->vr_flags |= RNGL_WAITING; - conflicted = 1; - } - if (inserted == 0 && vr->vr_offset < scan->vr_offset) { - TAILQ_INSERT_BEFORE(scan, vr, vr_node); - inserted = 1; - } - if ((scan = TAILQ_NEXT(scan, vr_node)) == NULL) { - if (inserted == 0) - TAILQ_INSERT_TAIL(&vp->v_range.vh_list, vr, vr_node); - break; - } - } - - /* - * sleep until the conflict has been resolved. - */ - while (conflicted) { - if (tsleep(&vp->v_range.vh_list, 0, "vrnglk", hz * 3) == EWOULDBLOCK) { - if (warned == 0) - kprintf("warning: conflicted lock vp %p %lld,%lld blocked\n", - vp, (long long)vr->vr_offset, (long long)vr->vr_length); - warned = 1; - } - conflicted = vrange_lock_conflicted(vp, vr); - } - if (warned) { - kprintf("waring: conflicted lock vp %p %lld,%lld unblocked\n", - vp, (long long)vr->vr_offset, (long long)vr->vr_length); - } -} - -/* - * Check for conflicts by scanning both forwards and backwards from the - * node in question. The list is sorted by vr_offset but ending offsets - * may vary. Because of this, the reverse scan cannot stop early. - * - * Return 0 on success, 1 if the lock is still conflicted. We do not - * check elements that are waiting as that might result in a deadlock. - * We can stop the moment we hit a conflict. 
- */ -static int -vrange_lock_conflicted(struct vnode *vp, struct vrangelock *vr) -{ - struct vrangelock *scan; - off_t eoff; - - eoff = vr->vr_offset + vr->vr_length; - - KKASSERT(vr->vr_flags & RNGL_WAITING); - scan = vr; - while ((scan = TAILQ_PREV(scan, vrangelock_list, vr_node)) != NULL) { - if (scan->vr_flags & RNGL_WAITING) - continue; - if (scan->vr_offset + scan->vr_length > vr->vr_offset) { - if ((vr->vr_flags & scan->vr_flags & RNGL_SHARED) == 0) { - scan->vr_flags |= RNGL_CHECK; - return(1); - } - } - } - scan = vr; - while ((scan = TAILQ_NEXT(scan, vr_node)) != NULL) { - if (eoff <= scan->vr_offset) - break; - if (scan->vr_flags & RNGL_WAITING) - continue; - if ((vr->vr_flags & scan->vr_flags & RNGL_SHARED) == 0) { - scan->vr_flags |= RNGL_CHECK; - return(1); - } - } - vr->vr_flags &= ~RNGL_WAITING; - return(0); -} - -void -vrange_unlock(struct vnode *vp, struct vrangelock *vr) -{ - KKASSERT((vr->vr_flags & RNGL_ONLIST) != 0); - vr->vr_flags &= ~RNGL_ONLIST; - TAILQ_REMOVE(&vp->v_range.vh_list, vr, vr_node); - if (vr->vr_flags & RNGL_CHECK) { - vr->vr_flags &= ~RNGL_CHECK; - wakeup(&vp->v_range.vh_list); - } -} - diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h index cee6021248..fd95f86bb3 100644 --- a/sys/sys/vnode.h +++ b/sys/sys/vnode.h @@ -106,69 +106,6 @@ struct mountctl_opt { const char *o_name; }; -/* - * Range locks protect offset ranges in files and directories at a high - * level, allowing the actual I/O to be broken down into smaller pieces. - * Range locks will eventually be integrated into the clustered cache - * coherency infrastructure. - * - * We use a simple data structure for now, but eventually this should - * probably be a btree or red-black tree. - */ -struct vrangelock; - -TAILQ_HEAD(vrangelock_list, vrangelock); - -struct vrangehead { - struct vrangelock_list vh_list; -}; - -struct vrangelock { - TAILQ_ENTRY(vrangelock) vr_node; - int vr_flags; - off_t vr_offset; - off_t vr_length; -}; - -#define RNGL_WAITING 0x0001 /* waiting for lock, else has lock */ -#define RNGL_CHECK 0x0002 /* check for work on unlock */ -#define RNGL_SHARED 0x0004 /* shared lock, else exclusive */ -#define RNGL_ONLIST 0x0008 /* sanity check */ - -static __inline -void -vrange_init(struct vrangelock *vr, int flags, off_t offset, off_t length) -{ - vr->vr_flags = flags; - vr->vr_offset = offset; - vr->vr_length = length; -} - -#ifdef _KERNEL - -void vrange_lock(struct vnode *vp, struct vrangelock *vr); -void vrange_unlock(struct vnode *vp, struct vrangelock *vr); - -static __inline -void -vrange_lock_shared(struct vnode *vp, struct vrangelock *vr, - off_t offset, off_t length) -{ - vrange_init(vr, RNGL_SHARED, offset, length); - vrange_lock(vp, vr); -} - -static __inline -void -vrange_lock_excl(struct vnode *vp, struct vrangelock *vr, - off_t offset, off_t length) -{ - vrange_init(vr, 0, offset, length); - vrange_lock(vp, vr); -} - -#endif - /* * The vnode infrastructure is being reorgranized. Most reference-related * fields are locked by the BGL, and most file I/O related operations and @@ -259,13 +196,11 @@ struct vnode { struct kqinfo vpi_kqinfo; /* identity of poller(s) */ } v_pollinfo; struct vmresident *v_resident; /* optional vmresident */ - struct vrangehead v_range; /* range lock */ struct ccms_dataspace v_ccms; /* cache coherency */ #ifdef DEBUG_LOCKS const char *filename; /* Source file doing locking */ int line; /* Line number doing locking */ #endif - void *v_xaddr; }; #define v_socket v_un.vu_socket #define v_umajor v_un.vu_cdev.vu_umajor