2 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
3 * Authors: Doug Rabson <dfr@rabson.org>
4 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Copyright (c) 1982, 1986, 1989, 1993
29 * The Regents of the University of California. All rights reserved.
31 * This code is derived from software contributed to Berkeley by
32 * Scooter Morris at Genentech Inc.
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. Neither the name of the University nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
46 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
47 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
61 #include <sys/cdefs.h>
62 __FBSDID("$FreeBSD$");
64 #include "opt_debug_lockf.h"
66 #include <sys/param.h>
67 #include <sys/systm.h>
69 #include <sys/kernel.h>
70 #include <sys/limits.h>
72 #include <sys/mount.h>
73 #include <sys/mutex.h>
76 #include <sys/unistd.h>
77 #include <sys/vnode.h>
78 #include <sys/malloc.h>
79 #include <sys/fcntl.h>
80 #include <sys/lockf.h>
81 #include <sys/taskqueue.h>
84 #include <sys/sysctl.h>
86 #include <ufs/ufs/extattr.h>
87 #include <ufs/ufs/quota.h>
88 #include <ufs/ufs/ufsmount.h>
89 #include <ufs/ufs/inode.h>
91 static int lockf_debug = 0; /* control debug output */
92 SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
95 static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
99 struct owner_vertex_list;
102 #define NOLOCKF (struct lockf_entry *)0
105 static void lf_init(void *);
106 static int lf_hash_owner(caddr_t, struct flock *, int);
107 static int lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
109 static struct lockf_entry *
110 lf_alloc_lock(struct lock_owner *);
111 static int lf_free_lock(struct lockf_entry *);
112 static int lf_clearlock(struct lockf *, struct lockf_entry *);
113 static int lf_overlaps(struct lockf_entry *, struct lockf_entry *);
114 static int lf_blocks(struct lockf_entry *, struct lockf_entry *);
115 static void lf_free_edge(struct lockf_edge *);
116 static struct lockf_edge *
118 static void lf_alloc_vertex(struct lockf_entry *);
119 static int lf_add_edge(struct lockf_entry *, struct lockf_entry *);
120 static void lf_remove_edge(struct lockf_edge *);
121 static void lf_remove_outgoing(struct lockf_entry *);
122 static void lf_remove_incoming(struct lockf_entry *);
123 static int lf_add_outgoing(struct lockf *, struct lockf_entry *);
124 static int lf_add_incoming(struct lockf *, struct lockf_entry *);
125 static int lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
127 static struct lockf_entry *
128 lf_getblock(struct lockf *, struct lockf_entry *);
129 static int lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
130 static void lf_insert_lock(struct lockf *, struct lockf_entry *);
131 static void lf_wakeup_lock(struct lockf *, struct lockf_entry *);
132 static void lf_update_dependancies(struct lockf *, struct lockf_entry *,
133 int all, struct lockf_entry_list *);
134 static void lf_set_start(struct lockf *, struct lockf_entry *, off_t,
135 struct lockf_entry_list*);
136 static void lf_set_end(struct lockf *, struct lockf_entry *, off_t,
137 struct lockf_entry_list*);
138 static int lf_setlock(struct lockf *, struct lockf_entry *,
139 struct vnode *, void **cookiep);
140 static int lf_cancel(struct lockf *, struct lockf_entry *, void *);
141 static void lf_split(struct lockf *, struct lockf_entry *,
142 struct lockf_entry *, struct lockf_entry_list *);
144 static int graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
145 struct owner_vertex_list *path);
146 static void graph_check(struct owner_graph *g, int checkorder);
147 static void graph_print_vertices(struct owner_vertex_list *set);
149 static int graph_delta_forward(struct owner_graph *g,
150 struct owner_vertex *x, struct owner_vertex *y,
151 struct owner_vertex_list *delta);
152 static int graph_delta_backward(struct owner_graph *g,
153 struct owner_vertex *x, struct owner_vertex *y,
154 struct owner_vertex_list *delta);
155 static int graph_add_indices(int *indices, int n,
156 struct owner_vertex_list *set);
157 static int graph_assign_indices(struct owner_graph *g, int *indices,
158 int nextunused, struct owner_vertex_list *set);
159 static int graph_add_edge(struct owner_graph *g,
160 struct owner_vertex *x, struct owner_vertex *y);
161 static void graph_remove_edge(struct owner_graph *g,
162 struct owner_vertex *x, struct owner_vertex *y);
163 static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
164 struct lock_owner *lo);
165 static void graph_free_vertex(struct owner_graph *g,
166 struct owner_vertex *v);
167 static struct owner_graph * graph_init(struct owner_graph *g);
169 static void lf_print(char *, struct lockf_entry *);
170 static void lf_printlist(char *, struct lockf_entry *);
171 static void lf_print_owner(struct lock_owner *);
175 * This structure is used to keep track of both local and remote lock
176 * owners. The lf_owner field of the struct lockf_entry points back at
177 * the lock owner structure. Each possible lock owner (local proc for
178 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
179 * pair for remote locks) is represented by a unique instance of
182 * If a lock owner has a lock that blocks some other lock or a lock
183 * that is waiting for some other lock, it also has a vertex in the
187 * (s) locked by state->ls_lock
188 * (S) locked by lf_lock_states_lock
189 * (l) locked by lf_lock_owners_lock
190 * (g) locked by lf_owner_graph_lock
191 * (c) const until freeing
193 #define LOCK_OWNER_HASH_SIZE 256
196 LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
197 int lo_refs; /* (l) Number of locks referring to this */
198 int lo_flags; /* (c) Flags passed to lf_advlock */
199 caddr_t lo_id; /* (c) Id value passed to lf_advlock */
200 pid_t lo_pid; /* (c) Process Id of the lock owner */
201 int lo_sysid; /* (c) System Id of the lock owner */
202 struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
205 LIST_HEAD(lock_owner_list, lock_owner);
207 static struct sx lf_lock_states_lock;
208 static struct lockf_list lf_lock_states; /* (S) */
209 static struct sx lf_lock_owners_lock;
210 static struct lock_owner_list lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */
213 * Structures for deadlock detection.
215 * We have two types of directed graph, the first is the set of locks,
216 * both active and pending on a vnode. Within this graph, active locks
217 * are terminal nodes in the graph (i.e. have no out-going
218 * edges). Pending locks have out-going edges to each blocking active
219 * lock that prevents the lock from being granted and also to each
220 * older pending lock that would block them if it was active. The
221 * graph for each vnode is naturally acyclic; new edges are only ever
222 * added to or from new nodes (either new pending locks which only add
223 * out-going edges or new active locks which only add in-coming edges)
224 * therefore they cannot create loops in the lock graph.
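 * For example, if owner A holds an active write lock on [0..9], a
 * later write request from owner B for [8..12] gains an edge B->A,
 * and a still later read request from owner C for [10..10] gains an
 * edge C->B (it does not overlap A's lock). When A unlocks, B has no
 * remaining out-going edges and can be granted, while C continues to
 * wait for B.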
226 * The second graph is a global graph of lock owners. Each lock owner
227 * is a vertex in that graph and an edge is added to the graph
228 * whenever an edge is added to a vnode graph, with end points
229 * corresponding to the owner of the new pending lock and the owner of the
230 * lock upon which it waits. In order to prevent deadlock, we only add
231 * an edge to this graph if the new edge would not create a cycle.
233 * The lock owner graph is topologically sorted, i.e. if a node has
234 * any outgoing edges, then it has an order strictly less than any
235 * node to which it has an outgoing edge. We preserve this ordering
236 * (and detect cycles) on edge insertion using Algorithm PK from the
237 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
238 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
244 LIST_ENTRY(owner_edge) e_outlink; /* (g) link in e_from's out-edge list */
245 LIST_ENTRY(owner_edge) e_inlink; /* (g) link in e_to's in-edge list */
246 int e_refs; /* (g) number of times added */
247 struct owner_vertex *e_from; /* (c) out-going from here */
248 struct owner_vertex *e_to; /* (c) in-coming to here */
250 LIST_HEAD(owner_edge_list, owner_edge);
252 struct owner_vertex {
253 TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
254 uint32_t v_gen; /* (g) workspace for edge insertion */
255 int v_order; /* (g) order of vertex in graph */
256 struct owner_edge_list v_outedges;/* (g) list of out-edges */
257 struct owner_edge_list v_inedges; /* (g) list of in-edges */
258 struct lock_owner *v_owner; /* (c) corresponding lock owner */
260 TAILQ_HEAD(owner_vertex_list, owner_vertex);
263 struct owner_vertex** g_vertices; /* (g) pointers to vertices */
264 int g_size; /* (g) number of vertices */
265 int g_space; /* (g) space allocated for vertices */
266 int *g_indexbuf; /* (g) workspace for loop detection */
267 uint32_t g_gen; /* (g) increment when re-ordering */
270 static struct sx lf_owner_graph_lock;
271 static struct owner_graph lf_owner_graph;
274 * Initialise various structures and locks.
281 sx_init(&lf_lock_states_lock, "lock states lock");
282 LIST_INIT(&lf_lock_states);
284 sx_init(&lf_lock_owners_lock, "lock owners lock");
285 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
286 LIST_INIT(&lf_lock_owners[i]);
288 sx_init(&lf_owner_graph_lock, "owner graph lock");
289 graph_init(&lf_owner_graph);
291 SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);
294 * Generate a hash value for a lock owner.
297 lf_hash_owner(caddr_t id, struct flock *fl, int flags)
301 if (flags & F_REMOTE) {
302 h = HASHSTEP(0, fl->l_pid);
303 h = HASHSTEP(h, fl->l_sysid);
304 } else if (flags & F_FLOCK) {
305 h = ((uintptr_t) id) >> 7;
307 struct proc *p = (struct proc *) id;
308 h = HASHSTEP(0, p->p_pid);
312 return (h % LOCK_OWNER_HASH_SIZE);
316 * Return true if a lock owner matches the details passed to
320 lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
323 if (flags & F_REMOTE) {
324 return lo->lo_pid == fl->l_pid
325 && lo->lo_sysid == fl->l_sysid;
327 return lo->lo_id == id;
331 static struct lockf_entry *
332 lf_alloc_lock(struct lock_owner *lo)
334 struct lockf_entry *lf;
336 lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);
340 printf("Allocated lock %p\n", lf);
343 sx_xlock(&lf_lock_owners_lock);
345 sx_xunlock(&lf_lock_owners_lock);
353 lf_free_lock(struct lockf_entry *lock)
356 KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
357 if (--lock->lf_refs > 0)
360 * Adjust the lock_owner reference count and
361 * reclaim the entry if this is the last lock
364 struct lock_owner *lo = lock->lf_owner;
366 KASSERT(LIST_EMPTY(&lock->lf_outedges),
367 ("freeing lock with dependencies"));
368 KASSERT(LIST_EMPTY(&lock->lf_inedges),
369 ("freeing lock with dependants"));
370 sx_xlock(&lf_lock_owners_lock);
371 KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
373 if (lo->lo_refs == 0) {
376 printf("lf_free_lock: freeing lock owner %p\n",
380 sx_xlock(&lf_owner_graph_lock);
381 graph_free_vertex(&lf_owner_graph,
383 sx_xunlock(&lf_owner_graph_lock);
385 LIST_REMOVE(lo, lo_link);
389 printf("Freed lock owner %p\n", lo);
392 sx_unlock(&lf_lock_owners_lock);
394 if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
395 vrele(lock->lf_vnode);
396 lock->lf_vnode = NULL;
400 printf("Freed lock %p\n", lock);
407 * Advisory record locking support
410 lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
413 struct lockf *state, *freestate = NULL;
414 struct flock *fl = ap->a_fl;
415 struct lockf_entry *lock;
416 struct vnode *vp = ap->a_vp;
417 caddr_t id = ap->a_id;
418 int flags = ap->a_flags;
420 struct lock_owner *lo;
421 off_t start, end, oadd;
425 * Handle the F_UNLCKSYS case first - no need to mess about
426 * creating a lock owner for this one.
428 if (ap->a_op == F_UNLCKSYS) {
429 lf_clearremotesys(fl->l_sysid);
434 * Convert the flock structure into a start and end.
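 * For example, with l_whence = SEEK_SET, l_start = 100 and l_len = 10
 * the lock covers bytes [100..109]. An l_len of zero means "to end of
 * file" and is represented internally by end = OFF_MAX, while a
 * negative l_len locks the bytes immediately preceding l_start.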
436 switch (fl->l_whence) {
441 * Caller is responsible for adding any necessary offset
442 * when SEEK_CUR is used.
448 if (size > OFF_MAX ||
449 (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
451 start = size + fl->l_start;
466 } else if (fl->l_len == 0) {
469 oadd = fl->l_len - 1;
470 if (oadd > OFF_MAX - start)
478 * Avoid the common case of unlocking when inode has no locks.
481 if ((*statep) == NULL) {
482 if (ap->a_op != F_SETLK) {
483 fl->l_type = F_UNLCK;
491 * Map our arguments to an existing lock owner or create one
492 * if this is the first time we have seen this owner.
494 hash = lf_hash_owner(id, fl, flags);
495 sx_xlock(&lf_lock_owners_lock);
496 LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link)
497 if (lf_owner_matches(lo, id, fl, flags))
501 * We initialise the lock owner with a reference
502 * count which matches the new lockf_entry
503 * structure created below.
505 lo = malloc(sizeof(struct lock_owner), M_LOCKF,
509 printf("Allocated lock owner %p\n", lo);
513 lo->lo_flags = flags;
515 if (flags & F_REMOTE) {
516 lo->lo_pid = fl->l_pid;
517 lo->lo_sysid = fl->l_sysid;
518 } else if (flags & F_FLOCK) {
522 struct proc *p = (struct proc *) id;
523 lo->lo_pid = p->p_pid;
526 lo->lo_vertex = NULL;
529 if (lockf_debug & 1) {
530 printf("lf_advlockasync: new lock owner %p ", lo);
536 LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link);
539 * We have seen this lock owner before, increase its
540 * reference count to account for the new lockf_entry
541 * structure we create below.
545 sx_xunlock(&lf_lock_owners_lock);
548 * Create the lockf structure. We initialise the lf_owner
549 * field here instead of in lf_alloc_lock() to avoid paying
550 * the lf_lock_owners_lock tax twice.
552 lock = lf_alloc_lock(NULL);
554 lock->lf_start = start;
558 if (flags & F_REMOTE) {
560 * For remote locks, the caller may release its ref to
561 * the vnode at any time - we have to ref it here to
562 * prevent it from being recycled unexpectedly.
568 * XXX The problem is that VTOI is ufs specific, so it will
569 * break LOCKF_DEBUG for all FS's other than UFS because
570 * it casts the vnode->data ptr to struct inode *.
572 /* lock->lf_inode = VTOI(ap->a_vp); */
573 lock->lf_inode = (struct inode *)0;
574 lock->lf_type = fl->l_type;
575 LIST_INIT(&lock->lf_outedges);
576 LIST_INIT(&lock->lf_inedges);
577 lock->lf_async_task = ap->a_task;
578 lock->lf_flags = ap->a_flags;
581 * Do the requested operation. First find our state structure
582 * and create a new one if necessary - the caller's *statep
583 * variable and the state's ls_threads count are protected by
584 * the vnode interlock.
587 if (vp->v_iflag & VI_DOOMED) {
594 * Allocate a state structure if necessary.
602 ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
603 sx_init(&ls->ls_lock, "ls_lock");
604 LIST_INIT(&ls->ls_active);
605 LIST_INIT(&ls->ls_pending);
608 sx_xlock(&lf_lock_states_lock);
609 LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
610 sx_xunlock(&lf_lock_states_lock);
613 * Cope if we lost a race with some other thread while
614 * trying to allocate memory.
617 if (vp->v_iflag & VI_DOOMED) {
619 sx_xlock(&lf_lock_states_lock);
620 LIST_REMOVE(ls, ls_link);
621 sx_xunlock(&lf_lock_states_lock);
622 sx_destroy(&ls->ls_lock);
627 if ((*statep) == NULL) {
628 state = *statep = ls;
635 sx_xlock(&lf_lock_states_lock);
636 LIST_REMOVE(ls, ls_link);
637 sx_xunlock(&lf_lock_states_lock);
638 sx_destroy(&ls->ls_lock);
646 sx_xlock(&state->ls_lock);
648 * Recheck the doomed vnode after state->ls_lock is
649 * locked. lf_purgelocks() requires that no new threads add
650 * pending locks when the vnode is marked by the VI_DOOMED flag.
653 if (vp->v_iflag & VI_DOOMED) {
657 sx_xunlock(&state->ls_lock);
665 error = lf_setlock(state, lock, vp, ap->a_cookiep);
669 error = lf_clearlock(state, lock);
674 error = lf_getlock(state, lock, fl);
680 error = lf_cancel(state, lock, *ap->a_cookiep);
694 * Check for some can't happen stuff. In this case, the active
695 * lock list becoming disordered or containing mutually
696 * blocking locks. We also check the pending list for locks
697 * which should be active (i.e. have no out-going edges).
699 LIST_FOREACH(lock, &state->ls_active, lf_link) {
700 struct lockf_entry *lf;
701 if (LIST_NEXT(lock, lf_link))
702 KASSERT((lock->lf_start
703 <= LIST_NEXT(lock, lf_link)->lf_start),
704 ("locks disordered"));
705 LIST_FOREACH(lf, &state->ls_active, lf_link) {
708 KASSERT(!lf_blocks(lock, lf),
709 ("two conflicting active locks"));
710 if (lock->lf_owner == lf->lf_owner)
711 KASSERT(!lf_overlaps(lock, lf),
712 ("two overlapping locks from same owner"));
715 LIST_FOREACH(lock, &state->ls_pending, lf_link) {
716 KASSERT(!LIST_EMPTY(&lock->lf_outedges),
717 ("pending lock which should be active"));
720 sx_xunlock(&state->ls_lock);
723 * If we have removed the last active lock on the vnode and
724 * this is the last thread that was in-progress, we can free
725 * the state structure. We update the caller's pointer inside
726 * the vnode interlock but call free outside.
728 * XXX alternatively, keep the state structure around until
729 * the filesystem recycles - requires a callback from the
736 if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
737 KASSERT(LIST_EMPTY(&state->ls_pending),
738 ("freeing state with pending locks"));
745 if (freestate != NULL) {
746 sx_xlock(&lf_lock_states_lock);
747 LIST_REMOVE(freestate, ls_link);
748 sx_xunlock(&lf_lock_states_lock);
749 sx_destroy(&freestate->ls_lock);
750 free(freestate, M_LOCKF);
754 if (error == EDOOFUS) {
755 KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
762 lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
764 struct vop_advlockasync_args a;
770 a.a_flags = ap->a_flags;
774 return (lf_advlockasync(&a, statep, size));
778 lf_purgelocks(struct vnode *vp, struct lockf **statep)
781 struct lockf_entry *lock, *nlock;
784 * For this to work correctly, the caller must ensure that no
785 * other threads enter the locking system for this vnode,
786 * e.g. by checking VI_DOOMED. We wake up any threads that are
787 * sleeping waiting for locks on this vnode and then free all
788 * the remaining locks.
791 KASSERT(vp->v_iflag & VI_DOOMED,
792 ("lf_purgelocks: vp %p has not vgone yet", vp));
799 sx_xlock(&state->ls_lock);
800 sx_xlock(&lf_owner_graph_lock);
801 LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
802 LIST_REMOVE(lock, lf_link);
803 lf_remove_outgoing(lock);
804 lf_remove_incoming(lock);
807 * If it's an async lock, we can just free it
808 * here, otherwise we let the sleeping thread
811 if (lock->lf_async_task) {
814 lock->lf_flags |= F_INTR;
818 sx_xunlock(&lf_owner_graph_lock);
819 sx_xunlock(&state->ls_lock);
822 * Wait for all other threads, sleeping and otherwise
826 while (state->ls_threads > 1)
827 msleep(state, VI_MTX(vp), 0, "purgelocks", 0);
831 * We can just free all the active locks since they
832 * will have no dependencies (we removed them all
833 * above). We don't need to bother locking since we
834 * are the last thread using this state structure.
836 KASSERT(LIST_EMPTY(&state->ls_pending),
837 ("lock pending for %p", state));
838 LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
839 LIST_REMOVE(lock, lf_link);
842 sx_xlock(&lf_lock_states_lock);
843 LIST_REMOVE(state, ls_link);
844 sx_xunlock(&lf_lock_states_lock);
845 sx_destroy(&state->ls_lock);
846 free(state, M_LOCKF);
853 * Return non-zero if locks 'x' and 'y' overlap.
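 * Both end points are inclusive, so [0..4] and [4..9] overlap (they
 * share byte 4) while [0..4] and [5..9] do not.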
856 lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
859 return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
863 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
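 * Two read locks never block each other and locks with the same owner
 * never block each other; otherwise the locks conflict whenever their
 * ranges overlap, e.g. a write lock on [0..4] blocks another owner's
 * read lock on [4..9].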
866 lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
869 return x->lf_owner != y->lf_owner
870 && (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
871 && lf_overlaps(x, y);
875 * Allocate a lock edge structure.
877 static struct lockf_edge *
881 return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
888 lf_free_edge(struct lockf_edge *e)
896 * Ensure that the lock's owner has a corresponding vertex in the
900 lf_alloc_vertex(struct lockf_entry *lock)
902 struct owner_graph *g = &lf_owner_graph;
904 if (!lock->lf_owner->lo_vertex)
905 lock->lf_owner->lo_vertex =
906 graph_alloc_vertex(g, lock->lf_owner);
910 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
911 * the new edge would cause a cycle in the owner graph.
914 lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
916 struct owner_graph *g = &lf_owner_graph;
917 struct lockf_edge *e;
921 LIST_FOREACH(e, &x->lf_outedges, le_outlink)
922 KASSERT(e->le_to != y, ("adding lock edge twice"));
926 * Make sure the two owners have entries in the owner graph.
931 error = graph_add_edge(g, x->lf_owner->lo_vertex,
932 y->lf_owner->lo_vertex);
937 LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
938 LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
946 * Remove an edge from the lock graph.
949 lf_remove_edge(struct lockf_edge *e)
951 struct owner_graph *g = &lf_owner_graph;
952 struct lockf_entry *x = e->le_from;
953 struct lockf_entry *y = e->le_to;
955 graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
956 LIST_REMOVE(e, le_outlink);
957 LIST_REMOVE(e, le_inlink);
964 * Remove all out-going edges from lock x.
967 lf_remove_outgoing(struct lockf_entry *x)
969 struct lockf_edge *e;
971 while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
977 * Remove all in-coming edges from lock x.
980 lf_remove_incoming(struct lockf_entry *x)
982 struct lockf_edge *e;
984 while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
990 * Walk the list of locks for the file and create an out-going edge
991 * from lock to each blocking lock.
994 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
996 struct lockf_entry *overlap;
999 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
1001 * We may assume that the active list is sorted by
1004 if (overlap->lf_start > lock->lf_end)
1006 if (!lf_blocks(lock, overlap))
1010 * We've found a blocking lock. Add the corresponding
1011 * edge to the graphs and see if it would cause a
1014 error = lf_add_edge(lock, overlap);
1017 * The only error that lf_add_edge returns is EDEADLK.
1018 * Remove any edges we added and return the error.
1021 lf_remove_outgoing(lock);
1027 * We also need to add edges to sleeping locks that block
1028 * us. This ensures that lf_wakeup_lock cannot grant two
1029 * mutually blocking locks simultaneously and also enforces a
1030 * 'first come, first served' fairness model. Note that this
1031 * only happens if we are blocked by at least one active lock
1032 * due to the call to lf_getblock in lf_setlock below.
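 * For example, if owner A holds a read lock and owners B and then C
 * queue conflicting write requests, C gains edges to both A and B.
 * When A unlocks, only B (the older request) is granted; C keeps its
 * edge to B and continues to wait.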
1034 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1035 if (!lf_blocks(lock, overlap))
1038 * We've found a blocking lock. Add the corresponding
1039 * edge to the graphs and see if it would cause a
1042 error = lf_add_edge(lock, overlap);
1045 * The only error that lf_add_edge returns is EDEADLK.
1046 * Remove any edges we added and return the error.
1049 lf_remove_outgoing(lock);
1058 * Walk the list of pending locks for the file and create an in-coming
1059 * edge to 'lock' from each pending lock that it blocks.
1062 lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
1064 struct lockf_entry *overlap;
1067 LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
1068 if (!lf_blocks(lock, overlap))
1072 * We've found a blocking lock. Add the corresponding
1073 * edge to the graphs and see if it would cause a
1076 error = lf_add_edge(overlap, lock);
1079 * The only error that lf_add_edge returns is EDEADLK.
1080 * Remove any edges we added and return the error.
1083 lf_remove_incoming(lock);
1091 * Insert lock into the active list, keeping list entries ordered by
1092 * increasing values of lf_start.
1095 lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
1097 struct lockf_entry *lf, *lfprev;
1099 if (LIST_EMPTY(&state->ls_active)) {
1100 LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
1105 LIST_FOREACH(lf, &state->ls_active, lf_link) {
1106 if (lf->lf_start > lock->lf_start) {
1107 LIST_INSERT_BEFORE(lf, lock, lf_link);
1112 LIST_INSERT_AFTER(lfprev, lock, lf_link);
1116 * Wake up a sleeping lock and remove it from the pending list now
1117 * that all its dependencies have been resolved. The caller should
1118 * arrange for the lock to be added to the active list, adjusting any
1119 * existing locks for the same owner as needed.
1122 lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
1126 * Remove from ls_pending list and wake up the caller
1127 * or start the async notification, as appropriate.
1129 LIST_REMOVE(wakelock, lf_link);
1131 if (lockf_debug & 1)
1132 lf_print("lf_wakeup_lock: awakening", wakelock);
1133 #endif /* LOCKF_DEBUG */
1134 if (wakelock->lf_async_task) {
1135 taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
1142 * Re-check all dependent locks and remove edges to locks that we no
1143 * longer block. If 'all' is non-zero, the lock has been removed and
1144 * we must remove all the dependencies, otherwise it has simply been
1145 * reduced but remains active. Any pending locks which have been
1146 * unblocked are added to 'granted'
1149 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
1150 struct lockf_entry_list *granted)
1152 struct lockf_edge *e, *ne;
1153 struct lockf_entry *deplock;
1155 LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
1156 deplock = e->le_from;
1157 if (all || !lf_blocks(lock, deplock)) {
1158 sx_xlock(&lf_owner_graph_lock);
1160 sx_xunlock(&lf_owner_graph_lock);
1161 if (LIST_EMPTY(&deplock->lf_outedges)) {
1162 lf_wakeup_lock(state, deplock);
1163 LIST_INSERT_HEAD(granted, deplock, lf_link);
1170 * Set the start of an existing active lock, updating dependencies and
1171 * adding any newly woken locks to 'granted'.
1174 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
1175 struct lockf_entry_list *granted)
1178 KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
1179 lock->lf_start = new_start;
1180 LIST_REMOVE(lock, lf_link);
1181 lf_insert_lock(state, lock);
1182 lf_update_dependancies(state, lock, FALSE, granted);
1186 * Set the end of an existing active lock, updating dependencies and
1187 * adding any newly woken locks to 'granted'.
1190 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
1191 struct lockf_entry_list *granted)
1194 KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
1195 lock->lf_end = new_end;
1196 lf_update_dependancies(state, lock, FALSE, granted);
1200 * Add a lock to the active list, updating or removing any current
1201 * locks owned by the same owner and processing any pending locks that
1202 * become unblocked as a result. This code is also used for unlock
1203 * since the logic for updating existing locks is identical.
1205 * As a result of processing the new lock, we may unblock existing
1206 * pending locks as a result of downgrading/unlocking. We simply
1207 * activate the newly granted locks by looping.
1209 * Since the new lock already has its dependencies set up, we always
1210 * add it to the list (unless it's an unlock request). This may
1211 * fragment the lock list in some pathological cases but it's probably
1212 * not a real problem.
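 * For example, if an owner holds a write lock on [0..9] and then
 * requests a read lock on [3..5], the old lock is split into write
 * locks on [0..2] and [6..9] (overlap case 2, handled by lf_split)
 * and the new read lock on [3..5] is inserted between them.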
1215 lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
1217 struct lockf_entry *overlap, *lf;
1218 struct lockf_entry_list granted;
1221 LIST_INIT(&granted);
1222 LIST_INSERT_HEAD(&granted, lock, lf_link);
1224 while (!LIST_EMPTY(&granted)) {
1225 lock = LIST_FIRST(&granted);
1226 LIST_REMOVE(lock, lf_link);
1229 * Skip over locks owned by other processes. Handle
1230 * any locks that overlap and are owned by ourselves.
1232 overlap = LIST_FIRST(&state->ls_active);
1234 ovcase = lf_findoverlap(&overlap, lock, SELF);
1237 if (ovcase && (lockf_debug & 2)) {
1238 printf("lf_setlock: overlap %d", ovcase);
1239 lf_print("", overlap);
1245 * 1) overlap == lock
1246 * 2) overlap contains lock
1247 * 3) lock contains overlap
1248 * 4) overlap starts before lock
1249 * 5) overlap ends after lock
1252 case 0: /* no overlap */
1255 case 1: /* overlap == lock */
1257 * We have already set up the
1258 * dependants for the new lock, taking
1259 * into account a possible downgrade
1260 * or unlock. Remove the old lock.
1262 LIST_REMOVE(overlap, lf_link);
1263 lf_update_dependancies(state, overlap, TRUE,
1265 lf_free_lock(overlap);
1268 case 2: /* overlap contains lock */
1270 * Just split the existing lock.
1272 lf_split(state, overlap, lock, &granted);
1275 case 3: /* lock contains overlap */
1277 * Delete the overlap and advance to
1278 * the next entry in the list.
1280 lf = LIST_NEXT(overlap, lf_link);
1281 LIST_REMOVE(overlap, lf_link);
1282 lf_update_dependancies(state, overlap, TRUE,
1284 lf_free_lock(overlap);
1288 case 4: /* overlap starts before lock */
1290 * Just update the overlap end and
1293 lf_set_end(state, overlap, lock->lf_start - 1,
1295 overlap = LIST_NEXT(overlap, lf_link);
1298 case 5: /* overlap ends after lock */
1300 * Change the start of overlap and
1303 lf_set_start(state, overlap, lock->lf_end + 1,
1310 if (lockf_debug & 1) {
1311 if (lock->lf_type != F_UNLCK)
1312 lf_print("lf_activate_lock: activated", lock);
1314 lf_print("lf_activate_lock: unlocked", lock);
1315 lf_printlist("lf_activate_lock", lock);
1317 #endif /* LOCKF_DEBUG */
1318 if (lock->lf_type != F_UNLCK)
1319 lf_insert_lock(state, lock);
1324 * Cancel a pending lock request, either as a result of a signal or a
1325 * cancel request for an async lock.
1328 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
1330 struct lockf_entry_list granted;
1333 * Note it is theoretically possible that cancelling this lock
1334 * may allow some other pending lock to become
1335 * active. Consider this case:
1337 * Owner Action Result Dependencies
1339 * A: lock [0..0] succeeds
1340 * B: lock [2..2] succeeds
1341 * C: lock [1..2] blocked C->B
1342 * D: lock [0..1] blocked C->B,D->A,D->C
1343 * A: unlock [0..0] C->B,D->C
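 *
 * If the pending lock C is now cancelled, the D->C edge disappears
 * and D, which then has no remaining out-going edges, can be granted.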
1347 LIST_REMOVE(lock, lf_link);
1350 * Removing out-going edges is simple.
1352 sx_xlock(&lf_owner_graph_lock);
1353 lf_remove_outgoing(lock);
1354 sx_xunlock(&lf_owner_graph_lock);
1357 * Removing in-coming edges may allow some other lock to
1358 * become active - we use lf_update_dependancies to figure
1361 LIST_INIT(&granted);
1362 lf_update_dependancies(state, lock, TRUE, &granted);
1366 * Feed any newly active locks to lf_activate_lock.
1368 while (!LIST_EMPTY(&granted)) {
1369 lock = LIST_FIRST(&granted);
1370 LIST_REMOVE(lock, lf_link);
1371 lf_activate_lock(state, lock);
1376 * Set a byte-range lock.
1379 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
1382 static char lockstr[] = "lockf";
1383 int error, priority, stops_deferred;
1386 if (lockf_debug & 1)
1387 lf_print("lf_setlock", lock);
1388 #endif /* LOCKF_DEBUG */
1394 if (lock->lf_type == F_WRLCK)
1396 if (!(lock->lf_flags & F_NOINTR))
1399 * Scan lock list for this file looking for locks that would block us.
1401 if (lf_getblock(state, lock)) {
1403 * Free the structure and return if nonblocking.
1405 if ((lock->lf_flags & F_WAIT) == 0
1406 && lock->lf_async_task == NULL) {
1413 * For flock type locks, we must first remove
1414 * any shared locks that we hold before we sleep
1415 * waiting for an exclusive lock.
1417 if ((lock->lf_flags & F_FLOCK) &&
1418 lock->lf_type == F_WRLCK) {
1419 lock->lf_type = F_UNLCK;
1420 lf_activate_lock(state, lock);
1421 lock->lf_type = F_WRLCK;
1425 * We are blocked. Create edges to each blocking lock,
1426 * checking for deadlock using the owner graph. For
1427 * simplicity, we run deadlock detection for all
1428 * locks, posix and otherwise.
1430 sx_xlock(&lf_owner_graph_lock);
1431 error = lf_add_outgoing(state, lock);
1432 sx_xunlock(&lf_owner_graph_lock);
1436 if (lockf_debug & 1)
1437 lf_print("lf_setlock: deadlock", lock);
1444 * We have added edges to everything that blocks
1445 * us. Sleep until they all go away.
1447 LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
1449 if (lockf_debug & 1) {
1450 struct lockf_edge *e;
1451 LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
1452 lf_print("lf_setlock: blocking on", e->le_to);
1453 lf_printlist("lf_setlock", e->le_to);
1456 #endif /* LOCKF_DEBUG */
1458 if ((lock->lf_flags & F_WAIT) == 0) {
1460 * The caller requested async notification -
1461 * this callback happens when the blocking
1462 * lock is released, allowing the caller to
1463 * make another attempt to take the lock.
1465 *cookiep = (void *) lock;
1466 error = EINPROGRESS;
1471 stops_deferred = sigdeferstop(SIGDEFERSTOP_ERESTART);
1472 error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
1473 sigallowstop(stops_deferred);
1474 if (lf_free_lock(lock)) {
1480 * We may have been awakened by a signal and/or by a
1481 * debugger continuing us (in which cases we must
1482 * remove our lock graph edges) and/or by another
1483 * process releasing a lock (in which case our edges
1484 * have already been removed and we have been moved to
1485 * the active list). We may also have been woken by
1486 * lf_purgelocks which we report to the caller as
1487 * EINTR. In that case, lf_purgelocks will have
1488 * removed our lock graph edges.
1490 * Note that it is possible to receive a signal after
1491 * we were successfully woken (and moved to the active
1492 * list) but before we resumed execution. In this
1493 * case, our lf_outedges list will be clear. We
1494 * pretend there was no error.
1496 * Note also, if we have been sleeping long enough, we
1497 * may now have incoming edges from some newer lock
1498 * which is waiting behind us in the queue.
1500 if (lock->lf_flags & F_INTR) {
1505 if (LIST_EMPTY(&lock->lf_outedges)) {
1508 lf_cancel_lock(state, lock);
1512 if (lockf_debug & 1) {
1513 lf_print("lf_setlock: granted", lock);
1519 * It looks like we are going to grant the lock. First add
1520 * edges from any currently pending lock that the new lock
1523 sx_xlock(&lf_owner_graph_lock);
1524 error = lf_add_incoming(state, lock);
1525 sx_xunlock(&lf_owner_graph_lock);
1528 if (lockf_debug & 1)
1529 lf_print("lf_setlock: deadlock", lock);
1536 * No blocks!! Add the lock. Note that we will
1537 * downgrade or upgrade any overlapping locks this
1538 * process already owns.
1540 lf_activate_lock(state, lock);
1547 * Remove a byte-range lock on an inode.
1549 * Generally, find the lock (or an overlap to that lock)
1550 * and remove it (or shrink it), then wakeup anyone we can.
1553 lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
1555 struct lockf_entry *overlap;
1557 overlap = LIST_FIRST(&state->ls_active);
1559 if (overlap == NOLOCKF)
1562 if (unlock->lf_type != F_UNLCK)
1563 panic("lf_clearlock: bad type");
1564 if (lockf_debug & 1)
1565 lf_print("lf_clearlock", unlock);
1566 #endif /* LOCKF_DEBUG */
1568 lf_activate_lock(state, unlock);
1574 * Check whether there is a blocking lock, and if so return its
1578 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
1580 struct lockf_entry *block;
1583 if (lockf_debug & 1)
1584 lf_print("lf_getlock", lock);
1585 #endif /* LOCKF_DEBUG */
1587 if ((block = lf_getblock(state, lock))) {
1588 fl->l_type = block->lf_type;
1589 fl->l_whence = SEEK_SET;
1590 fl->l_start = block->lf_start;
1591 if (block->lf_end == OFF_MAX)
1594 fl->l_len = block->lf_end - block->lf_start + 1;
1595 fl->l_pid = block->lf_owner->lo_pid;
1596 fl->l_sysid = block->lf_owner->lo_sysid;
1598 fl->l_type = F_UNLCK;
1604 * Cancel an async lock request.
1607 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
1609 struct lockf_entry *reallock;
1612 * We need to match this request with an existing lock
1615 LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
1616 if ((void *) reallock == cookie) {
1618 * Double-check that this lock looks right
1619 * (maybe use a rolling ID for the cancel
1622 if (!(reallock->lf_vnode == lock->lf_vnode
1623 && reallock->lf_start == lock->lf_start
1624 && reallock->lf_end == lock->lf_end)) {
1629 * Make sure this lock was async and then just
1630 * remove it from its wait lists.
1632 if (!reallock->lf_async_task) {
1637 * Note that since any other thread must take
1638 * state->ls_lock before it can possibly
1639 * trigger the async callback, we are safe
1640 * from a race with lf_wakeup_lock, i.e. we
1641 * can free the lock (actually our caller does
1644 lf_cancel_lock(state, reallock);
1650 * We didn't find a matching lock - not much we can do here.
1656 * Walk the list of locks for an inode and
1657 * return the first blocking lock.
1659 static struct lockf_entry *
1660 lf_getblock(struct lockf *state, struct lockf_entry *lock)
1662 struct lockf_entry *overlap;
1664 LIST_FOREACH(overlap, &state->ls_active, lf_link) {
1666 * We may assume that the active list is sorted by
1669 if (overlap->lf_start > lock->lf_end)
1671 if (!lf_blocks(lock, overlap))
1679 * Walk the list of locks for an inode to find an overlapping lock (if
1680 * any) and return a classification of that overlap.
1683 * *overlap The place in the lock list to start looking
1684 * lock The lock which is being tested
1685 * type Pass 'SELF' to test only locks with the same
1686 * owner as lock, or 'OTHERS' to test only locks
1687 * with a different owner
1689 * Returns one of six values:
1691 * 1) overlap == lock
1692 * 2) overlap contains lock
1693 * 3) lock contains overlap
1694 * 4) overlap starts before lock
1695 * 5) overlap ends after lock
1697 * If there is an overlapping lock, '*overlap' is set to point at the
1700 * NOTE: this returns only the FIRST overlapping lock. There
1701 * may be more than one.
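 *
 * For example, with lock = [10..19]:
 *
 *	case 1: overlap = [10..19]  (identical range)
 *	case 2: overlap = [ 5..25]  (overlap contains lock)
 *	case 3: overlap = [12..15]  (lock contains overlap)
 *	case 4: overlap = [ 5..12]  (overlap starts before lock)
 *	case 5: overlap = [15..25]  (overlap ends after lock)
 *	case 0: overlap = [30..40]  (no overlap)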
1704 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
1706 struct lockf_entry *lf;
1710 if ((*overlap) == NOLOCKF) {
1714 if (lockf_debug & 2)
1715 lf_print("lf_findoverlap: looking for overlap in", lock);
1716 #endif /* LOCKF_DEBUG */
1717 start = lock->lf_start;
1722 if (lf->lf_start > end)
1724 if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
1725 ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
1726 *overlap = LIST_NEXT(lf, lf_link);
1730 if (lockf_debug & 2)
1731 lf_print("\tchecking", lf);
1732 #endif /* LOCKF_DEBUG */
1734 * OK, check for overlap
1738 * 1) overlap == lock
1739 * 2) overlap contains lock
1740 * 3) lock contains overlap
1741 * 4) overlap starts before lock
1742 * 5) overlap ends after lock
1744 if (start > lf->lf_end) {
1747 if (lockf_debug & 2)
1748 printf("no overlap\n");
1749 #endif /* LOCKF_DEBUG */
1750 *overlap = LIST_NEXT(lf, lf_link);
1753 if (lf->lf_start == start && lf->lf_end == end) {
1756 if (lockf_debug & 2)
1757 printf("overlap == lock\n");
1758 #endif /* LOCKF_DEBUG */
1762 if (lf->lf_start <= start && lf->lf_end >= end) {
1765 if (lockf_debug & 2)
1766 printf("overlap contains lock\n");
1767 #endif /* LOCKF_DEBUG */
1771 if (start <= lf->lf_start && end >= lf->lf_end) {
1774 if (lockf_debug & 2)
1775 printf("lock contains overlap\n");
1776 #endif /* LOCKF_DEBUG */
1780 if (lf->lf_start < start && lf->lf_end >= start) {
1783 if (lockf_debug & 2)
1784 printf("overlap starts before lock\n");
1785 #endif /* LOCKF_DEBUG */
1789 if (lf->lf_start > start && lf->lf_end > end) {
1792 if (lockf_debug & 2)
1793 printf("overlap ends after lock\n");
1794 #endif /* LOCKF_DEBUG */
1798 panic("lf_findoverlap: default");
1804 * Split the existing 'lock1', based on the extent of the lock
1805 * described by 'lock2'. The existing lock should cover 'lock2'
1808 * Any pending locks which have been unblocked are added to
1812 lf_split(struct lockf *state, struct lockf_entry *lock1,
1813 struct lockf_entry *lock2, struct lockf_entry_list *granted)
1815 struct lockf_entry *splitlock;
1818 if (lockf_debug & 2) {
1819 lf_print("lf_split", lock1);
1820 lf_print("splitting from", lock2);
1822 #endif /* LOCKF_DEBUG */
1824 * Check to see if we don't need to split at all.
1826 if (lock1->lf_start == lock2->lf_start) {
1827 lf_set_start(state, lock1, lock2->lf_end + 1, granted);
1830 if (lock1->lf_end == lock2->lf_end) {
1831 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1835 * Make a new lock consisting of the last part of
1836 * the encompassing lock.
1838 splitlock = lf_alloc_lock(lock1->lf_owner);
1839 memcpy(splitlock, lock1, sizeof *splitlock);
1840 splitlock->lf_refs = 1;
1841 if (splitlock->lf_flags & F_REMOTE)
1842 vref(splitlock->lf_vnode);
1845 * This cannot cause a deadlock since any edges we would add
1846 * to splitlock already exist in lock1. We must be sure to add
1847 * necessary dependencies to splitlock before we reduce lock1
1848 * otherwise we may accidentally grant a pending lock that
1849 * was blocked by the tail end of lock1.
1851 splitlock->lf_start = lock2->lf_end + 1;
1852 LIST_INIT(&splitlock->lf_outedges);
1853 LIST_INIT(&splitlock->lf_inedges);
1854 sx_xlock(&lf_owner_graph_lock);
1855 lf_add_incoming(state, splitlock);
1856 sx_xunlock(&lf_owner_graph_lock);
1858 lf_set_end(state, lock1, lock2->lf_start - 1, granted);
1861 * OK, now link it in
1863 lf_insert_lock(state, splitlock);
1867 STAILQ_ENTRY(lockdesc) link;
1871 STAILQ_HEAD(lockdesclist, lockdesc);
1874 lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
1877 struct lockf_entry *lf;
1878 struct lockdesc *ldesc;
1879 struct lockdesclist locks;
1883 * In order to keep the locking simple, we iterate over the
1884 * active lock lists to build a list of locks that need
1885 * releasing. We then call the iterator for each one in turn.
1887 * We take an extra reference to the vnode for the duration to
1888 * make sure it doesn't go away before we are finished.
1890 STAILQ_INIT(&locks);
1891 sx_xlock(&lf_lock_states_lock);
1892 LIST_FOREACH(ls, &lf_lock_states, ls_link) {
1893 sx_xlock(&ls->ls_lock);
1894 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1895 if (lf->lf_owner->lo_sysid != sysid)
1898 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1900 ldesc->vp = lf->lf_vnode;
1902 ldesc->fl.l_start = lf->lf_start;
1903 if (lf->lf_end == OFF_MAX)
1904 ldesc->fl.l_len = 0;
1907 lf->lf_end - lf->lf_start + 1;
1908 ldesc->fl.l_whence = SEEK_SET;
1909 ldesc->fl.l_type = F_UNLCK;
1910 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1911 ldesc->fl.l_sysid = sysid;
1912 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1914 sx_xunlock(&ls->ls_lock);
1916 sx_xunlock(&lf_lock_states_lock);
1919 * Call the iterator function for each lock in turn. If the
1920 * iterator returns an error code, just free the rest of the
1921 * lockdesc structures.
1924 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1925 STAILQ_REMOVE_HEAD(&locks, link);
1927 error = fn(ldesc->vp, &ldesc->fl, arg);
1929 free(ldesc, M_LOCKF);
1936 lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
1939 struct lockf_entry *lf;
1940 struct lockdesc *ldesc;
1941 struct lockdesclist locks;
1945 * In order to keep the locking simple, we iterate over the
1946 * active lock lists to build a list of locks that need
1947 * releasing. We then call the iterator for each one in turn.
1949 * We take an extra reference to the vnode for the duration to
1950 * make sure it doesn't go away before we are finished.
1952 STAILQ_INIT(&locks);
1962 sx_xlock(&ls->ls_lock);
1963 LIST_FOREACH(lf, &ls->ls_active, lf_link) {
1964 ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
1966 ldesc->vp = lf->lf_vnode;
1968 ldesc->fl.l_start = lf->lf_start;
1969 if (lf->lf_end == OFF_MAX)
1970 ldesc->fl.l_len = 0;
1973 lf->lf_end - lf->lf_start + 1;
1974 ldesc->fl.l_whence = SEEK_SET;
1975 ldesc->fl.l_type = F_UNLCK;
1976 ldesc->fl.l_pid = lf->lf_owner->lo_pid;
1977 ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
1978 STAILQ_INSERT_TAIL(&locks, ldesc, link);
1980 sx_xunlock(&ls->ls_lock);
1987 * Call the iterator function for each lock in turn. If the
1988 * iterator returns an error code, just free the rest of the
1989 * lockdesc structures.
1992 while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
1993 STAILQ_REMOVE_HEAD(&locks, link);
1995 error = fn(ldesc->vp, &ldesc->fl, arg);
1997 free(ldesc, M_LOCKF);
2004 lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
2007 VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
2012 lf_clearremotesys(int sysid)
2015 KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
2016 lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
2020 lf_countlocks(int sysid)
2023 struct lock_owner *lo;
2027 sx_xlock(&lf_lock_owners_lock);
2028 for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
2029 LIST_FOREACH(lo, &lf_lock_owners[i], lo_link)
2030 if (lo->lo_sysid == sysid)
2031 count += lo->lo_refs;
2032 sx_xunlock(&lf_lock_owners_lock);
2040 * Return non-zero if y is reachable from x using a brute force
2041 * search. If reachable and path is non-null, return the route taken
2045 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
2046 struct owner_vertex_list *path)
2048 struct owner_edge *e;
2052 TAILQ_INSERT_HEAD(path, x, v_link);
2056 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2057 if (graph_reaches(e->e_to, y, path)) {
2059 TAILQ_INSERT_HEAD(path, x, v_link);
2067 * Perform consistency checks on the graph. Make sure the values of
2068 * v_order are correct. If checkorder is non-zero, check no vertex can
2069 * reach any other vertex with a smaller order.
2072 graph_check(struct owner_graph *g, int checkorder)
2076 for (i = 0; i < g->g_size; i++) {
2077 if (!g->g_vertices[i]->v_owner)
2079 KASSERT(g->g_vertices[i]->v_order == i,
2080 ("lock graph vertices disordered"));
2082 for (j = 0; j < i; j++) {
2083 if (!g->g_vertices[j]->v_owner)
2085 KASSERT(!graph_reaches(g->g_vertices[i],
2086 g->g_vertices[j], NULL),
2087 ("lock graph vertices disordered"));
2094 graph_print_vertices(struct owner_vertex_list *set)
2096 struct owner_vertex *v;
2099 TAILQ_FOREACH(v, set, v_link) {
2100 printf("%d:", v->v_order);
2101 lf_print_owner(v->v_owner);
2102 if (TAILQ_NEXT(v, v_link))
2111 * Calculate the sub-set of vertices v from the affected region [y..x]
2112 * where v is reachable from y. Return -1 if a loop was detected
2113 * (i.e. x is reachable from y), otherwise the number of vertices in
2117 graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
2118 struct owner_vertex *y, struct owner_vertex_list *delta)
2121 struct owner_vertex *v;
2122 struct owner_edge *e;
2126 * We start with a set containing just y. Then for each vertex
2127 * v in the set so far unprocessed, we add each vertex that v
2128 * has an out-edge to and that is within the affected region
2129 * [y..x]. If we see the vertex x on our travels, stop
2133 TAILQ_INSERT_TAIL(delta, y, v_link);
2138 LIST_FOREACH(e, &v->v_outedges, e_outlink) {
2141 if (e->e_to->v_order < x->v_order
2142 && e->e_to->v_gen != gen) {
2143 e->e_to->v_gen = gen;
2144 TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
2148 v = TAILQ_NEXT(v, v_link);
2155 * Calculate the sub-set of vertices v from the affected region [y..x]
2156 * where v reaches x. Return the number of vertices in this subset.
2159 graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
2160 struct owner_vertex *y, struct owner_vertex_list *delta)
2163 struct owner_vertex *v;
2164 struct owner_edge *e;
2168 * We start with a set containing just x. Then for each vertex
2169 * v in the set so far unprocessed, we add each vertex that v
2170 * has an in-edge from and that is within the affected region
2174 TAILQ_INSERT_TAIL(delta, x, v_link);
2179 LIST_FOREACH(e, &v->v_inedges, e_inlink) {
2180 if (e->e_from->v_order > y->v_order
2181 && e->e_from->v_gen != gen) {
2182 e->e_from->v_gen = gen;
2183 TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
2187 v = TAILQ_PREV(v, owner_vertex_list, v_link);
2194 graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
2196 struct owner_vertex *v;
2199 TAILQ_FOREACH(v, set, v_link) {
2201 i > 0 && indices[i - 1] > v->v_order; i--)
2203 for (j = n - 1; j >= i; j--)
2204 indices[j + 1] = indices[j];
2205 indices[i] = v->v_order;
2213 graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
2214 struct owner_vertex_list *set)
2216 struct owner_vertex *v, *vlowest;
2218 while (!TAILQ_EMPTY(set)) {
2220 TAILQ_FOREACH(v, set, v_link) {
2221 if (!vlowest || v->v_order < vlowest->v_order)
2224 TAILQ_REMOVE(set, vlowest, v_link);
2225 vlowest->v_order = indices[nextunused];
2226 g->g_vertices[vlowest->v_order] = vlowest;
2230 return (nextunused);
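/*
 * Add an edge x->y to the owner graph, re-ordering the affected
 * vertices where necessary so that the graph remains topologically
 * sorted, and returning EDEADLK if the new edge would create a cycle.
 */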
2234 graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
2235 struct owner_vertex *y)
2237 struct owner_edge *e;
2238 struct owner_vertex_list deltaF, deltaB;
2239 int nF, nB, n, vi, i;
2242 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2244 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2252 if (lockf_debug & 8) {
2253 printf("adding edge %d:", x->v_order);
2254 lf_print_owner(x->v_owner);
2255 printf(" -> %d:", y->v_order);
2256 lf_print_owner(y->v_owner);
2260 if (y->v_order < x->v_order) {
2262 * The new edge violates the order. First find the set
2263 * of affected vertices reachable from y (deltaF) and
2264 * the set of affected vertices that reach x
2265 * (deltaB), using the graph generation number to
2266 * detect whether we have visited a given vertex
2267 * already. We re-order the graph so that each vertex
2268 * in deltaB appears before each vertex in deltaF.
2270 * If x is a member of deltaF, then the new edge would
2271 * create a cycle. Otherwise, we may assume that
2272 * deltaF and deltaB are disjoint.
2275 if (g->g_gen == 0) {
2279 for (vi = 0; vi < g->g_size; vi++) {
2280 g->g_vertices[vi]->v_gen = 0;
2284 nF = graph_delta_forward(g, x, y, &deltaF);
2287 if (lockf_debug & 8) {
2288 struct owner_vertex_list path;
2289 printf("deadlock: ");
2291 graph_reaches(y, x, &path);
2292 graph_print_vertices(&path);
2299 if (lockf_debug & 8) {
2300 printf("re-ordering graph vertices\n");
2301 printf("deltaF = ");
2302 graph_print_vertices(&deltaF);
2306 nB = graph_delta_backward(g, x, y, &deltaB);
2309 if (lockf_debug & 8) {
2310 printf("deltaB = ");
2311 graph_print_vertices(&deltaB);
2316 * We first build a set of vertex indices (vertex
2317 * order values) that we may use, then we re-assign
2318 * orders first to those vertices in deltaB, then to
2319 * deltaF. Note that the contents of deltaF and deltaB
2320 * may be partially disordered - we perform an
2321 * insertion sort while building our index set.
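 *
 * For example, if the current order is a=0, b=1, c=2, d=3 and we
 * add the edge d->b, then the affected region is [b..d], with
 * deltaF = {b} and deltaB = {d}. The index set built from them is
 * {1, 3}; deltaB is re-assigned first, giving d order 1, and then
 * deltaF, giving b order 3. The resulting order a, d, c, b is
 * consistent with the new edge d->b.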
2323 indices = g->g_indexbuf;
2324 n = graph_add_indices(indices, 0, &deltaF);
2325 graph_add_indices(indices, n, &deltaB);
2328 * We must also be sure to maintain the relative
2329 * ordering of deltaF and deltaB when re-assigning
2330 * vertices. We do this by iteratively removing the
2331 * lowest ordered element from the set and assigning
2332 * it the next value from our new ordering.
2334 i = graph_assign_indices(g, indices, 0, &deltaB);
2335 graph_assign_indices(g, indices, i, &deltaF);
2338 if (lockf_debug & 8) {
2339 struct owner_vertex_list set;
2341 for (i = 0; i < nB + nF; i++)
2342 TAILQ_INSERT_TAIL(&set,
2343 g->g_vertices[indices[i]], v_link);
2344 printf("new ordering = ");
2345 graph_print_vertices(&set);
2350 KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));
2353 if (lockf_debug & 8) {
2354 graph_check(g, TRUE);
2358 e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);
2360 LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
2361 LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
2370 * Remove an edge x->y from the graph.
2373 graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
2374 struct owner_vertex *y)
2376 struct owner_edge *e;
2378 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2380 LIST_FOREACH(e, &x->v_outedges, e_outlink) {
2384 KASSERT(e, ("Removing non-existent edge from deadlock graph"));
2387 if (e->e_refs == 0) {
2389 if (lockf_debug & 8) {
2390 printf("removing edge %d:", x->v_order);
2391 lf_print_owner(x->v_owner);
2392 printf(" -> %d:", y->v_order);
2393 lf_print_owner(y->v_owner);
2397 LIST_REMOVE(e, e_outlink);
2398 LIST_REMOVE(e, e_inlink);
2404 * Allocate a vertex for the given lock owner and add it to the graph.
2407 static struct owner_vertex *
2408 graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
2410 struct owner_vertex *v;
2412 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2414 v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
2415 if (g->g_size == g->g_space) {
2416 g->g_vertices = realloc(g->g_vertices,
2417 2 * g->g_space * sizeof(struct owner_vertex *),
2419 free(g->g_indexbuf, M_LOCKF);
2420 g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
2422 g->g_space = 2 * g->g_space;
2424 v->v_order = g->g_size;
2425 v->v_gen = g->g_gen;
2426 g->g_vertices[g->g_size] = v;
2429 LIST_INIT(&v->v_outedges);
2430 LIST_INIT(&v->v_inedges);
2437 graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
2439 struct owner_vertex *w;
2442 sx_assert(&lf_owner_graph_lock, SX_XLOCKED);
2444 KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
2445 KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));
2448 * Remove from the graph's array and close up the gap,
2449 * renumbering the other vertices.
2451 for (i = v->v_order + 1; i < g->g_size; i++) {
2452 w = g->g_vertices[i];
2454 g->g_vertices[i - 1] = w;
2461 static struct owner_graph *
2462 graph_init(struct owner_graph *g)
2465 g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
2469 g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
2477 * Print description of a lock owner
2480 lf_print_owner(struct lock_owner *lo)
2483 if (lo->lo_flags & F_REMOTE) {
2484 printf("remote pid %d, system %d",
2485 lo->lo_pid, lo->lo_sysid);
2486 } else if (lo->lo_flags & F_FLOCK) {
2487 printf("file %p", lo->lo_id);
2489 printf("local pid %d", lo->lo_pid);
2497 lf_print(char *tag, struct lockf_entry *lock)
2500 printf("%s: lock %p for ", tag, (void *)lock);
2501 lf_print_owner(lock->lf_owner);
2502 if (lock->lf_inode != (struct inode *)0)
2503 printf(" in ino %ju on dev <%s>,",
2504 (uintmax_t)lock->lf_inode->i_number,
2505 devtoname(ITODEV(lock->lf_inode)));
2506 printf(" %s, start %jd, end ",
2507 lock->lf_type == F_RDLCK ? "shared" :
2508 lock->lf_type == F_WRLCK ? "exclusive" :
2509 lock->lf_type == F_UNLCK ? "unlock" : "unknown",
2510 (intmax_t)lock->lf_start);
2511 if (lock->lf_end == OFF_MAX)
2514 printf("%jd", (intmax_t)lock->lf_end);
2515 if (!LIST_EMPTY(&lock->lf_outedges))
2516 printf(" block %p\n",
2517 (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
2523 lf_printlist(char *tag, struct lockf_entry *lock)
2525 struct lockf_entry *lf, *blk;
2526 struct lockf_edge *e;
2528 if (lock->lf_inode == (struct inode *)0)
2531 printf("%s: Lock list for ino %ju on dev <%s>:\n",
2532 tag, (uintmax_t)lock->lf_inode->i_number,
2533 devtoname(ITODEV(lock->lf_inode)));
2534 LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
2535 printf("\tlock %p for ",(void *)lf);
2536 lf_print_owner(lock->lf_owner);
2537 printf(", %s, start %jd, end %jd",
2538 lf->lf_type == F_RDLCK ? "shared" :
2539 lf->lf_type == F_WRLCK ? "exclusive" :
2540 lf->lf_type == F_UNLCK ? "unlock" :
2541 "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
2542 LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
2544 printf("\n\t\tlock request %p for ", (void *)blk);
2545 lf_print_owner(blk->lf_owner);
2546 printf(", %s, start %jd, end %jd",
2547 blk->lf_type == F_RDLCK ? "shared" :
2548 blk->lf_type == F_WRLCK ? "exclusive" :
2549 blk->lf_type == F_UNLCK ? "unlock" :
2550 "unknown", (intmax_t)blk->lf_start,
2551 (intmax_t)blk->lf_end);
2552 if (!LIST_EMPTY(&blk->lf_inedges))
2553 panic("lf_printlist: bad list");
2558 #endif /* LOCKF_DEBUG */