/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 * Copyright (c) 2006 Matthew Dillon <dillon@backplane.com>.  All rights reserved.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD: src/sys/kern/kern_lockf.c,v 1.25 1999/11/16 16:28:56 phk Exp $
 * $DragonFly: src/sys/kern/kern_lockf.c,v 1.31 2006/05/27 02:03:17 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/resourcevar.h>

#include <sys/lockf.h>
#include <machine/limits.h>	/* for LLONG_MAX */
#include <machine/stdarg.h>
#ifdef LOCKF_DEBUG
int lf_global_counter = 0;

int lf_print_ranges = 0;

static void	_lf_print_lock(const struct lockf *);
static void	_lf_printf(const char *, ...);

#define	lf_print_lock(lock) if (lf_print_ranges) _lf_print_lock(lock)
#define	lf_printf(ctl, args...) if (lf_print_ranges) _lf_printf(ctl, args)
#else
#define	lf_print_lock(lock)
#define	lf_printf(ctl, args...)
#endif
static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

static void	lf_wakeup(struct lockf *, off_t, off_t);
static struct lockf_range *lf_alloc_range(void);
static void	lf_create_range(struct lockf_range *, struct proc *, int, int,
				off_t, off_t);
static void	lf_insert(struct lockf_range_list *list,
			  struct lockf_range *elm,
			  struct lockf_range *insert_point);
static void	lf_destroy_range(struct lockf_range *);

static int	lf_setlock(struct lockf *, struct proc *, int, int,
			   off_t, off_t);
static int	lf_getlock(struct flock *, struct lockf *, struct proc *,
			   int, int, off_t, off_t);

static int	lf_count_change(struct proc *, int);
/*
 * Return TRUE (non-zero) if the type and posix flags match.
 */
static __inline
int
lf_match(struct lockf_range *range, int type, int flags)
{
        if (range->lf_type != type)
                return(0);
        if ((range->lf_flags ^ flags) & F_POSIX)
                return(0);
        return(1);
}
/*
 * Check whether range and [start, end] overlap.
 */
static __inline
int
lf_overlap(const struct lockf_range *range, off_t start, off_t end)
{
        if (range->lf_start >= start && range->lf_start <= end)
                return(1);
        else if (start >= range->lf_start && start <= range->lf_end)
                return(1);
        else
                return(0);
}
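/*
 * Example: [10..20] overlaps [20..25] (they share offset 20) and
 * [15..16] (fully enclosed), but neither [21..30] nor [0..9].
 */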
/*
 * Change the POSIX lock accounting for the given process.
 */
void
lf_count_adjust(struct proc *p, int increase)
{
        struct uidinfo *uip;

        KKASSERT(p != NULL);

        uip = p->p_ucred->cr_uidinfo;

        if (increase)
                uip->ui_posixlocks += p->p_numposixlocks;
        else
                uip->ui_posixlocks -= p->p_numposixlocks;

        KASSERT(uip->ui_posixlocks >= 0,
                ("Negative number of POSIX locks held by %s user: %d.",
                 increase ? "new" : "old", uip->ui_posixlocks));
}
static int
lf_count_change(struct proc *owner, int diff)
{
        struct uidinfo *uip;
        int max;

        /* we might actually not have a process context */
        if (owner == NULL)
                return(0);

        uip = owner->p_ucred->cr_uidinfo;

        max = MIN(owner->p_rlimit[RLIMIT_POSIXLOCKS].rlim_cur,
                  maxposixlocksperuid);
        if (diff > 0 && owner->p_ucred->cr_uid != 0 && max != -1 &&
            uip->ui_posixlocks >= max) {
                return(1);
        }

        uip->ui_posixlocks += diff;
        owner->p_numposixlocks += diff;

        KASSERT(uip->ui_posixlocks >= 0,
                ("Negative number of POSIX locks held by user: %d.",
                 uip->ui_posixlocks));
        KASSERT(owner->p_numposixlocks >= 0,
                ("Negative number of POSIX locks held by proc: %d.",
                 owner->p_numposixlocks));
        return(0);
}
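/*
 * Example: for a non-root process already at its RLIMIT_POSIXLOCKS
 * limit (or the global maxposixlocksperuid cap), lf_count_change(owner, 1)
 * returns 1, which lf_setlock() turns into ENOLCK.
 */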
/*
 * Advisory record locking support
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf *lock, u_quad_t size)
{
        struct flock *fl = ap->a_fl;
        struct proc *owner;
        off_t start, end;
        int type, flags, error;
        lwkt_tokref ilock;

        /*
         * Convert the flock structure into a start and end.
         */
        switch (fl->l_whence) {
        case SEEK_SET:
        case SEEK_CUR:
                /*
                 * Caller is responsible for adding any necessary offset
                 * when SEEK_CUR is used.
                 */
                start = fl->l_start;
                break;

        case SEEK_END:
                start = size + fl->l_start;
                break;

        default:
                return(EINVAL);
        }

        flags = ap->a_flags;
        if (start < 0)
                return(EINVAL);
        if (fl->l_len == 0) {
                flags |= F_NOEND;
                end = LLONG_MAX;
        } else {
                end = start + fl->l_len - 1;
                if (end < start)
                        return(EINVAL);
        }
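        /*
         * Example: l_whence == SEEK_SET, l_start 100, l_len 50 yields
         * the byte range [100..149].  l_len == 0 means 'to EOF' and
         * yields [100..LLONG_MAX] with F_NOEND set.
         */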
        type = fl->l_type;
        /*
         * This isn't really correct for flock-style locks,
         * but the current handling is somewhat broken anyway.
         */
        owner = (struct proc *)ap->a_id;

        /*
         * Do the requested operation.
         */
        lwkt_gettoken(&ilock, lwkt_token_pool_get(lock));

        if (lock->init_done == 0) {
                TAILQ_INIT(&lock->lf_range);
                TAILQ_INIT(&lock->lf_blocked);
                lock->init_done = 1;
        }

        switch (ap->a_op) {
        case F_SETLK:
                ap->a_vp->v_flag |= VMAYHAVELOCKS;
                error = lf_setlock(lock, owner, type, flags, start, end);
                break;

        case F_UNLCK:
                error = lf_setlock(lock, owner, type, flags, start, end);
                if (TAILQ_EMPTY(&lock->lf_range) &&
                    TAILQ_EMPTY(&lock->lf_blocked)) {
                        ap->a_vp->v_flag &= ~VMAYHAVELOCKS;
                }
                break;

        case F_GETLK:
                error = lf_getlock(fl, lock, owner, type, flags, start, end);
                break;

        default:
                error = EINVAL;
                break;
        }
        lwkt_reltoken(&ilock);
        return(error);
}
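/*
 * Usage sketch (hypothetical filesystem): a VOP_ADVLOCK method would
 * normally just forward to lf_advlock() with its per-inode lockf
 * structure and the current file size, along the lines of:
 *
 *	static int
 *	myfs_advlock(struct vop_advlock_args *ap)
 *	{
 *		struct myfs_inode *ip = VTOI(ap->a_vp);
 *
 *		return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *	}
 *
 * The 'myfs' names are illustrative only.
 */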
static int
lf_setlock(struct lockf *lock, struct proc *owner, int type, int flags,
           off_t start, off_t end)
{
        struct lockf_range *range;
        struct lockf_range *brange;
        struct lockf_range *next;
        struct lockf_range *first_match;
        struct lockf_range *last_match;
        struct lockf_range *insert_point;
        struct lockf_range *new_range1;
        struct lockf_range *new_range2;
        int wakeup_needed;
        int double_clip;
        int error = 0;
        int count;
        struct lockf_range_list deadlist;

        new_range1 = NULL;
        new_range2 = NULL;
        count = 0;

restart:
        /*
         * Preallocate two ranges so we don't have to worry about blocking
         * in the middle of the lock code.
         */
        if (new_range1 == NULL)
                new_range1 = lf_alloc_range();
        if (new_range2 == NULL)
                new_range2 = lf_alloc_range();
        first_match = NULL;
        last_match = NULL;
        insert_point = NULL;
        wakeup_needed = 0;

        lf_print_lock(lock);
        /*
         * Locate the insertion point for the new lock (the first range
         * with an lf_start >= start).
         *
         * Locate the first and last ranges owned by us that overlap
         * the requested range.
         */
        TAILQ_FOREACH(range, &lock->lf_range, lf_link) {
                if (insert_point == NULL && range->lf_start >= start)
                        insert_point = range;

                /*
                 * Skip non-overlapping locks.  Locks are sorted by lf_start
                 * so we can terminate the search when lf_start exceeds the
                 * requested range (insert_point is still guaranteed to be
                 * set properly).
                 */
                if (range->lf_end < start)
                        continue;
                if (range->lf_start > end) {
                        range = NULL;
                        break;
                }

                /*
                 * Overlapping lock.  Set first_match and last_match if we
                 * are the owner.
                 */
                if (range->lf_owner == owner) {
                        if (first_match == NULL)
                                first_match = range;
                        last_match = range;
                        continue;
                }

                /*
                 * If we aren't the owner check for a conflicting lock.  Only
                 * a shared lock against another shared lock is acceptable.
                 */
                if (type != F_UNLCK) {
                        if (type == F_WRLCK || range->lf_type == F_WRLCK)
                                break;
                }
        }
        /*
         * If a conflicting lock was observed, block or fail as appropriate.
         * (this code is skipped when unlocking)
         */
        if (range != NULL) {
                if ((flags & F_WAIT) == 0) {
                        error = EAGAIN;
                        goto do_cleanup;
                }

                /*
                 * We are blocked.  For POSIX locks we have to check
                 * for deadlocks and return with EDEADLK.  This is done
                 * by checking whether range->lf_owner is already
                 * blocked.
                 *
                 * Since flock-style locks cover the whole file, a
                 * deadlock between those is nearly impossible.
                 * This can only occur if a process tries to lock the
                 * same inode exclusively while holding a shared lock
                 * with another descriptor.
                 * XXX How can we cleanly detect this?
                 * XXX The current mixing of flock & fcntl/lockf is evil.
                 *
                 * Handle existing locks of flock-style like POSIX locks.
                 */
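                /*
                 * Example: A holds [0..10] on this file and is blocked
                 * waiting for B's [20..30].  If B now requests a POSIX
                 * lock overlapping [0..10], the scan below finds an entry
                 * on lf_blocked owned by A (the owner of the conflicting
                 * range) and fails with EDEADLK rather than deadlocking.
                 */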
                if (flags & F_POSIX) {
                        TAILQ_FOREACH(brange, &lock->lf_blocked, lf_link)
                                if (brange->lf_owner == range->lf_owner) {
                                        error = EDEADLK;
                                        goto do_cleanup;
                                }
                }

                /*
                 * For flock-style locks, we must first remove
                 * any shared locks that we hold before we sleep
                 * waiting for an exclusive lock.
                 */
                if ((flags & F_POSIX) == 0 && type == F_WRLCK)
                        lf_setlock(lock, owner, F_UNLCK, 0, start, end);

                brange = new_range1;
                new_range1 = NULL;
                lf_create_range(brange, owner, type, 0, start, end);
                TAILQ_INSERT_TAIL(&lock->lf_blocked, brange, lf_link);
                error = tsleep(brange, PCATCH, "lockf", 0);

                /*
                 * We may have been awakened by a signal and/or by a
                 * debugger continuing us (in which case we must remove
                 * ourselves from the blocked list) and/or by another
                 * process releasing/downgrading a lock (in which case
                 * we have already been removed from the blocked list
                 * and our lf_flags field is 1).
                 *
                 * Sleep if it looks like we might be livelocking.
                 */
                if (brange->lf_flags == 0)
                        TAILQ_REMOVE(&lock->lf_blocked, brange, lf_link);
                else
                        tsleep(brange, 0, "lockfz", 2);
                lf_destroy_range(brange);

                if (error)
                        goto do_cleanup;
                goto restart;
        }
        /*
         * If there are no overlapping locks owned by us then creating
         * the new lock is easy.  This is the most common case.
         */
        if (first_match == NULL) {
                if (type == F_UNLCK)
                        goto do_wakeup;
                if (flags & F_POSIX) {
                        if (lf_count_change(owner, 1)) {
                                error = ENOLCK;
                                goto do_cleanup;
                        }
                }
                range = new_range1;
                new_range1 = NULL;
                lf_create_range(range, owner, type, flags, start, end);
                lf_insert(&lock->lf_range, range, insert_point);
                goto do_wakeup;
        }
        /*
         * This is a special case that we need to check for in a couple
         * of places: a single existing lock owned by us both starts
         * before and ends after the requested range, so replacing its
         * middle requires clipping it on both sides.
         */
        if (first_match == last_match && first_match->lf_start < start &&
            last_match->lf_end > end) {
                double_clip = 1;
        } else {
                double_clip = 0;
        }
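        /*
         * Example: we hold [0..99] and now request an incompatible
         * [40..59].  The old lock must survive as [0..39] and [60..99]
         * on either side of the new lock, so up to two new ranges are
         * needed.
         */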
        /*
         * Figure out the worst case net increase in POSIX locks and account
         * for it now before we start modifying things.  If neither the
         * first nor last locks match we have an issue.  If there is only
         * one overlapping range which needs to be clipped on both ends
         * we wind up having to create up to two new locks, else only one.
         *
         * When unlocking the worst case is always 1 new lock if our
         * unlock request cuts the middle out of an existing lock range.
         *
         * count represents the 'cleanup' adjustment needed.  It starts
         * negative, is incremented whenever we create a new POSIX lock,
         * and decremented whenever we delete an existing one.  At the
         * end of the day it had better be <= 0 or we didn't calculate the
         * worst case properly here.
         */
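        /*
         * Example: double-clipping a single incompatible POSIX lock
         * turns one range into three (left clip, new lock, right clip),
         * a worst case net gain of two, hence count = -2 below.  When
         * only one end is clipped the worst case gain is one.
         */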
        count = 0;
        if (flags & F_POSIX) {
                if (!lf_match(first_match, type, flags) &&
                    !lf_match(last_match, type, flags)
                ) {
                        if (double_clip && type != F_UNLCK)
                                count = -2;
                        else
                                count = -1;
                }
                if (count && lf_count_change(owner, -count)) {
                        error = ENOLCK;
                        goto do_cleanup;
                }
        }
        /* else flock style lock which encompasses entire range */
        /*
         * Create and insert the lock representing the requested range.
         * Adjust the net POSIX lock count.  We have to move our insertion
         * point since brange now represents the first record >= start.
         *
         * When unlocking, no new lock is inserted but we still clip.
         */
        if (type != F_UNLCK) {
                brange = new_range1;
                new_range1 = NULL;
                lf_create_range(brange, owner, type, flags, start, end);
                lf_insert(&lock->lf_range, brange, insert_point);
                insert_point = brange;
                if (flags & F_POSIX)
                        ++count;
        } else {
                brange = NULL;
        }
        /*
         * Handle the double_clip case.  This is the only case where
         * we wind up having to add TWO locks.
         */
        if (double_clip) {
                KKASSERT(first_match == last_match);
                last_match = new_range2;
                new_range2 = NULL;
                lf_create_range(last_match, first_match->lf_owner,
                                first_match->lf_type, first_match->lf_flags,
                                end + 1, first_match->lf_end);
                first_match->lf_end = start - 1;
                first_match->lf_flags &= ~F_NOEND;

                /*
                 * Figure out where to insert the right side clip.
                 */
                lf_insert(&lock->lf_range, last_match, first_match);
                if (last_match->lf_flags & F_POSIX)
                        ++count;
        }
        /*
         * Clip or destroy the locks between first_match and last_match,
         * inclusive.  Ignore the primary lock we created (brange).  Note
         * that if double-clipped, first_match and last_match will be
         * outside our clipping range.  Otherwise first_match and last_match
         * will be deleted.
         *
         * We have already taken care of any double clipping.
         *
         * The insert_point may become invalid as we delete records, do not
         * use that pointer any more.  Also, when removing something other
         * than 'range' we have to check to see if the item we are removing
         * is 'next' and adjust 'next' properly.
         *
         * NOTE: brange will be NULL if F_UNLCKing.
         */
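        /*
         * Example: with an existing lock [0..99] and a new request
         * [50..149], the left clip truncates [0..99] to [0..49] (or
         * merges it into brange if the types match), while any ranges
         * lying entirely inside [50..149] are moved to deadlist.
         */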
        TAILQ_INIT(&deadlist);
        next = first_match;

        while ((range = next) != NULL) {
                next = TAILQ_NEXT(range, lf_link);

                /*
                 * Ignore elements that we do not own and ignore the
                 * primary request range which we just created.
                 */
                if (range->lf_owner != owner || range == brange)
                        continue;

                /*
                 * We may have to wakeup a waiter when downgrading a lock.
                 */
                if (type == F_UNLCK)
                        wakeup_needed = 1;
                if (type == F_RDLCK && range->lf_type == F_WRLCK)
                        wakeup_needed = 1;

                /*
                 * Clip left.  This can only occur on first_match.
                 *
                 * Merge the left clip with brange if possible.  This must
                 * be done specifically, not in the optimized merge heuristic
                 * below, since we may have counted on it in our 'count'
                 * calculation above.
                 */
                if (range->lf_start < start) {
                        KKASSERT(range == first_match);
                        if (brange &&
                            range->lf_end >= start - 1 &&
                            lf_match(range, type, flags)) {
                                range->lf_end = brange->lf_end;
                                range->lf_flags |= brange->lf_flags & F_NOEND;
                                /*
                                 * Removing something other than 'range',
                                 * adjust 'next' if necessary.
                                 */
                                if (next == brange)
                                        next = TAILQ_NEXT(next, lf_link);
                                TAILQ_REMOVE(&lock->lf_range, brange, lf_link);
                                if (brange->lf_flags & F_POSIX)
                                        --count;
                                TAILQ_INSERT_TAIL(&deadlist, brange, lf_link);
                                brange = range;
                        } else if (range->lf_end >= start) {
                                range->lf_end = start - 1;
                                range->lf_flags &= ~F_NOEND;
                        }
                        if (range == last_match)
                                break;
                        continue;
                }

                /*
                 * Clip right.  This can only occur on last_match.
                 *
                 * Merge the right clip if possible.  This must be done
                 * specifically, not in the optimized merge heuristic
                 * below, since we may have counted on it in our 'count'
                 * calculation above.
                 *
                 * Since we are adjusting lf_start, we have to move the
                 * record to maintain the sorted list.  Since lf_start is
                 * only getting larger we can use the next element as the
                 * insert point (we don't have to backtrack).
                 */
                if (range->lf_end > end) {
                        KKASSERT(range == last_match);
                        if (brange &&
                            range->lf_start <= end + 1 &&
                            lf_match(range, type, flags)) {
                                brange->lf_end = range->lf_end;
                                brange->lf_flags |= range->lf_flags & F_NOEND;
                                TAILQ_REMOVE(&lock->lf_range, range, lf_link);
                                if (range->lf_flags & F_POSIX)
                                        --count;
                                TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
                        } else if (range->lf_start <= end) {
                                range->lf_start = end + 1;
                                TAILQ_REMOVE(&lock->lf_range, range, lf_link);
                                lf_insert(&lock->lf_range, range, next);
                        }
                        /* range == last_match, we are done */
                        break;
                }

                /*
                 * The record must be entirely enclosed.  Note that the
                 * record could be first_match or last_match, and will be
                 * deleted.
                 */
                KKASSERT(range->lf_start >= start && range->lf_end <= end);
                TAILQ_REMOVE(&lock->lf_range, range, lf_link);
                if (range->lf_flags & F_POSIX)
                        --count;
                TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
                if (range == last_match)
                        break;
        }
        /*
         * Attempt to merge locks adjacent to brange.  For example, we may
         * have had to clip first_match and/or last_match, and they might
         * be adjacent.  Or there might simply have been an adjacent lock
         * already there.
         *
         * Don't get fancy, just check adjacent elements in the list if they
         * happen to be owned by us.
         *
         * This case only gets hit if we have a situation where a shared
         * and exclusive lock are adjacent, and the exclusive lock is
         * downgraded to shared or the shared lock is upgraded to exclusive.
         */
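        /*
         * Example: an exclusive [0..49] adjacent to a shared [50..99]
         * is downgraded to shared; the two ranges now match and
         * collapse into a single shared [0..99].
         */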
        if (brange) {
                range = TAILQ_PREV(brange, lockf_range_list, lf_link);
                if (range &&
                    range->lf_owner == owner &&
                    range->lf_end == brange->lf_start - 1 &&
                    lf_match(range, type, flags)
                ) {
                        /*
                         * Extend range to cover brange and scrap brange.
                         */
                        range->lf_end = brange->lf_end;
                        range->lf_flags |= brange->lf_flags & F_NOEND;
                        TAILQ_REMOVE(&lock->lf_range, brange, lf_link);
                        if (brange->lf_flags & F_POSIX)
                                --count;
                        TAILQ_INSERT_TAIL(&deadlist, brange, lf_link);
                        brange = range;
                }
                range = TAILQ_NEXT(brange, lf_link);
                if (range &&
                    range->lf_owner == owner &&
                    range->lf_start == brange->lf_end + 1 &&
                    lf_match(range, type, flags)
                ) {
                        /*
                         * Extend brange to cover range and scrap range.
                         */
                        brange->lf_end = range->lf_end;
                        brange->lf_flags |= range->lf_flags & F_NOEND;
                        TAILQ_REMOVE(&lock->lf_range, range, lf_link);
                        if (range->lf_flags & F_POSIX)
                                --count;
                        TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
                }
        }
        /*
         * Destroy deleted elements.  We didn't want to do it in the loop
         * because the free() might have blocked.
         *
         * Adjust the count for any posix locks we thought we might create
         * but didn't.
         */
        while ((range = TAILQ_FIRST(&deadlist)) != NULL) {
                TAILQ_REMOVE(&deadlist, range, lf_link);
                lf_destroy_range(range);
        }

        KKASSERT(count <= 0);
        if (count < 0)
                lf_count_change(owner, count);
do_wakeup:
        lf_print_lock(lock);
        if (wakeup_needed)
                lf_wakeup(lock, start, end);
        error = 0;
do_cleanup:
        if (new_range1 != NULL)
                lf_destroy_range(new_range1);
        if (new_range2 != NULL)
                lf_destroy_range(new_range2);
        return(error);
}
/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct flock *fl, struct lockf *lock, struct proc *owner,
           int type, int flags, off_t start, off_t end)
{
        struct lockf_range *range;

        TAILQ_FOREACH(range, &lock->lf_range, lf_link)
                if (range->lf_owner != owner &&
                    lf_overlap(range, start, end) &&
                    (type == F_WRLCK || range->lf_type == F_WRLCK))
                        break;
        if (range == NULL) {
                fl->l_type = F_UNLCK;
                return(0);
        }
        fl->l_type = range->lf_type;
        fl->l_whence = SEEK_SET;
        fl->l_start = range->lf_start;
        if (range->lf_flags & F_NOEND)
                fl->l_len = 0;
        else
                fl->l_len = range->lf_end - range->lf_start + 1;
        if (range->lf_owner != NULL && (range->lf_flags & F_POSIX))
                fl->l_pid = range->lf_owner->p_pid;
        else
                fl->l_pid = -1;
        return(0);
}
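/*
 * Example: if another process holds an exclusive lock [100..199] and
 * F_GETLK is issued for a write lock over [150..160], the flock is
 * filled in with l_type F_WRLCK, l_start 100, l_len 100, and l_pid
 * set to the holder's pid (or -1 for a flock-style holder).
 */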
/*
 * Wakeup pending lock attempts.
 */
static void
lf_wakeup(struct lockf *lock, off_t start, off_t end)
{
        struct lockf_range *range, *nrange;

        TAILQ_FOREACH_MUTABLE(range, &lock->lf_blocked, lf_link, nrange) {
                if (lf_overlap(range, start, end) == 0)
                        continue;
                TAILQ_REMOVE(&lock->lf_blocked, range, lf_link);
                range->lf_flags = 1;
                wakeup(range);
                if (range->lf_start >= start && range->lf_end <= end)
                        break;
        }
}
/*
 * Allocate a range structure and initialize it sufficiently such that
 * lf_destroy_range() does not barf.
 */
static struct lockf_range *
lf_alloc_range(void)
{
        struct lockf_range *range;

#ifdef LOCKF_DEBUG
        ++lf_global_counter;
#endif
        range = malloc(sizeof(struct lockf_range), M_LOCKF, M_WAITOK);
        range->lf_owner = NULL;
        return(range);
}
static void
lf_insert(struct lockf_range_list *list, struct lockf_range *elm,
          struct lockf_range *insert_point)
{
        while (insert_point && insert_point->lf_start < elm->lf_start)
                insert_point = TAILQ_NEXT(insert_point, lf_link);
        if (insert_point != NULL)
                TAILQ_INSERT_BEFORE(insert_point, elm, lf_link);
        else
                TAILQ_INSERT_TAIL(list, elm, lf_link);
}
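/*
 * Example: inserting [30..40] into [10..20] -> [50..60] with
 * insert_point at [50..60] places the element before [50..60],
 * preserving lf_start order.  A NULL insert_point appends to the
 * tail.
 */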
static void
lf_create_range(struct lockf_range *range, struct proc *owner, int type,
                int flags, off_t start, off_t end)
{
        KKASSERT(start <= end);
        range->lf_type = type;
        range->lf_flags = flags;
        range->lf_start = start;
        range->lf_end = end;
        range->lf_owner = owner;

        lf_printf("lf_create_range: %lld..%lld\n",
                  range->lf_start, range->lf_end);
}
static void
lf_destroy_range(struct lockf_range *range)
{
        lf_printf("lf_destroy_range: %lld..%lld\n",
                  range->lf_start, range->lf_end);
        free(range, M_LOCKF);
#ifdef LOCKF_DEBUG
        --lf_global_counter;
        KKASSERT(lf_global_counter >= 0);
#endif
}
#ifdef LOCKF_DEBUG

static void
_lf_printf(const char *ctl, ...)
{
        struct proc *p;
        __va_list va;

        if (lf_print_ranges) {
                if ((p = curproc) != NULL)
                        printf("pid %d (%s): ", p->p_pid, p->p_comm);
        }
        __va_start(va, ctl);
        vprintf(ctl, va);
        __va_end(va);
}
static void
_lf_print_lock(const struct lockf *lock)
{
        struct lockf_range *range;

        if (lf_print_ranges == 0)
                return;

        if (TAILQ_EMPTY(&lock->lf_range)) {
                lf_printf("lockf %p: no ranges locked\n", lock);
        } else {
                lf_printf("lockf %p:\n", lock);
        }
        TAILQ_FOREACH(range, &lock->lf_range, lf_link)
                printf("\t%lld..%lld type %s owned by %d\n",
                       range->lf_start, range->lf_end,
                       range->lf_type == F_RDLCK ? "shared" : "exclusive",
                       range->lf_flags & F_POSIX ? range->lf_owner->p_pid : -1);
        if (TAILQ_EMPTY(&lock->lf_blocked))
                printf("no process waiting for range\n");
        else
                printf("blocked locks:\n");
        TAILQ_FOREACH(range, &lock->lf_blocked, lf_link)
                printf("\t%lld..%lld type %s waiting on %p\n",
                       range->lf_start, range->lf_end,
                       range->lf_type == F_RDLCK ? "shared" : "exclusive",
                       range);
}
#endif /* LOCKF_DEBUG */