/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 * Copyright (c) 2006 Matthew Dillon <dillon@backplane.com>.  All rights reserved.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD: src/sys/kern/kern_lockf.c,v 1.25 1999/11/16 16:28:56 phk Exp $
 * $DragonFly: src/sys/kern/kern_lockf.c,v 1.30 2006/05/27 01:57:41 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/resourcevar.h>

#include <sys/lockf.h>
#include <machine/limits.h>	/* for LLONG_MAX */
#include <machine/stdarg.h>

#ifdef INVARIANTS
int lf_global_counter = 0;
#endif

#ifdef LOCKF_DEBUG
int lf_print_ranges = 0;

static void	_lf_print_lock(const struct lockf *);
static void	_lf_printf(const char *, ...);

#define	lf_print_lock(lock) if (lf_print_ranges) _lf_print_lock(lock)
#define	lf_printf(ctl, args...)	if (lf_print_ranges) _lf_printf(ctl, args)
#else
#define	lf_print_lock(lock)
#define	lf_printf(ctl, args...)
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

static void	lf_wakeup(struct lockf *, off_t, off_t);
static struct lockf_range *lf_alloc_range(void);
static void	lf_create_range(struct lockf_range *, struct proc *, int, int,
				off_t, off_t);
static void	lf_insert(struct lockf_range_list *list,
			  struct lockf_range *elm,
			  struct lockf_range *insert_point);
static void	lf_destroy_range(struct lockf_range *);

static int	lf_setlock(struct lockf *, struct proc *, int, int,
			   off_t, off_t);
static int	lf_getlock(struct flock *, struct lockf *, struct proc *,
			   int, int, off_t, off_t);

static int	lf_count_change(struct proc *, int);

/*
 * Return TRUE (non-zero) if the type and posix flags match.
 */
static __inline
int
lf_match(struct lockf_range *range, int type, int flags)
{
	if (range->lf_type != type)
		return(0);
	if ((range->lf_flags ^ flags) & F_POSIX)
		return(0);
	return(1);
}

/*
 * Check whether range and [start, end] overlap.
 */
static __inline
int
lf_overlap(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_start >= start && range->lf_start <= end)
		return(1);
	else if (start >= range->lf_start && start <= range->lf_end)
		return(1);
	else
		return(0);
}
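
/*
 * For illustration, given range [10, 20]: a query for [5, 15] hits the
 * first test (lf_start 10 falls inside [5, 15]), a query for [15, 25]
 * hits the second (start 15 falls inside [10, 20]), and a query for
 * [21, 30] fails both and does not overlap.  Together the two tests
 * cover every arrangement in which the two closed ranges intersect.
 */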

/*
 * Change the POSIX lock accounting for the given process.
 */
void
lf_count_adjust(struct proc *p, int increase)
{
	struct uidinfo *uip;

	KKASSERT(p != NULL);

	uip = p->p_ucred->cr_uidinfo;

	if (increase)
		uip->ui_posixlocks += p->p_numposixlocks;
	else
		uip->ui_posixlocks -= p->p_numposixlocks;

	KASSERT(uip->ui_posixlocks >= 0,
		("Negative number of POSIX locks held by %s user: %d.",
		 increase ? "new" : "old", uip->ui_posixlocks));
}

static int
lf_count_change(struct proc *owner, int diff)
{
	struct uidinfo *uip;
	int max;

	/* we might actually not have a process context */
	if (owner == NULL)
		return(0);

	uip = owner->p_ucred->cr_uidinfo;

	max = MIN(owner->p_rlimit[RLIMIT_POSIXLOCKS].rlim_cur,
		  maxposixlocksperuid);
	if (diff > 0 && owner->p_ucred->cr_uid != 0 && max != -1 &&
	    uip->ui_posixlocks >= max) {
		return(1);
	}

	uip->ui_posixlocks += diff;
	owner->p_numposixlocks += diff;

	KASSERT(uip->ui_posixlocks >= 0,
		("Negative number of POSIX locks held by user: %d.",
		 uip->ui_posixlocks));
	KASSERT(owner->p_numposixlocks >= 0,
		("Negative number of POSIX locks held by proc: %d.",
		 owner->p_numposixlocks));
	return(0);
}

/*
 * Advisory record locking support
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf *lock, u_quad_t size)
{
	struct flock *fl = ap->a_fl;
	struct proc *owner;
	off_t start, end;
	int type, flags, error;
	lwkt_tokref ilock;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return(EINVAL);
	}

	flags = ap->a_flags;
	type = fl->l_type;
	if (start < 0)
		return(EINVAL);
	if (fl->l_len == 0) {
		flags |= F_NOEND;
		end = LLONG_MAX;
	} else {
		end = start + fl->l_len - 1;
		if (end < start)
			return(EINVAL);
	}

	/*
	 * This isn't really correct for flock-style locks,
	 * but the current handling is somewhat broken anyway.
	 */
	owner = (struct proc *)ap->a_id;

	/*
	 * Do the requested operation.
	 */
	lwkt_gettoken(&ilock, lwkt_token_pool_get(lock));

	if (lock->init_done == 0) {
		TAILQ_INIT(&lock->lf_range);
		TAILQ_INIT(&lock->lf_blocked);
		lock->init_done = 1;
	}

	switch (ap->a_op) {
	case F_SETLK:
		ap->a_vp->v_flag |= VMAYHAVELOCKS;
		error = lf_setlock(lock, owner, type, flags, start, end);
		break;

	case F_UNLCK:
		error = lf_setlock(lock, owner, type, flags, start, end);
		break;

	case F_GETLK:
		error = lf_getlock(fl, lock, owner, type, flags, start, end);
		break;

	default:
		error = EINVAL;
		break;
	}
	lwkt_reltoken(&ilock);
	return(error);
}
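
/*
 * Illustrative userland sketch (not part of this file's build): a
 * process write-locks bytes 100..199 of an open file via fcntl(2) and
 * the file system forwards the request here through VOP_ADVLOCK.  With
 * l_len = 100 the inclusive end computed above is start + l_len - 1 =
 * 199; l_len = 0 would lock through EOF (F_NOEND).  F_SETLKW passes
 * F_WAIT, so the request may sleep instead of failing with EAGAIN.
 *
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 100;
 *	fl.l_len = 100;
 *	fl.l_pid = 0;
 *	if (fcntl(fd, F_SETLKW, &fl) == -1)
 *		err(1, "fcntl");
 */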

static int
lf_setlock(struct lockf *lock, struct proc *owner, int type, int flags,
	   off_t start, off_t end)
{
	struct lockf_range *range;
	struct lockf_range *brange;
	struct lockf_range *next;
	struct lockf_range *first_match;
	struct lockf_range *last_match;
	struct lockf_range *insert_point;
	struct lockf_range *new_range1;
	struct lockf_range *new_range2;
	int wakeup_needed;
	int double_clip;
	int error = 0;
	int count;
	struct lockf_range_list deadlist;

	new_range1 = NULL;
	new_range2 = NULL;
	count = 0;

restart:
	/*
	 * Preallocate two ranges so we don't have to worry about blocking
	 * in the middle of the lock code.
	 */
	if (new_range1 == NULL)
		new_range1 = lf_alloc_range();
	if (new_range2 == NULL)
		new_range2 = lf_alloc_range();
	first_match = NULL;
	last_match = NULL;
	insert_point = NULL;
	wakeup_needed = 0;

	lf_print_lock(lock);

	/*
	 * Locate the insertion point for the new lock (the first range
	 * with an lf_start >= start).
	 *
	 * Locate the first and last ranges owned by us that overlap
	 * the requested range.
	 */
	TAILQ_FOREACH(range, &lock->lf_range, lf_link) {
		if (insert_point == NULL && range->lf_start >= start)
			insert_point = range;

		/*
		 * Skip non-overlapping locks.  Locks are sorted by lf_start
		 * so we can terminate the search when lf_start exceeds the
		 * requested range (insert_point is still guaranteed to be
		 * set properly).
		 */
		if (range->lf_end < start)
			continue;
		if (range->lf_start > end) {
			range = NULL;
			break;
		}

		/*
		 * Overlapping lock.  Set first_match and last_match if we
		 * are the owner.
		 */
		if (range->lf_owner == owner) {
			if (first_match == NULL)
				first_match = range;
			last_match = range;
			continue;
		}

		/*
		 * If we aren't the owner check for a conflicting lock.  Only
		 * relevant if we are not unlocking.
		 */
		if (type != F_UNLCK) {
			if (type == F_WRLCK || range->lf_type == F_WRLCK)
				break;
		}
	}

	/*
	 * If a conflicting lock was observed, block or fail as appropriate.
	 * (this code is skipped when unlocking)
	 */
	if (range != NULL) {
		if ((flags & F_WAIT) == 0) {
			error = EAGAIN;
			goto do_cleanup;
		}

		/*
		 * We are blocked.  For POSIX locks we have to check
		 * for deadlocks and return with EDEADLK.  This is done
		 * by checking whether range->lf_owner is already
		 * blocked.
		 *
		 * Since flock-style locks cover the whole file, a
		 * deadlock between those is nearly impossible.
		 * This can only occur if a process tries to lock the
		 * same inode exclusively while holding a shared lock
		 * with another descriptor.
		 * XXX How can we cleanly detect this?
		 * XXX The current mixing of flock & fcntl/lockf is evil.
		 *
		 * Handle existing locks of flock-style like POSIX locks.
		 */
		if (flags & F_POSIX) {
			TAILQ_FOREACH(brange, &lock->lf_blocked, lf_link)
				if (brange->lf_owner == range->lf_owner) {
					error = EDEADLK;
					goto do_cleanup;
				}
		}

		/*
		 * For flock-style locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((flags & F_POSIX) == 0 && type == F_WRLCK)
			lf_setlock(lock, owner, F_UNLCK, 0, start, end);

		brange = new_range1;
		new_range1 = NULL;
		lf_create_range(brange, owner, type, 0, start, end);
		TAILQ_INSERT_TAIL(&lock->lf_blocked, brange, lf_link);
		error = tsleep(brange, PCATCH, "lockf", 0);

		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which case we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing/downgrading a lock (in which case
		 * we have already been removed from the blocked list
		 * and our lf_flags field is 1).
		 *
		 * Sleep if it looks like we might be livelocking.
		 */
		if (brange->lf_flags == 0)
			TAILQ_REMOVE(&lock->lf_blocked, brange, lf_link);
		if (count == 2)
			tsleep(brange, 0, "lockfz", 2);
		else
			++count;
		lf_destroy_range(brange);

		if (error)
			goto do_cleanup;
		goto restart;
	}
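
	/*
	 * Deadlock illustration (hypothetical processes for the sake of
	 * the example): process A holds a POSIX lock on [0, 49] and is
	 * blocked requesting [50, 99], which process B holds.  If B now
	 * requests [0, 49], the scan above finds A, the owner of the
	 * conflicting range, already sitting on lf_blocked, so B's
	 * request fails with EDEADLK instead of sleeping forever.
	 */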

	/*
	 * If there are no overlapping locks owned by us then creating
	 * the new lock is easy.  This is the most common case.
	 */
	if (first_match == NULL) {
		if (type == F_UNLCK)
			goto do_wakeup;
		if (flags & F_POSIX) {
			if (lf_count_change(owner, 1)) {
				error = ENOLCK;
				goto do_cleanup;
			}
		}
		range = new_range1;
		new_range1 = NULL;
		lf_create_range(range, owner, type, flags, start, end);
		lf_insert(&lock->lf_range, range, insert_point);
		goto do_wakeup;
	}

	/*
	 * This is a special case that we need to check for in a couple
	 * of places.
	 */
	if (first_match == last_match && first_match->lf_start < start &&
	    last_match->lf_end > end) {
		double_clip = 1;
	} else {
		double_clip = 0;
	}

	/*
	 * Figure out the worst case net increase in POSIX locks and account
	 * for it now before we start modifying things.  If neither the
	 * first nor last locks match we have an issue.  If there is only
	 * one overlapping range which needs to be clipped on both ends
	 * we wind up having to create up to two new locks, else only one.
	 *
	 * When unlocking the worst case is always 1 new lock if our
	 * unlock request cuts the middle out of an existing lock range.
	 *
	 * count represents the 'cleanup' adjustment needed.  It starts
	 * negative, is incremented whenever we create a new POSIX lock,
	 * and decremented whenever we delete an existing one.  At the
	 * end of the day it had better be <= 0 or we didn't calculate the
	 * worst case properly here.
	 */
	count = 0;
	if (flags & F_POSIX) {
		if (!lf_match(first_match, type, flags) &&
		    !lf_match(last_match, type, flags)
		) {
			if (double_clip && type != F_UNLCK)
				count = -2;
			else
				count = -1;
		}
		if (count && lf_count_change(owner, -count)) {
			error = ENOLCK;
			goto do_cleanup;
		}
	}
	/* else flock style lock which encompasses entire range */
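
	/*
	 * Worked example of the accounting above: an exclusive POSIX lock
	 * on [0, 99] is partially downgraded by a shared request for
	 * [40, 59].  Neither end matches the new type and the request
	 * double-clips, so count starts at -2 (two locks reserved via
	 * lf_count_change()).  Inserting the new [40, 59] lock increments
	 * count to -1, creating the right-hand remnant [60, 99] increments
	 * it to 0, and the left-hand remnant reuses the original record.
	 * The final count of 0 satisfies the KKASSERT(count <= 0) below
	 * and requires no refund.
	 */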

	/*
	 * Create and insert the lock representing the requested range.
	 * Adjust the net POSIX lock count.  We have to move our insertion
	 * point since brange now represents the first record >= start.
	 *
	 * When unlocking, no new lock is inserted but we still clip.
	 */
	if (type != F_UNLCK) {
		brange = new_range1;
		new_range1 = NULL;
		lf_create_range(brange, owner, type, flags, start, end);
		lf_insert(&lock->lf_range, brange, insert_point);
		insert_point = brange;
		if (flags & F_POSIX)
			++count;
	} else {
		brange = NULL;
	}

	/*
	 * Handle the double_clip case.  This is the only case where
	 * we wind up having to add TWO locks.
	 */
	if (double_clip) {
		KKASSERT(first_match == last_match);
		last_match = new_range2;
		new_range2 = NULL;
		lf_create_range(last_match, first_match->lf_owner,
				first_match->lf_type, first_match->lf_flags,
				end + 1, first_match->lf_end);
		first_match->lf_end = start - 1;
		first_match->lf_flags &= ~F_NOEND;

		/*
		 * Figure out where to insert the right side clip.
		 */
		lf_insert(&lock->lf_range, last_match, first_match);
		if (last_match->lf_flags & F_POSIX)
			++count;
	}
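
	/*
	 * For illustration, double-clipping an exclusive lock [0, 99]
	 * with a shared request for [40, 59] leaves three records:
	 *
	 *	before:   [0 ........................... 99]  exclusive
	 *	request:           [40 ... 59]                shared
	 *	after:    [0 .. 39][40 ... 59][60 ...... 99]
	 *	          exclusive  shared     exclusive
	 *
	 * The original record is clipped down to the left piece, brange
	 * holds the new lock, and new_range2 becomes the right piece.
	 */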

	/*
	 * Clip or destroy the locks between first_match and last_match,
	 * inclusive.  Ignore the primary lock we created (brange).  Note
	 * that if double-clipped, first_match and last_match will be
	 * outside our clipping range.  Otherwise first_match and last_match
	 * will be deleted.
	 *
	 * We have already taken care of any double clipping.
	 *
	 * The insert_point may become invalid as we delete records, do not
	 * use that pointer any more.  Also, when removing something other
	 * than 'range' we have to check to see if the item we are removing
	 * is 'next' and adjust 'next' properly.
	 *
	 * NOTE: brange will be NULL if F_UNLCKing.
	 */
	TAILQ_INIT(&deadlist);
	next = first_match;

	while ((range = next) != NULL) {
		next = TAILQ_NEXT(range, lf_link);

		/*
		 * Ignore elements that we do not own and ignore the
		 * primary request range which we just created.
		 */
		if (range->lf_owner != owner || range == brange)
			continue;

		/*
		 * We may have to wakeup a waiter when downgrading a lock.
		 */
		if (type == F_UNLCK)
			wakeup_needed = 1;
		if (type == F_RDLCK && range->lf_type == F_WRLCK)
			wakeup_needed = 1;

		/*
		 * Clip left.  This can only occur on first_match.
		 *
		 * Merge the left clip with brange if possible.  This must
		 * be done specifically, not in the optimized merge heuristic
		 * below, since we may have counted on it in our 'count'
		 * calculation above.
		 */
		if (range->lf_start < start) {
			KKASSERT(range == first_match);
			if (brange &&
			    range->lf_end >= start - 1 &&
			    lf_match(range, type, flags)) {
				range->lf_end = brange->lf_end;
				range->lf_flags |= brange->lf_flags & F_NOEND;
				/*
				 * Removing something other than 'range',
				 * adjust 'next' if necessary.
				 */
				if (next == brange)
					next = TAILQ_NEXT(next, lf_link);
				TAILQ_REMOVE(&lock->lf_range, brange, lf_link);
				if (brange->lf_flags & F_POSIX)
					--count;
				TAILQ_INSERT_TAIL(&deadlist, brange, lf_link);
				brange = range;
			} else if (range->lf_end >= start) {
				range->lf_end = start - 1;
				range->lf_flags &= ~F_NOEND;
			}
			if (range == last_match)
				break;
			continue;
		}

		/*
		 * Clip right.  This can only occur on last_match.
		 *
		 * Merge the right clip if possible.  This must be done
		 * specifically, not in the optimized merge heuristic
		 * below, since we may have counted on it in our 'count'
		 * calculation above.
		 *
		 * Since we are adjusting lf_start, we have to move the
		 * record to maintain the sorted list.  Since lf_start is
		 * only getting larger we can use the next element as the
		 * insert point (we don't have to backtrack).
		 */
		if (range->lf_end > end) {
			KKASSERT(range == last_match);
			if (brange &&
			    range->lf_start <= end + 1 &&
			    lf_match(range, type, flags)) {
				brange->lf_end = range->lf_end;
				brange->lf_flags |= range->lf_flags & F_NOEND;
				TAILQ_REMOVE(&lock->lf_range, range, lf_link);
				if (range->lf_flags & F_POSIX)
					--count;
				TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
			} else if (range->lf_start <= end) {
				range->lf_start = end + 1;
				TAILQ_REMOVE(&lock->lf_range, range, lf_link);
				lf_insert(&lock->lf_range, range, next);
			}
			/* range == last_match, we are done */
			break;
		}

		/*
		 * The record must be entirely enclosed.  Note that the
		 * record could be first_match or last_match, and will be
		 * deleted.
		 */
		KKASSERT(range->lf_start >= start && range->lf_end <= end);
		TAILQ_REMOVE(&lock->lf_range, range, lf_link);
		if (range->lf_flags & F_POSIX)
			--count;
		TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
		if (range == last_match)
			break;
	}

	/*
	 * Attempt to merge locks adjacent to brange.  For example, we may
	 * have had to clip first_match and/or last_match, and they might
	 * be adjacent.  Or there might simply have been an adjacent lock
	 * already there.
	 *
	 * Don't get fancy, just check adjacent elements in the list if they
	 * happen to be owned by us.
	 *
	 * This case only gets hit if we have a situation where a shared
	 * and exclusive lock are adjacent, and the exclusive lock is
	 * downgraded to shared or the shared lock is upgraded to exclusive.
	 */
	if (brange) {
		range = TAILQ_PREV(brange, lockf_range_list, lf_link);
		if (range &&
		    range->lf_owner == owner &&
		    range->lf_end == brange->lf_start - 1 &&
		    lf_match(range, type, flags)
		) {
			/*
			 * Extend range to cover brange and scrap brange.
			 */
			range->lf_end = brange->lf_end;
			range->lf_flags |= brange->lf_flags & F_NOEND;
			TAILQ_REMOVE(&lock->lf_range, brange, lf_link);
			if (brange->lf_flags & F_POSIX)
				--count;
			TAILQ_INSERT_TAIL(&deadlist, brange, lf_link);
			brange = range;
		}
		range = TAILQ_NEXT(brange, lf_link);
		if (range &&
		    range->lf_owner == owner &&
		    range->lf_start == brange->lf_end + 1 &&
		    lf_match(range, type, flags)
		) {
			/*
			 * Extend brange to cover range and scrap range.
			 */
			brange->lf_end = range->lf_end;
			brange->lf_flags |= range->lf_flags & F_NOEND;
			TAILQ_REMOVE(&lock->lf_range, range, lf_link);
			if (range->lf_flags & F_POSIX)
				--count;
			TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
		}
	}
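
	/*
	 * For example, suppose we hold a shared lock on [0, 49] and an
	 * exclusive lock on [50, 99], and we downgrade [50, 99] to shared.
	 * The new shared brange [50, 99] replaces the exclusive record,
	 * the TAILQ_PREV check above finds the adjacent matching [0, 49],
	 * and the two coalesce into a single shared lock on [0, 99], with
	 * the leftover record scrapped via the deadlist.
	 */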

	/*
	 * Destroy deleted elements.  We didn't want to do it in the loop
	 * because the free() might have blocked.
	 *
	 * Adjust the count for any posix locks we thought we might create
	 * but didn't.
	 */
	while ((range = TAILQ_FIRST(&deadlist)) != NULL) {
		TAILQ_REMOVE(&deadlist, range, lf_link);
		lf_destroy_range(range);
	}

	KKASSERT(count <= 0);
	if (count < 0)
		lf_count_change(owner, count);
do_wakeup:
	lf_print_lock(lock);
	if (wakeup_needed)
		lf_wakeup(lock, start, end);
	error = 0;
do_cleanup:
	if (new_range1 != NULL)
		lf_destroy_range(new_range1);
	if (new_range2 != NULL)
		lf_destroy_range(new_range2);
	return(error);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct flock *fl, struct lockf *lock, struct proc *owner,
	   int type, int flags, off_t start, off_t end)
{
	struct lockf_range *range;

	TAILQ_FOREACH(range, &lock->lf_range, lf_link)
		if (range->lf_owner != owner &&
		    lf_overlap(range, start, end) &&
		    (type == F_WRLCK || range->lf_type == F_WRLCK))
			break;
	if (range == NULL) {
		fl->l_type = F_UNLCK;
		return(0);
	}
	fl->l_type = range->lf_type;
	fl->l_whence = SEEK_SET;
	fl->l_start = range->lf_start;
	if (range->lf_flags & F_NOEND)
		fl->l_len = 0;
	else
		fl->l_len = range->lf_end - range->lf_start + 1;
	if (range->lf_owner != NULL && (range->lf_flags & F_POSIX))
		fl->l_pid = range->lf_owner->p_pid;
	else
		fl->l_pid = -1;
	return(0);
}
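
/*
 * Userland view of the above (illustrative sketch, not part of this
 * file's build): F_GETLK rewrites the caller's flock with the first
 * conflicting lock found, or sets l_type to F_UNLCK if the requested
 * range could have been locked.  l_start/l_len of 0 probe the whole
 * file.
 *
 *	struct flock fl;
 *
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 0;
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
 *		printf("conflicting lock held by pid %d\n", (int)fl.l_pid);
 */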

/*
 * Wakeup pending lock attempts.
 */
static void
lf_wakeup(struct lockf *lock, off_t start, off_t end)
{
	struct lockf_range *range, *nrange;

	TAILQ_FOREACH_MUTABLE(range, &lock->lf_blocked, lf_link, nrange) {
		if (lf_overlap(range, start, end) == 0)
			continue;
		TAILQ_REMOVE(&lock->lf_blocked, range, lf_link);
		range->lf_flags = 1;
		wakeup(range);
		if (range->lf_start >= start && range->lf_end <= end)
			break;
	}
}

/*
 * Allocate a range structure and initialize it sufficiently such that
 * lf_destroy_range() does not barf.
 */
static struct lockf_range *
lf_alloc_range(void)
{
	struct lockf_range *range;

#ifdef INVARIANTS
	lf_global_counter++;
#endif
	range = malloc(sizeof(struct lockf_range), M_LOCKF, M_WAITOK);
	range->lf_owner = NULL;
	return(range);
}

static void
lf_insert(struct lockf_range_list *list, struct lockf_range *elm,
	  struct lockf_range *insert_point)
{
	while (insert_point && insert_point->lf_start < elm->lf_start)
		insert_point = TAILQ_NEXT(insert_point, lf_link);
	if (insert_point != NULL)
		TAILQ_INSERT_BEFORE(insert_point, elm, lf_link);
	else
		TAILQ_INSERT_TAIL(list, elm, lf_link);
}
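
/*
 * The insert_point hint is always at or before elm's sorted position,
 * so the loop above only ever walks forward.  For example, inserting
 * elm [15, 18] into { [0, 9], [20, 29] } with insert_point = [0, 9]
 * advances the hint to [20, 29] and links elm before it, keeping the
 * list sorted by lf_start.
 */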

static void
lf_create_range(struct lockf_range *range, struct proc *owner, int type,
		int flags, off_t start, off_t end)
{
	KKASSERT(start <= end);
	range->lf_type = type;
	range->lf_flags = flags;
	range->lf_start = start;
	range->lf_end = end;
	range->lf_owner = owner;

	lf_printf("lf_create_range: %lld..%lld\n",
		  range->lf_start, range->lf_end);
}

static void
lf_destroy_range(struct lockf_range *range)
{
	lf_printf("lf_destroy_range: %lld..%lld\n",
		  range->lf_start, range->lf_end);
	free(range, M_LOCKF);

#ifdef INVARIANTS
	lf_global_counter--;
	KKASSERT(lf_global_counter >= 0);
#endif
}

#ifdef LOCKF_DEBUG

static void
_lf_printf(const char *ctl, ...)
{
	struct proc *p;
	__va_list va;

	if (lf_print_ranges) {
		if ((p = curproc) != NULL)
			printf("pid %d (%s): ", p->p_pid, p->p_comm);
	}
	__va_start(va, ctl);
	vprintf(ctl, va);
	__va_end(va);
}

static void
_lf_print_lock(const struct lockf *lock)
{
	struct lockf_range *range;

	if (lf_print_ranges == 0)
		return;

	if (TAILQ_EMPTY(&lock->lf_range)) {
		lf_printf("lockf %p: no ranges locked\n", lock);
	} else {
		lf_printf("lockf %p:\n", lock);
	}
	TAILQ_FOREACH(range, &lock->lf_range, lf_link)
		printf("\t%lld..%lld type %s owned by %d\n",
		       range->lf_start, range->lf_end,
		       range->lf_type == F_RDLCK ? "shared" : "exclusive",
		       range->lf_flags & F_POSIX ? range->lf_owner->p_pid : -1);
	if (TAILQ_EMPTY(&lock->lf_blocked))
		printf("no process waiting for range\n");
	else
		printf("blocked locks:\n");
	TAILQ_FOREACH(range, &lock->lf_blocked, lf_link)
		printf("\t%lld..%lld type %s waiting on %p\n",
		       range->lf_start, range->lf_end,
		       range->lf_type == F_RDLCK ? "shared" : "exclusive",
		       range);
}
#endif /* LOCKF_DEBUG */