/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD: src/sys/kern/kern_lockf.c,v 1.25 1999/11/16 16:28:56 phk Exp $
 * $DragonFly: src/sys/kern/kern_lockf.c,v 1.15 2004/05/11 20:37:21 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/resourcevar.h>

#include <sys/lockf.h>
#include <machine/limits.h>	/* for LLONG_MAX */
#ifdef LOCKF_DEBUG
int lf_global_counter = 0;

int lf_print_ranges = 0;

static void	lf_print_lock(const struct lockf *);
#endif
int maxposixlocksperuid = 10000;

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
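/*
 * A struct lockf carries two lists: lf_range holds the currently held
 * ranges, kept sorted by lf_start (see the insert_point handling in
 * lf_setlock()), and lf_blocked holds one entry per sleeping lock
 * attempt, which lf_wakeup() removes and wakes up when an overlapping
 * range is released or downgraded.
 */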
static void	lf_wakeup(struct lockf *, off_t, off_t);
static int	lf_overlap(const struct lockf_range *, off_t, off_t);
static int	lf_overlap_left(const struct lockf_range *, off_t, off_t);
static int	lf_overlap_right(const struct lockf_range *, off_t, off_t);
static int	lf_overlap_left2(const struct lockf_range *, off_t, off_t);
static int	lf_overlap_right2(const struct lockf_range *, off_t, off_t);
static int	lf_overlap_embedded(const struct lockf_range *, off_t, off_t);
static struct lockf_range *lf_alloc_range(void);
static void	lf_create_range(struct lockf_range *, struct proc *, int, int,
				off_t, off_t, int);
static void	lf_destroy_range(struct lockf_range *, int);

static int	lf_setlock(struct lockf *, struct proc *, int, int,
			   off_t, off_t);
static int	lf_clearlock(struct lockf *, struct proc *, int, int,
			     off_t, off_t);
static int	lf_getlock(struct flock *, struct lockf *, struct proc *,
			   int, int, off_t, off_t);

static int	lf_count_change(struct proc *, int);
/*
 * Change the POSIX lock accounting for the given process.
 */
void
lf_count_adjust(struct proc *p, int increase)
{
	struct uidinfo *uip;

	uip = p->p_ucred->cr_uidinfo;

	if (increase)
		uip->ui_posixlocks += p->p_numposixlocks;
	else
		uip->ui_posixlocks -= p->p_numposixlocks;

	KASSERT(uip->ui_posixlocks >= 0,
		("Negative number of POSIX locks held by %s user: %d.",
		 increase ? "new" : "old", uip->ui_posixlocks));
}
static int
lf_count_change(struct proc *owner, int diff)
{
	struct uidinfo *uip;
	int max;

	/* we might actually not have a process context */
	if (owner == NULL)
		return(0);

	uip = owner->p_ucred->cr_uidinfo;

	max = MIN(owner->p_rlimit[RLIMIT_POSIXLOCKS].rlim_cur,
		  maxposixlocksperuid);
	if (diff > 0 && owner->p_ucred->cr_uid != 0 && max != -1 &&
	    uip->ui_posixlocks >= max)
		return(1);

	uip->ui_posixlocks += diff;

	KASSERT(uip->ui_posixlocks >= 0,
		("Negative number of POSIX locks held by user: %d.",
		 uip->ui_posixlocks));

	return(0);
}
/*
 * Advisory record locking support
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf *lock, u_quad_t size)
{
	struct flock *fl = ap->a_fl;
	struct proc *owner;
	off_t start, end;
	int type, flags, error;
	lwkt_tokref ilock;

	if (lock->init_done == 0) {
		TAILQ_INIT(&lock->lf_range);
		TAILQ_INIT(&lock->lf_blocked);
		lock->init_done = 1;
	}

	type = fl->l_type;
	flags = ap->a_flags;

	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return(EINVAL);
	}
	if (start < 0)
		return(EINVAL);

	if (fl->l_len == 0) {
		flags |= F_NOEND;
		end = LLONG_MAX;
	} else {
		end = start + fl->l_len - 1;
		if (end < start)
			return(EINVAL);
	}

	/*
	 * This isn't really correct for flock-style locks,
	 * but the current handling is somewhat broken anyway.
	 */
	owner = (struct proc *)ap->a_id;

	/*
	 * Do the requested operation.
	 */
	lwkt_gettoken(&ilock, lwkt_token_pool_get(lock));
	switch (ap->a_op) {
	case F_SETLK:
		error = lf_setlock(lock, owner, type, flags, start, end);
		break;

	case F_UNLCK:
		error = lf_clearlock(lock, owner, type, flags, start, end);
		break;

	case F_GETLK:
		error = lf_getlock(fl, lock, owner, type, flags, start, end);
		break;

	default:
		error = EINVAL;
		break;
	}
	lwkt_reltoken(&ilock);
	return(error);
}
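/*
 * Illustrative sketch (not part of the original file): lf_advlock() is
 * reached from userland through fcntl(2) on a vnode-backed descriptor.
 * The conversion above maps the flock argument to an inclusive byte
 * range; l_len == 0 means "to end of file" and sets F_NOEND:
 *
 *	struct flock fl;
 *
 *	fl.l_whence = SEEK_SET;	// l_start is an absolute offset
 *	fl.l_start = 0;		// from the first byte of the file
 *	fl.l_len = 0;		// 0 == lock to EOF (F_NOEND)
 *	fl.l_type = F_WRLCK;	// exclusive record lock
 *	if (fcntl(fd, F_SETLKW, &fl) == -1)	// blocking variant (F_WAIT)
 *		err(1, "fcntl");
 */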
static int
lf_setlock(struct lockf *lock, struct proc *owner, int type, int flags,
	   off_t start, off_t end)
{
	struct lockf_range *range, *first_match, *insert_point;
	int wakeup_needed, lock_needed;
	/* pre-allocation to avoid blocking in the middle of the algorithm */
	struct lockf_range *new_range1 = NULL, *new_range2 = NULL;
	int error = 0;
	/* for restoration in case of hitting the POSIX lock limit below */
	struct lockf_range *orig_first_match = NULL;
	off_t orig_end = 0;
	int orig_flags = 0;

restart:
	if (new_range1 == NULL)
		new_range1 = lf_alloc_range();
	if (new_range2 == NULL)
		new_range2 = lf_alloc_range();
	first_match = NULL;
	insert_point = NULL;
	wakeup_needed = 0;
	lock_needed = 1;
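	/*
	 * Scan the held ranges: remember the sorted insertion point for
	 * the new range, note the first overlapping range of our own,
	 * and stop at the first conflicting lock held by somebody else.
	 */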
	TAILQ_FOREACH(range, &lock->lf_range, lf_link) {
		if (insert_point == NULL && range->lf_start >= start)
			insert_point = range;
		if (lf_overlap(range, start, end) == 0)
			continue;
		if (range->lf_owner == owner) {
			if (first_match == NULL)
				first_match = range;
			continue;
		}
		if (type == F_WRLCK || range->lf_type == F_WRLCK)
			break;
	}
	if (range != NULL) {
		struct lockf_range *brange;

		if ((flags & F_WAIT) == 0) {
			error = EAGAIN;
			goto do_cleanup;
		}

		/*
		 * We are blocked.  For POSIX locks we have to check
		 * for deadlocks and return with EDEADLK.  This is done
		 * by checking whether range->lf_owner is already
		 * blocked on one of the locks held by this process.
		 *
		 * Since flock-style locks cover the whole file, a
		 * deadlock between those is nearly impossible.
		 * This can only occur if a process tries to lock the
		 * same inode exclusively while holding a shared lock
		 * with another descriptor.
		 * XXX How can we cleanly detect this?
		 * XXX The current mixing of flock & fcntl/lockf is evil.
		 *
		 * Handle existing locks of flock-style like POSIX locks.
		 */
		if (flags & F_POSIX) {
			TAILQ_FOREACH(brange, &lock->lf_blocked, lf_link)
				if (brange->lf_owner == range->lf_owner) {
					error = EDEADLK;
					goto do_cleanup;
				}
		}

		/*
		 * For flock-style locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((flags & F_FLOCK) && type == F_WRLCK)
			lf_clearlock(lock, owner, type, flags, start, end);

		brange = new_range1;
		new_range1 = NULL;
		lf_create_range(brange, owner, type, 0, start, end, 0);
		TAILQ_INSERT_TAIL(&lock->lf_blocked, brange, lf_link);
		error = tsleep(brange, PCATCH, "lockf", 0);

		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which case we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing/downgrading a lock (in which case
		 * we have already been removed from the blocked list
		 * and our lf_flags field is 1).
		 */
		if (brange->lf_flags == 0)
			TAILQ_REMOVE(&lock->lf_blocked, brange, lf_link);
		lf_destroy_range(brange, 0);

		if (error)
			goto do_cleanup;
		goto restart;
	}
	if (first_match == NULL) {
		if (flags & F_POSIX) {
			if (lf_count_change(owner, 1)) {
				error = ENOLCK;
				goto do_cleanup;
			}
		}
		range = new_range1;
		new_range1 = NULL;
		lf_create_range(range, owner, type, flags, start, end, 1);
		if (insert_point != NULL)
			TAILQ_INSERT_BEFORE(insert_point, range, lf_link);
		else
			TAILQ_INSERT_TAIL(&lock->lf_range, range, lf_link);
		goto do_wakeup;
	}
	if (lf_overlap_left(first_match, start, end)) {
		KKASSERT((flags & F_POSIX) != 0);
		if (first_match->lf_end > end) {
			if (first_match->lf_type == type)
				goto do_wakeup;
			if (lf_count_change(owner, 2)) {
				error = ENOLCK;
				goto do_cleanup;
			}
			range = new_range1;
			new_range1 = NULL;
			lf_create_range(range, owner, type, flags,
					start, end, 1);
			if (insert_point != NULL)
				TAILQ_INSERT_BEFORE(insert_point, range,
						    lf_link);
			else
				TAILQ_INSERT_TAIL(&lock->lf_range, range,
						  lf_link);
			insert_point = range;
			range = new_range2;
			new_range2 = NULL;
			lf_create_range(range, owner, first_match->lf_type,
					first_match->lf_flags, end + 1,
					first_match->lf_end, 1);
			TAILQ_INSERT_AFTER(&lock->lf_range, insert_point,
					   range, lf_link);
			first_match->lf_flags &= ~F_NOEND;
			first_match->lf_end = start - 1;
			goto do_wakeup;
		}
		/*
		 * left match, but not right match
		 *
		 * handle the lf_type != type case directly,
		 * merge the other case with the !lock_needed path.
		 */
		if (first_match->lf_type != type) {
			/*
			 * This is needed if the lockf acquisition below fails.
			 */
			orig_first_match = first_match;
			orig_end = first_match->lf_end;
			orig_flags = first_match->lf_flags;
			first_match->lf_end = start - 1;
			first_match->lf_flags &= ~F_NOEND;
			if (type == F_RDLCK)
				wakeup_needed = 1;
			/* Try to find the next matching range */
			range = TAILQ_NEXT(first_match, lf_link);
			while (range != NULL) {
				if (range->lf_owner == owner &&
				    lf_overlap(range, start, end))
					break;
				range = TAILQ_NEXT(range, lf_link);
			}
			if (range != NULL)
				first_match = range;
		} else {
			/*
			 * fall through to !left_match behaviour
			 */
			first_match->lf_end = end;
			first_match->lf_flags |= flags & F_NOEND;
			lock_needed = 0;
		}
	}
	if (lf_overlap_embedded(first_match, start, end)) {
		if (first_match != insert_point) {
			TAILQ_REMOVE(&lock->lf_range, first_match, lf_link);
			TAILQ_INSERT_BEFORE(insert_point, first_match, lf_link);
		}
		first_match->lf_start = start;
		first_match->lf_end = end;
		first_match->lf_flags |= flags & F_NOEND;
		first_match->lf_type = type;
		lock_needed = 0;
	}
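	/*
	 * The new lock is already covered by first_match at this point;
	 * walk the following ranges of the same owner, dropping any that
	 * are now embedded in [start, end] and merging or splitting a
	 * range that overlaps the right edge.
	 */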
	if (lock_needed == 0) {
		struct lockf_range *nrange;

		range = TAILQ_NEXT(first_match, lf_link);
		while (range != NULL) {
			if (range->lf_owner != owner) {
				range = TAILQ_NEXT(range, lf_link);
				continue;
			}
			if (lf_overlap_embedded(range, start, end)) {
				nrange = TAILQ_NEXT(range, lf_link);
				TAILQ_REMOVE(&lock->lf_range, range,
					     lf_link);
				lf_count_change(owner, -1);
				lf_destroy_range(range, 1);
				range = nrange;
				continue;
			}
			if (lf_overlap_right(range, start, end) == 0) {
				range = TAILQ_NEXT(range, lf_link);
				continue;
			}
			if (range->lf_type != type) {
				range->lf_start = end + 1;
				nrange = TAILQ_NEXT(range, lf_link);
				TAILQ_REMOVE(&lock->lf_range, range, lf_link);
				while (nrange != NULL) {
					if (nrange->lf_start >= end + 1)
						break;
					nrange = TAILQ_NEXT(nrange, lf_link);
				}
				if (nrange != NULL)
					TAILQ_INSERT_BEFORE(nrange, range,
							    lf_link);
				else
					TAILQ_INSERT_TAIL(&lock->lf_range,
							  range, lf_link);
				break;
			}
			first_match->lf_end = range->lf_end;
			first_match->lf_flags |=
			    range->lf_flags & F_NOEND;
			TAILQ_REMOVE(&lock->lf_range, range, lf_link);
			lf_count_change(owner, -1);
			lf_destroy_range(range, 1);
			break;
		}
		goto do_wakeup;
	}
	if (lf_overlap_right(first_match, start, end)) {
		KKASSERT((flags & F_POSIX) != 0);
		if (first_match->lf_type == type) {
			first_match->lf_start = start;
			if (first_match != insert_point) {
				TAILQ_REMOVE(&lock->lf_range, first_match,
					     lf_link);
				TAILQ_INSERT_BEFORE(insert_point, first_match,
						    lf_link);
			}
			goto do_wakeup;
		}
		if (lf_count_change(owner, 1)) {
			if (orig_first_match != NULL) {
				orig_first_match->lf_end = orig_end;
				orig_first_match->lf_flags = orig_flags;
			}
			error = ENOLCK;
			goto do_cleanup;
		}
		first_match->lf_start = end + 1;
		KKASSERT(new_range1 != NULL);
		range = new_range1;
		new_range1 = NULL;
		lf_create_range(range, owner, type, flags, start, end, 1);
		TAILQ_INSERT_BEFORE(insert_point, range, lf_link);
		range = TAILQ_NEXT(first_match, lf_link);
		TAILQ_REMOVE(&lock->lf_range, first_match, lf_link);
		while (range != NULL) {
			if (range->lf_start >= first_match->lf_start)
				break;
			range = TAILQ_NEXT(range, lf_link);
		}
		if (range != NULL)
			TAILQ_INSERT_BEFORE(range, first_match, lf_link);
		else
			TAILQ_INSERT_TAIL(&lock->lf_range, first_match,
					  lf_link);
	}

do_wakeup:
#ifdef LOCKF_DEBUG
	if (lf_print_ranges)
		lf_print_lock(lock);
#endif
	if (wakeup_needed)
		lf_wakeup(lock, start, end);
	error = 0;

do_cleanup:
	if (new_range1 != NULL)
		lf_destroy_range(new_range1, 0);
	if (new_range2 != NULL)
		lf_destroy_range(new_range2, 0);
	return(error);
}
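/*
 * Release the range [start, end] of any locks held by owner: ranges
 * completely inside the interval are removed, partially covered ranges
 * are shrunk or split.
 */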
static int
lf_clearlock(struct lockf *lock, struct proc *owner, int type, int flags,
	     off_t start, off_t end)
{
	struct lockf_range *range, *trange;
	struct lockf_range *new_range;
	int error = 0;

	new_range = lf_alloc_range();

	TAILQ_FOREACH_MUTABLE(range, &lock->lf_range, lf_link, trange) {
		if (range->lf_end < start)
			continue;
		if (range->lf_start > end)
			break;
		if (range->lf_owner != owner)
			continue;
		if (lf_overlap_embedded(range, start, end)) {
			TAILQ_REMOVE(&lock->lf_range, range, lf_link);
			/* flock-locks are equal */
			if (range->lf_flags & F_POSIX)
				lf_count_change(owner, -1);
			lf_destroy_range(range, 1);
			continue;
		}
		if (lf_overlap_left2(range, start, end)) {
			KKASSERT(range->lf_flags & F_POSIX);
			if (lf_overlap_right2(range, start, end)) {
				struct lockf_range *nrange;

				if (lf_count_change(owner, 1)) {
					error = ENOLCK;
					goto do_cleanup;
				}
				nrange = new_range;
				new_range = NULL;
				lf_create_range(nrange, range->lf_owner,
						range->lf_type, range->lf_flags,
						end + 1, range->lf_end, 1);
				range->lf_end = start - 1;
				range->lf_flags &= ~F_NOEND;
				for (; range != NULL;
				     range = TAILQ_NEXT(range, lf_link))
					if (range->lf_start >= nrange->lf_start)
						break;
				if (range != NULL)
					TAILQ_INSERT_BEFORE(range, nrange,
							    lf_link);
				else
					TAILQ_INSERT_TAIL(&lock->lf_range,
							  nrange, lf_link);
				break;
			}
			range->lf_end = start - 1;
			range->lf_flags &= ~F_NOEND;
			continue;
		}
		if (lf_overlap_right2(range, start, end)) {
			struct lockf_range *nrange = range;

			KKASSERT(range->lf_flags & F_POSIX);
			nrange->lf_start = end + 1;
			range = TAILQ_NEXT(range, lf_link);
			TAILQ_REMOVE(&lock->lf_range, nrange, lf_link);
			for (; range != NULL;
			     range = TAILQ_NEXT(range, lf_link))
				if (range->lf_start >= nrange->lf_start)
					break;
			if (range != NULL)
				TAILQ_INSERT_BEFORE(range, nrange, lf_link);
			else
				TAILQ_INSERT_TAIL(&lock->lf_range, nrange,
						  lf_link);
			break;
		}
	}

	lf_wakeup(lock, start, end);
	error = 0;

do_cleanup:
	if (new_range != NULL)
		lf_destroy_range(new_range, 0);
	return(error);
}
/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct flock *fl, struct lockf *lock, struct proc *owner,
	   int type, int flags, off_t start, off_t end)
{
	struct lockf_range *range;

	TAILQ_FOREACH(range, &lock->lf_range, lf_link)
		if (range->lf_owner != owner &&
		    lf_overlap(range, start, end) &&
		    (type == F_WRLCK || range->lf_type == F_WRLCK))
			break;
	if (range == NULL) {
		fl->l_type = F_UNLCK;
		return(0);
	}
	fl->l_type = range->lf_type;
	fl->l_whence = SEEK_SET;
	fl->l_start = range->lf_start;
	if (range->lf_flags & F_NOEND)
		fl->l_len = 0;
	else
		fl->l_len = range->lf_end - range->lf_start + 1;
	if (range->lf_owner != NULL && (range->lf_flags & F_POSIX))
		fl->l_pid = range->lf_owner->p_pid;
	else
		fl->l_pid = -1;
	return(0);
}
/*
 * Check whether range and [start, end] overlap.
 */
static int
lf_overlap(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_start >= start && range->lf_start <= end)
		return(1);
	else if (start >= range->lf_start && start <= range->lf_end)
		return(1);
	else
		return(0);
}
/*
 * Wakeup pending lock attempts.
 */
static void
lf_wakeup(struct lockf *lock, off_t start, off_t end)
{
	struct lockf_range *range, *nrange;

	TAILQ_FOREACH_MUTABLE(range, &lock->lf_blocked, lf_link, nrange) {
		if (lf_overlap(range, start, end) == 0)
			continue;
		TAILQ_REMOVE(&lock->lf_blocked, range, lf_link);
		range->lf_flags = 1;
		wakeup(range);
	}
}
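/*
 * Returns non-zero if the given range starts left of [start, end] and
 * ends inside it or immediately in front of it (lf_end >= start - 1),
 * i.e. it overlaps or abuts the left edge without extending past end.
 */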
static int
lf_overlap_left(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_start < start && range->lf_end >= start - 1 &&
	    range->lf_end <= end)
		return(1);
	else
		return(0);
}
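/*
 * Mirror image of lf_overlap_left(): the range extends past end and
 * starts inside [start, end] or immediately behind it.
 */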
static int
lf_overlap_right(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_end > end && range->lf_start >= start &&
	    range->lf_start - 1 <= end)
		return(1);
	else
		return(0);
}
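/*
 * Like lf_overlap_left(), but requires a true overlap with the left
 * edge (lf_end >= start); merely abutting ranges do not qualify.
 */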
static int
lf_overlap_left2(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_start < start && range->lf_end >= start &&
	    range->lf_end <= end)
		return(1);
	else
		return(0);
}
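/*
 * Like lf_overlap_right(), but requires a true overlap with the right
 * edge (lf_start <= end); merely abutting ranges do not qualify.
 */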
static int
lf_overlap_right2(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_end > end && range->lf_start >= start &&
	    range->lf_start <= end)
		return(1);
	else
		return(0);
}
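/*
 * Returns non-zero if the given range lies completely inside [start, end].
 */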
static int
lf_overlap_embedded(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_start >= start && range->lf_end <= end)
		return(1);
	else
		return(0);
}
/*
 * Allocate a range structure and initialize it sufficiently such that
 * lf_destroy_range() does not barf.
 */
static struct lockf_range *
lf_alloc_range(void)
{
	struct lockf_range *range;

#ifdef LOCKF_DEBUG
	++lf_global_counter;
#endif
	range = malloc(sizeof(struct lockf_range), M_LOCKF, M_WAITOK);
	range->lf_owner = NULL;
	return(range);
}
static void
lf_create_range(struct lockf_range *range, struct proc *owner, int type,
		int flags, off_t start, off_t end, int accounting)
{
	KKASSERT(start <= end);
	if (owner != NULL && (flags & F_POSIX) && accounting)
		++owner->p_numposixlocks;
	range->lf_type = type;
	range->lf_flags = flags;
	range->lf_start = start;
	range->lf_end = end;
	range->lf_owner = owner;

#ifdef LOCKF_DEBUG
	if (lf_print_ranges)
		printf("lf_create_range: %lld..%lld\n", range->lf_start,
		       range->lf_end);
#endif
}
static void
lf_destroy_range(struct lockf_range *range, int accounting)
{
	struct proc *owner = range->lf_owner;
	int flags = range->lf_flags;

#ifdef LOCKF_DEBUG
	if (lf_print_ranges)
		printf("lf_destroy_range: %lld..%lld\n", range->lf_start,
		       range->lf_end);
#endif

	free(range, M_LOCKF);
	if (owner != NULL && (flags & F_POSIX) && accounting) {
		--owner->p_numposixlocks;
		KASSERT(owner->p_numposixlocks >= 0,
			("Negative number of POSIX locks held by process: %d",
			 owner->p_numposixlocks));
	}

#ifdef LOCKF_DEBUG
	--lf_global_counter;
	KKASSERT(lf_global_counter >= 0);
#endif
}
#ifdef LOCKF_DEBUG
static void
lf_print_lock(const struct lockf *lock)
{
	struct lockf_range *range;

	if (TAILQ_EMPTY(&lock->lf_range))
		printf("lockf %p: no ranges locked\n", lock);
	else
		printf("lockf %p:\n", lock);
	TAILQ_FOREACH(range, &lock->lf_range, lf_link)
		printf("\t%lld..%lld type %s owned by %d\n",
		       range->lf_start, range->lf_end,
		       range->lf_type == F_RDLCK ? "shared" : "exclusive",
		       range->lf_flags & F_POSIX ? range->lf_owner->p_pid : -1);
	if (TAILQ_EMPTY(&lock->lf_blocked))
		printf("no process waiting for range\n");
	else
		printf("blocked locks:\n");
	TAILQ_FOREACH(range, &lock->lf_blocked, lf_link)
		printf("\t%lld..%lld type %s waiting on %p\n",
		       range->lf_start, range->lf_end,
		       range->lf_type == F_RDLCK ? "shared" : "exclusive",
		       range);
}
#endif /* LOCKF_DEBUG */