/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 * Copyright (c) 2006 Matthew Dillon <dillon@backplane.com>.  All rights reserved.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD: src/sys/kern/kern_lockf.c,v 1.25 1999/11/16 16:28:56 phk Exp $
 * $DragonFly: src/sys/kern/kern_lockf.c,v 1.34 2006/09/05 00:55:45 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/resourcevar.h>

#include <sys/lockf.h>
#include <machine/limits.h>	/* for LLONG_MAX */
#include <machine/stdarg.h>

#ifdef INVARIANTS
int lf_global_counter = 0;
#endif

#ifdef LOCKF_DEBUG
int lf_print_ranges = 0;

static void	_lf_print_lock(const struct lockf *);
static void	_lf_printf(const char *, ...);

#define lf_print_lock(lock) if (lf_print_ranges) _lf_print_lock(lock)
#define lf_printf(ctl, args...) if (lf_print_ranges) _lf_printf(ctl, args)
#else
#define lf_print_lock(lock)
#define lf_printf(ctl, args...)
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

static void	lf_wakeup(struct lockf *, off_t, off_t);
static struct lockf_range *lf_alloc_range(void);
static void	lf_create_range(struct lockf_range *, struct proc *, int, int,
				off_t, off_t);
static void	lf_insert(struct lockf_range_list *list,
				struct lockf_range *elm,
				struct lockf_range *insert_point);
static void	lf_destroy_range(struct lockf_range *);

static int	lf_setlock(struct lockf *, struct proc *, int, int,
			   off_t, off_t);
static int	lf_getlock(struct flock *, struct lockf *, struct proc *,
			   int, int, off_t, off_t);

static int	lf_count_change(struct proc *, int);

/*
 * Return TRUE (non-zero) if the type and posix flags match.
 */
static __inline
int
lf_match(struct lockf_range *range, int type, int flags)
{
	if (range->lf_type != type)
		return(0);
	if ((range->lf_flags ^ flags) & F_POSIX)
		return(0);
	return(1);
}

/*
 * Check whether range and [start, end] overlap.
 */
static __inline
int
lf_overlap(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_start >= start && range->lf_start <= end)
		return(1);
	else if (start >= range->lf_start && start <= range->lf_end)
		return(1);
	else
		return(0);
}

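/*
 * Worked example (illustrative): against an existing range covering
 * [100, 199], lf_overlap() returns 1 for a request [50, 120] (the
 * first test hits: 100 falls inside [50, 120]) and for [150, 250]
 * (the second test hits: 150 falls inside [100, 199]), but 0 for
 * [200, 300], which merely abuts the range.
 */
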
/*
 * Change the POSIX lock accounting for the given process.
 */
void
lf_count_adjust(struct proc *p, int increase)
{
	struct uidinfo *uip;

	KKASSERT(p != NULL);

	uip = p->p_ucred->cr_uidinfo;

	if (increase)
		uip->ui_posixlocks += p->p_numposixlocks;
	else
		uip->ui_posixlocks -= p->p_numposixlocks;

	KASSERT(uip->ui_posixlocks >= 0,
		("Negative number of POSIX locks held by %s user: %d.",
		 increase ? "new" : "old", uip->ui_posixlocks));
}

static int
lf_count_change(struct proc *owner, int diff)
{
	struct uidinfo *uip;
	int max;

	/* we might actually not have a process context */
	if (owner == NULL)
		return(0);

	uip = owner->p_ucred->cr_uidinfo;

	max = MIN(owner->p_rlimit[RLIMIT_POSIXLOCKS].rlim_cur,
		  maxposixlocksperuid);
	if (diff > 0 && owner->p_ucred->cr_uid != 0 && max != -1 &&
	    uip->ui_posixlocks >= max) {
		return(1);
	}

	uip->ui_posixlocks += diff;
	owner->p_numposixlocks += diff;

	KASSERT(uip->ui_posixlocks >= 0,
		("Negative number of POSIX locks held by user: %d.",
		 uip->ui_posixlocks));
	KASSERT(owner->p_numposixlocks >= 0,
		("Negative number of POSIX locks held by proc: %d.",
		 owner->p_numposixlocks));

	return(0);
}

/*
 * Advisory record locking support
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf *lock, u_quad_t size)
{
	struct flock *fl = ap->a_fl;
	struct proc *owner;
	off_t start, end;
	int type, flags, error;
	lwkt_tokref ilock;

	/*
	 * Convert the flock structure into a start and end.  flags must
	 * be initialized from the caller's flags before we can OR in
	 * F_NOEND below.
	 */
	flags = ap->a_flags;
	type = fl->l_type;

	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return(EINVAL);
	}
	if (start < 0)
		return(EINVAL);
	if (fl->l_len == 0) {
		flags |= F_NOEND;
		end = LLONG_MAX;
	} else {
		end = start + fl->l_len - 1;
		if (end < start)
			return(EINVAL);
	}
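
	/*
	 * Illustrative examples (SEEK_SET): l_start = 100, l_len = 50
	 * yields the range [100, 149]; l_start = 100, l_len = 0 yields
	 * [100, LLONG_MAX] with F_NOEND set, i.e. "to end of file,
	 * including any future growth".
	 */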

	/*
	 * This isn't really correct for flock-style locks,
	 * but the current handling is somewhat broken anyway.
	 */
	owner = (struct proc *)ap->a_id;

	/*
	 * Do the requested operation.
	 */
	lwkt_gettoken(&ilock, lwkt_token_pool_get(lock));

	if (lock->init_done == 0) {
		TAILQ_INIT(&lock->lf_range);
		TAILQ_INIT(&lock->lf_blocked);
		lock->init_done = 1;
	}

	switch(ap->a_op) {
	case F_SETLK:
		/*
		 * NOTE: It is possible for both lf_range and lf_blocked to
		 * be empty if we block and get woken up, but another process
		 * then gets in and issues an unlock.  So VMAYHAVELOCKS must
		 * be set after the lf_setlock() operation completes rather
		 * than before.
		 */
		error = lf_setlock(lock, owner, type, flags, start, end);
		ap->a_vp->v_flag |= VMAYHAVELOCKS;
		break;

	case F_UNLCK:
		error = lf_setlock(lock, owner, type, flags, start, end);
		if (TAILQ_EMPTY(&lock->lf_range) &&
		    TAILQ_EMPTY(&lock->lf_blocked)) {
			ap->a_vp->v_flag &= ~VMAYHAVELOCKS;
		}
		break;

	case F_GETLK:
		error = lf_getlock(fl, lock, owner, type, flags, start, end);
		break;

	default:
		error = EINVAL;
		break;
	}
	lwkt_reltoken(&ilock);
	return(error);
}

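/*
 * Illustrative userland path (a sketch, not part of this file): a
 * whole-file write lock such as
 *
 *	struct flock fl = { .l_whence = SEEK_SET, .l_start = 0,
 *			    .l_len = 0, .l_type = F_WRLCK };
 *	fcntl(fd, F_SETLKW, &fl);
 *
 * reaches lf_advlock() via VOP_ADVLOCK() with op F_SETLK and the
 * F_WAIT | F_POSIX flags, converting to the range [0, LLONG_MAX]
 * with F_NOEND set as described above.
 */
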
static int
lf_setlock(struct lockf *lock, struct proc *owner, int type, int flags,
	   off_t start, off_t end)
{
	struct lockf_range *range;
	struct lockf_range *brange;
	struct lockf_range *next;
	struct lockf_range *first_match;
	struct lockf_range *last_match;
	struct lockf_range *insert_point;
	struct lockf_range *new_range1;
	struct lockf_range *new_range2;
	int wakeup_needed;
	int double_clip;
	int error = 0;
	int count;
	struct lockf_range_list deadlist;

	new_range1 = NULL;
	new_range2 = NULL;
	count = 0;

restart:
	/*
	 * Preallocate two ranges so we don't have to worry about blocking
	 * in the middle of the lock code.
	 */
	if (new_range1 == NULL)
		new_range1 = lf_alloc_range();
	if (new_range2 == NULL)
		new_range2 = lf_alloc_range();
	first_match = NULL;
	last_match = NULL;
	insert_point = NULL;
	wakeup_needed = 0;

	lf_print_lock(lock);

	/*
	 * Locate the insertion point for the new lock (the first range
	 * with an lf_start >= start).
	 *
	 * Locate the first and last ranges owned by us that overlap
	 * the requested range.
	 */
	TAILQ_FOREACH(range, &lock->lf_range, lf_link) {
		if (insert_point == NULL && range->lf_start >= start)
			insert_point = range;

		/*
		 * Skip non-overlapping locks.  Locks are sorted by lf_start,
		 * so we can terminate the search when lf_start exceeds the
		 * requested range (insert_point is still guaranteed to be
		 * set properly).
		 */
		if (range->lf_end < start)
			continue;
		if (range->lf_start > end) {
			range = NULL;
			break;
		}

		/*
		 * Overlapping lock.  Set first_match and last_match if we
		 * are the owner.
		 */
		if (range->lf_owner == owner) {
			if (first_match == NULL)
				first_match = range;
			last_match = range;
			continue;
		}

		/*
		 * If we aren't the owner check for a conflicting lock.  Only
		 * if not unlocking.
		 */
		if (type != F_UNLCK) {
			if (type == F_WRLCK || range->lf_type == F_WRLCK)
				break;
		}
	}

	/*
	 * If a conflicting lock was observed, block or fail as appropriate.
	 * (this code is skipped when unlocking)
	 */
	if (range != NULL) {
		if ((flags & F_WAIT) == 0) {
			error = EAGAIN;
			goto do_cleanup;
		}

		/*
		 * We are blocked.  For POSIX locks we have to check
		 * for deadlocks and return with EDEADLK.  This is done
		 * by checking whether range->lf_owner is already
		 * blocked.
		 *
		 * Since flock-style locks cover the whole file, a
		 * deadlock between those is nearly impossible.
		 * This can only occur if a process tries to lock the
		 * same inode exclusively while holding a shared lock
		 * with another descriptor.
		 * XXX How can we cleanly detect this?
		 * XXX The current mixing of flock & fcntl/lockf is evil.
		 *
		 * Handle existing locks of flock-style like POSIX locks.
		 */
		if (flags & F_POSIX) {
			TAILQ_FOREACH(brange, &lock->lf_blocked, lf_link)
				if (brange->lf_owner == range->lf_owner) {
					error = EDEADLK;
					goto do_cleanup;
				}
		}
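
		/*
		 * Illustrative deadlock (sketch): A holds [0,49], B holds
		 * [50,99].  A requests [50,99] and blocks on B's lock.  If
		 * B then requests [0,49], the scan above finds A, the owner
		 * of the conflicting lock, already on lf_blocked and fails
		 * B's request with EDEADLK instead of sleeping forever.
		 */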

		/*
		 * For flock-style locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((flags & F_POSIX) == 0 && type == F_WRLCK)
			lf_setlock(lock, owner, F_UNLCK, 0, start, end);

		brange = new_range1;
		new_range1 = NULL;
		lf_create_range(brange, owner, type, 0, start, end);
		TAILQ_INSERT_TAIL(&lock->lf_blocked, brange, lf_link);
		error = tsleep(brange, PCATCH, "lockf", 0);

		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which case we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing/downgrading a lock (in which case
		 * we have already been removed from the blocked list
		 * and our lf_flags field is 1).
		 *
		 * Sleep if it looks like we might be livelocking.
		 */
		if (brange->lf_flags == 0)
			TAILQ_REMOVE(&lock->lf_blocked, brange, lf_link);
		if (count == 2)
			tsleep(brange, 0, "lockfz", 2);
		else
			++count;
		lf_destroy_range(brange);

		if (error)
			goto do_cleanup;
		goto restart;
	}

	/*
	 * If there are no overlapping locks owned by us then creating
	 * the new lock is easy.  This is the most common case.
	 */
	if (first_match == NULL) {
		if (type == F_UNLCK)
			goto do_wakeup;
		if (flags & F_POSIX) {
			if (lf_count_change(owner, 1)) {
				error = ENOLCK;
				goto do_cleanup;
			}
		}
		range = new_range1;
		new_range1 = NULL;
		lf_create_range(range, owner, type, flags, start, end);
		lf_insert(&lock->lf_range, range, insert_point);
		goto do_wakeup;
	}

	/*
	 * Special case checked for in a couple of places below: the
	 * request is entirely enclosed within a single existing lock
	 * that we own, so it must be clipped on both ends.
	 */
	if (first_match == last_match && first_match->lf_start < start &&
	    last_match->lf_end > end) {
		double_clip = 1;
	} else {
		double_clip = 0;
	}

	/*
	 * Figure out the worst case net increase in POSIX locks and account
	 * for it now before we start modifying things.  If neither the
	 * first nor last locks match we have an issue.  If there is only
	 * one overlapping range which needs to be clipped on both ends
	 * we wind up having to create up to two new locks, else only one.
	 *
	 * When unlocking the worst case is always 1 new lock if our
	 * unlock request cuts the middle out of an existing lock range.
	 *
	 * count represents the 'cleanup' adjustment needed.  It starts
	 * negative, is incremented whenever we create a new POSIX lock,
	 * and decremented whenever we delete an existing one.  At the
	 * end of the day it had better be <= 0 or we didn't calculate the
	 * worst case properly here.
	 */
	count = 0;
	if (flags & F_POSIX) {
		if (!lf_match(first_match, type, flags) &&
		    !lf_match(last_match, type, flags)
		) {
			if (double_clip && type != F_UNLCK)
				count = -2;
			else
				count = -1;
		}
		if (count && lf_count_change(owner, -count)) {
			error = ENOLCK;
			goto do_cleanup;
		}
	}
	/* else flock style lock which encompasses entire range */
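
	/*
	 * Illustrative sketch of the accounting above: if we own a single
	 * F_RDLCK over [0,99] and request an F_WRLCK over [40,59], neither
	 * end matches the new type, double_clip applies, and count starts
	 * at -2: one slot for the new [40,59] lock and one for the [60,99]
	 * range split off the original.  Each insertion below increments
	 * count back toward zero.
	 */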

	/*
	 * Create and insert the lock representing the requested range.
	 * Adjust the net POSIX lock count.  We have to move our insertion
	 * point since brange now represents the first record >= start.
	 *
	 * When unlocking, no new lock is inserted but we still clip.
	 */
	if (type != F_UNLCK) {
		brange = new_range1;
		new_range1 = NULL;
		lf_create_range(brange, owner, type, flags, start, end);
		lf_insert(&lock->lf_range, brange, insert_point);
		insert_point = brange;
		if (flags & F_POSIX)
			++count;
	} else {
		brange = NULL;
	}

	/*
	 * Handle the double_clip case.  This is the only case where
	 * we wind up having to add TWO locks.
	 */
	if (double_clip) {
		KKASSERT(first_match == last_match);
		last_match = new_range2;
		new_range2 = NULL;
		lf_create_range(last_match, first_match->lf_owner,
				first_match->lf_type, first_match->lf_flags,
				end + 1, first_match->lf_end);
		first_match->lf_end = start - 1;
		first_match->lf_flags &= ~F_NOEND;

		/*
		 * Figure out where to insert the right side clip.
		 */
		lf_insert(&lock->lf_range, last_match, first_match);
		if (last_match->lf_flags & F_POSIX)
			++count;
	}
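
	/*
	 * Illustrative sketch of the double clip, continuing the example
	 * above (we own [0,99], request [40,59]):
	 *
	 *	before:  [0 ............................. 99]
	 *	after:   [0 .. 39][40 <new> 59][60 ...... 99]
	 *
	 * first_match is trimmed to keep the left piece; new_range2
	 * becomes the split-off right piece.
	 */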

	/*
	 * Clip or destroy the locks between first_match and last_match,
	 * inclusive.  Ignore the primary lock we created (brange).  Note
	 * that if double-clipped, first_match and last_match will be
	 * outside our clipping range.  Otherwise first_match and last_match
	 * will be deleted.
	 *
	 * We have already taken care of any double clipping.
	 *
	 * The insert_point may become invalid as we delete records, do not
	 * use that pointer any more.  Also, when removing something other
	 * than 'range' we have to check to see if the item we are removing
	 * is 'next' and adjust 'next' properly.
	 *
	 * NOTE: brange will be NULL if F_UNLCKing.
	 */
	TAILQ_INIT(&deadlist);
	next = first_match;

	while ((range = next) != NULL) {
		next = TAILQ_NEXT(range, lf_link);

		/*
		 * Ignore elements that we do not own and ignore the
		 * primary request range which we just created.
		 */
		if (range->lf_owner != owner || range == brange)
			continue;

		/*
		 * We may have to wakeup a waiter when downgrading a lock.
		 */
		if (type == F_UNLCK)
			wakeup_needed = 1;
		if (type == F_RDLCK && range->lf_type == F_WRLCK)
			wakeup_needed = 1;

		/*
		 * Clip left.  This can only occur on first_match.
		 *
		 * Merge the left clip with brange if possible.  This must
		 * be done specifically, not in the optimized merge heuristic
		 * below, since we may have counted on it in our 'count'
		 * calculation above.
		 */
		if (range->lf_start < start) {
			KKASSERT(range == first_match);
			if (brange &&
			    range->lf_end >= start - 1 &&
			    lf_match(range, type, flags)) {
				range->lf_end = brange->lf_end;
				range->lf_flags |= brange->lf_flags & F_NOEND;
				/*
				 * Removing something other than 'range',
				 * adjust 'next' if necessary.
				 */
				if (next == brange)
					next = TAILQ_NEXT(next, lf_link);
				TAILQ_REMOVE(&lock->lf_range, brange, lf_link);
				if (brange->lf_flags & F_POSIX)
					--count;
				TAILQ_INSERT_TAIL(&deadlist, brange, lf_link);
				brange = range;
			} else if (range->lf_end >= start) {
				range->lf_end = start - 1;
				if (type != F_UNLCK)
					range->lf_flags &= ~F_NOEND;
			}
			if (range == last_match)
				break;
			continue;
		}

		/*
		 * Clip right.  This can only occur on last_match.
		 *
		 * Merge the right clip if possible.  This must be done
		 * specifically, not in the optimized merge heuristic
		 * below, since we may have counted on it in our 'count'
		 * calculation.
		 *
		 * Since we are adjusting lf_start, we have to move the
		 * record to maintain the sorted list.  Since lf_start is
		 * only getting larger we can use the next element as the
		 * insert point (we don't have to backtrack).
		 */
		if (range->lf_end > end) {
			KKASSERT(range == last_match);
			if (brange &&
			    range->lf_start <= end + 1 &&
			    lf_match(range, type, flags)) {
				brange->lf_end = range->lf_end;
				brange->lf_flags |= range->lf_flags & F_NOEND;
				TAILQ_REMOVE(&lock->lf_range, range, lf_link);
				if (range->lf_flags & F_POSIX)
					--count;
				TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
			} else if (range->lf_start <= end) {
				range->lf_start = end + 1;
				TAILQ_REMOVE(&lock->lf_range, range, lf_link);
				lf_insert(&lock->lf_range, range, next);
			}
			/* range == last_match, we are done */
			break;
		}

		/*
		 * The record must be entirely enclosed.  Note that the
		 * record could be first_match or last_match, and will be
		 * deleted.
		 */
		KKASSERT(range->lf_start >= start && range->lf_end <= end);
		TAILQ_REMOVE(&lock->lf_range, range, lf_link);
		if (range->lf_flags & F_POSIX)
			--count;
		TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
		if (range == last_match)
			break;
	}

	/*
	 * Attempt to merge locks adjacent to brange.  For example, we may
	 * have had to clip first_match and/or last_match, and they might
	 * be adjacent.  Or there might simply have been an adjacent lock
	 * already there.
	 *
	 * Don't get fancy, just check adjacent elements in the list if they
	 * happen to be owned by us.
	 *
	 * This case only gets hit if we have a situation where a shared
	 * and exclusive lock are adjacent, and the exclusive lock is
	 * downgraded to shared or the shared lock is upgraded to exclusive.
	 */
	if (brange) {
		range = TAILQ_PREV(brange, lockf_range_list, lf_link);
		if (range &&
		    range->lf_owner == owner &&
		    range->lf_end == brange->lf_start - 1 &&
		    lf_match(range, type, flags)
		) {
			/*
			 * Extend range to cover brange and scrap brange.
			 */
			range->lf_end = brange->lf_end;
			range->lf_flags |= brange->lf_flags & F_NOEND;
			TAILQ_REMOVE(&lock->lf_range, brange, lf_link);
			if (brange->lf_flags & F_POSIX)
				--count;
			TAILQ_INSERT_TAIL(&deadlist, brange, lf_link);
			brange = range;
		}
		range = TAILQ_NEXT(brange, lf_link);
		if (range &&
		    range->lf_owner == owner &&
		    range->lf_start == brange->lf_end + 1 &&
		    lf_match(range, type, flags)
		) {
			/*
			 * Extend brange to cover range and scrap range.
			 */
			brange->lf_end = range->lf_end;
			brange->lf_flags |= range->lf_flags & F_NOEND;
			TAILQ_REMOVE(&lock->lf_range, range, lf_link);
			if (range->lf_flags & F_POSIX)
				--count;
			TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
		}
	}

	/*
	 * Destroy deleted elements.  We didn't want to do it in the loop
	 * because the free() might have blocked.
	 *
	 * Adjust the count for any posix locks we thought we might create
	 * but didn't.
	 */
	while ((range = TAILQ_FIRST(&deadlist)) != NULL) {
		TAILQ_REMOVE(&deadlist, range, lf_link);
		lf_destroy_range(range);
	}

	KKASSERT(count <= 0);
	if (count < 0)
		lf_count_change(owner, count);
do_wakeup:
	lf_print_lock(lock);
	if (wakeup_needed)
		lf_wakeup(lock, start, end);
	error = 0;
do_cleanup:
	if (new_range1 != NULL)
		lf_destroy_range(new_range1);
	if (new_range2 != NULL)
		lf_destroy_range(new_range2);
	return(error);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct flock *fl, struct lockf *lock, struct proc *owner,
	   int type, int flags, off_t start, off_t end)
{
	struct lockf_range *range;

	TAILQ_FOREACH(range, &lock->lf_range, lf_link)
		if (range->lf_owner != owner &&
		    lf_overlap(range, start, end) &&
		    (type == F_WRLCK || range->lf_type == F_WRLCK))
			break;
	if (range == NULL) {
		fl->l_type = F_UNLCK;
		return(0);
	}
	fl->l_type = range->lf_type;
	fl->l_whence = SEEK_SET;
	fl->l_start = range->lf_start;
	if (range->lf_flags & F_NOEND)
		fl->l_len = 0;
	else
		fl->l_len = range->lf_end - range->lf_start + 1;
	if (range->lf_owner != NULL && (range->lf_flags & F_POSIX))
		fl->l_pid = range->lf_owner->p_pid;
	else
		fl->l_pid = -1;
	return(0);
}

/*
 * Wakeup pending lock attempts.  Theoretically we can stop as soon as
 * we encounter an exclusive request that covers the whole range (at least
 * insofar as the sleep code above calls lf_wakeup() if it would otherwise
 * exit instead of loop), but for now just wakeup all overlapping
 * requests.  XXX
 */
static void
lf_wakeup(struct lockf *lock, off_t start, off_t end)
{
	struct lockf_range *range, *nrange;

	TAILQ_FOREACH_MUTABLE(range, &lock->lf_blocked, lf_link, nrange) {
		if (lf_overlap(range, start, end) == 0)
			continue;
		TAILQ_REMOVE(&lock->lf_blocked, range, lf_link);
		range->lf_flags = 1;
		wakeup(range);
	}
}

/*
 * Allocate a range structure and initialize it sufficiently such that
 * lf_destroy_range() does not barf.
 */
static struct lockf_range *
lf_alloc_range(void)
{
	struct lockf_range *range;

#ifdef INVARIANTS
	lf_global_counter++;
#endif
	range = kmalloc(sizeof(struct lockf_range), M_LOCKF, M_WAITOK);
	range->lf_owner = NULL;
	return(range);
}

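/*
 * Insert elm into the range list, keeping the list sorted by lf_start.
 * insert_point, if non-NULL, is a hint: the scan for the true insert
 * position starts there and only moves forward.
 */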
static void
lf_insert(struct lockf_range_list *list, struct lockf_range *elm,
	  struct lockf_range *insert_point)
{
	while (insert_point && insert_point->lf_start < elm->lf_start)
		insert_point = TAILQ_NEXT(insert_point, lf_link);
	if (insert_point != NULL)
		TAILQ_INSERT_BEFORE(insert_point, elm, lf_link);
	else
		TAILQ_INSERT_TAIL(list, elm, lf_link);
}

static void
lf_create_range(struct lockf_range *range, struct proc *owner, int type,
		int flags, off_t start, off_t end)
{
	KKASSERT(start <= end);
	range->lf_type = type;
	range->lf_flags = flags;
	range->lf_start = start;
	range->lf_end = end;
	range->lf_owner = owner;

	lf_printf("lf_create_range: %lld..%lld\n",
		  range->lf_start, range->lf_end);
}

static void
lf_destroy_range(struct lockf_range *range)
{
	lf_printf("lf_destroy_range: %lld..%lld\n",
		  range->lf_start, range->lf_end);
	kfree(range, M_LOCKF);
#ifdef INVARIANTS
	lf_global_counter--;
	KKASSERT(lf_global_counter >= 0);
#endif
}

#ifdef LOCKF_DEBUG

static void
_lf_printf(const char *ctl, ...)
{
	struct proc *p;
	__va_list va;

	if (lf_print_ranges) {
		if ((p = curproc) != NULL)
			printf("pid %d (%s): ", p->p_pid, p->p_comm);
	}
	__va_start(va, ctl);
	vprintf(ctl, va);
	__va_end(va);
}

static void
_lf_print_lock(const struct lockf *lock)
{
	struct lockf_range *range;

	if (lf_print_ranges == 0)
		return;

	if (TAILQ_EMPTY(&lock->lf_range)) {
		lf_printf("lockf %p: no ranges locked\n", lock);
	} else {
		lf_printf("lockf %p:\n", lock);
	}
	TAILQ_FOREACH(range, &lock->lf_range, lf_link)
		printf("\t%lld..%lld type %s owned by %d\n",
		       range->lf_start, range->lf_end,
		       range->lf_type == F_RDLCK ? "shared" : "exclusive",
		       range->lf_flags & F_POSIX ? range->lf_owner->p_pid : -1);
	if (TAILQ_EMPTY(&lock->lf_blocked))
		printf("no process waiting for range\n");
	else
		printf("blocked locks:\n");
	TAILQ_FOREACH(range, &lock->lf_blocked, lf_link)
		printf("\t%lld..%lld type %s waiting on %p\n",
		       range->lf_start, range->lf_end,
		       range->lf_type == F_RDLCK ? "shared" : "exclusive",
		       range);
}
#endif /* LOCKF_DEBUG */