/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
 * $FreeBSD: src/sys/kern/kern_lockf.c,v 1.25 1999/11/16 16:28:56 phk Exp $
 * $DragonFly: src/sys/kern/kern_lockf.c,v 1.2 2003/06/17 04:28:41 dillon Exp $
 */

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>

#include <sys/lockf.h>

/*
 * This variable controls the maximum number of processes that will
 * be checked in doing deadlock detection.
 */
static int maxlockdepth = MAXDEPTH;

#ifdef LOCKF_DEBUG
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

static int      lockf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

#define NOLOCKF (struct lockf *)0       /* null lockf pointer: no lock */
#define SELF    0x1                     /* match locks owned by the caller */
#define OTHERS  0x2                     /* match locks owned by anyone else */
static int       lf_clearlock __P((struct lockf *));
static int       lf_findoverlap __P((struct lockf *,
            struct lockf *, int, struct lockf ***, struct lockf **));
static struct lockf *
         lf_getblock __P((struct lockf *));
static int       lf_getlock __P((struct lockf *, struct flock *));
static int       lf_setlock __P((struct lockf *));
static void      lf_split __P((struct lockf *, struct lockf *));
static void      lf_wakelock __P((struct lockf *));

/*
 * Advisory record locking support
 */
int
lf_advlock(ap, head, size)
        struct vop_advlock_args /* {
                struct vnode *a_vp;
                caddr_t  a_id;
                int  a_op;
                struct flock *a_fl;
                int  a_flags;
        } */ *ap;
        struct lockf **head;
        u_quad_t size;
{
        register struct flock *fl = ap->a_fl;
        register struct lockf *lock;
        off_t start, end;
        int error;

        /*
         * Convert the flock structure into a start and end.
         */
        switch (fl->l_whence) {

        case SEEK_SET:
        case SEEK_CUR:
                /*
                 * Caller is responsible for adding any necessary offset
                 * when SEEK_CUR is used.
                 */
                start = fl->l_start;
                break;

        case SEEK_END:
                start = size + fl->l_start;
                break;

        default:
                return (EINVAL);
        }
        if (start < 0)
                return (EINVAL);
        if (fl->l_len == 0)
                end = -1;
        else {
                end = start + fl->l_len - 1;
                if (end < start)
                        return (EINVAL);
        }
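        /*
         * Worked example (added commentary, not original text): a
         * request with l_whence = SEEK_SET, l_start = 100 and
         * l_len = 50 yields the inclusive byte range [100, 149];
         * l_len = 0 means "lock to end of file", represented
         * internally by end = -1.
         */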
        /*
         * Avoid the common case of unlocking when inode has no locks.
         */
        if (*head == (struct lockf *)0) {
                if (ap->a_op != F_SETLK) {
                        fl->l_type = F_UNLCK;
                        return (0);
                }
        }
        /*
         * Create the lockf structure
         */
        MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK);
        lock->lf_start = start;
        lock->lf_end = end;
        lock->lf_id = ap->a_id;
/*      lock->lf_inode = ip; */ /* XXX JH */
        lock->lf_type = fl->l_type;
        lock->lf_head = head;
        lock->lf_next = (struct lockf *)0;
        TAILQ_INIT(&lock->lf_blkhd);
        lock->lf_flags = ap->a_flags;
        /*
         * Do the requested operation.
         */
        switch (ap->a_op) {
        case F_SETLK:
                return (lf_setlock(lock));

        case F_UNLCK:
                error = lf_clearlock(lock);
                FREE(lock, M_LOCKF);
                return (error);

        case F_GETLK:
                error = lf_getlock(lock, fl);
                FREE(lock, M_LOCKF);
                return (error);

        default:
                free(lock, M_LOCKF);
                return (EINVAL);
        }
        /* NOTREACHED */
}
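
/*
 * Illustrative usage sketch (added commentary; foo_advlock, VTOI and
 * the inode field names are assumptions modeled on the historical UFS
 * code, not part of this file).  A filesystem's VOP_ADVLOCK entry
 * point typically just forwards here with its in-core lock list head
 * and current file size:
 *
 *      int
 *      foo_advlock(struct vop_advlock_args *ap)
 *      {
 *              struct inode *ip = VTOI(ap->a_vp);
 *
 *              return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *      }
 */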

/*
 * Set a byte-range lock.
 */
static int
lf_setlock(lock)
        register struct lockf *lock;
{
        register struct lockf *block;
        struct lockf **head = lock->lf_head;
        struct lockf **prev, *overlap, *ltmp;
        static char lockstr[] = "lockf";
        int ovcase, priority, needtolink, error;

#ifdef LOCKF_DEBUG
        if (lockf_debug & 1)
                lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

        /*
         * Set the sleep priority: writers sleep at a slightly less
         * favorable priority than readers, and PCATCH makes the sleep
         * interruptible by signals.
         */
        priority = PLOCK;
        if (lock->lf_type == F_WRLCK)
                priority += 4;
        priority |= PCATCH;
        /*
         * Scan lock list for this file looking for locks that would block us.
         */
        while ((block = lf_getblock(lock))) {
                /*
                 * Free the structure and return if nonblocking.
                 */
                if ((lock->lf_flags & F_WAIT) == 0) {
                        FREE(lock, M_LOCKF);
                        return (EAGAIN);
                }
                /*
                 * We are blocked. Since flock style locks cover
                 * the whole file, there is no chance for deadlock.
                 * For byte-range locks we must check for deadlock.
                 *
                 * Deadlock detection is done by looking through the
                 * wait channels to see if there are any cycles that
                 * involve us. MAXDEPTH is set just to make sure we
                 * do not go off into neverland.
                 */
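                /*
                 * Worked example (added commentary): process A holds
                 * [0, 9] and sleeps waiting for B's lock on [10, 19],
                 * while B is already asleep waiting for A's [0, 9].
                 * The walk below follows B's wait channel back to A,
                 * detects the cycle, and fails with EDEADLK.
                 */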
                if ((lock->lf_flags & F_POSIX) &&
                    (block->lf_flags & F_POSIX)) {
                        register struct proc *wproc;
                        register struct lockf *waitblock;
                        int i = 0;

                        /* The block is waiting on something */
                        wproc = (struct proc *)block->lf_id;
                        while (wproc->p_wchan &&
                               (wproc->p_wmesg == lockstr) &&
                               (i++ < maxlockdepth)) {
                                waitblock = (struct lockf *)wproc->p_wchan;
                                /* Get the owner of the blocking lock */
                                waitblock = waitblock->lf_next;
                                if ((waitblock->lf_flags & F_POSIX) == 0)
                                        break;
                                wproc = (struct proc *)waitblock->lf_id;
                                if (wproc == (struct proc *)lock->lf_id) {
                                        free(lock, M_LOCKF);
                                        return (EDEADLK);
                                }
                        }
                }
                /*
                 * For flock type locks, we must first remove
                 * any shared locks that we hold before we sleep
                 * waiting for an exclusive lock.
                 */
                if ((lock->lf_flags & F_FLOCK) &&
                    lock->lf_type == F_WRLCK) {
                        lock->lf_type = F_UNLCK;
                        (void) lf_clearlock(lock);
                        lock->lf_type = F_WRLCK;
                }
                /*
                 * Add our lock to the blocked list and sleep until we're free.
                 * Remember who blocked us (for deadlock detection).
                 */
                lock->lf_next = block;
                TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block);
#ifdef LOCKF_DEBUG
                if (lockf_debug & 1) {
                        lf_print("lf_setlock: blocking on", block);
                        lf_printlist("lf_setlock", block);
                }
#endif /* LOCKF_DEBUG */
                error = tsleep((caddr_t)lock, priority, lockstr, 0);
                /*
                 * We may have been awakened by a signal and/or by a
                 * debugger continuing us (in which cases we must remove
                 * ourselves from the blocked list) and/or by another
                 * process releasing a lock (in which case we have
                 * already been removed from the blocked list and our
                 * lf_next field set to NOLOCKF).
                 */
                if (lock->lf_next) {
                        TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
                        lock->lf_next = NOLOCKF;
                }
                if (error) {
                        free(lock, M_LOCKF);
                        return (error);
                }
        }
        /*
         * No blocks!!  Add the lock.  Note that we will
         * downgrade or upgrade any overlapping locks this
         * process already owns.
         *
         * Skip over locks owned by other processes.
         * Handle any locks that overlap and are owned by ourselves.
         */
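        /*
         * Worked example (added commentary): if this process holds a
         * read lock on [0, 99] and now requests a write lock on the
         * same range, case 1 below upgrades the existing entry in
         * place; the reverse downgrade additionally wakes any waiters,
         * since a shared lock may let them proceed.
         */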
        prev = head;
        block = *head;
        needtolink = 1;
        for (;;) {
                ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap);
                if (ovcase)
                        block = overlap->lf_next;
                /*
                 * Six cases:
                 *      0) no overlap
                 *      1) overlap == lock
                 *      2) overlap contains lock
                 *      3) lock contains overlap
                 *      4) overlap starts before lock
                 *      5) overlap ends after lock
                 */
                switch (ovcase) {
                case 0: /* no overlap */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        break;

                case 1: /* overlap == lock */
                        /*
                         * If downgrading lock, others may be
                         * able to acquire it.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK)
                                lf_wakelock(overlap);
                        overlap->lf_type = lock->lf_type;
                        FREE(lock, M_LOCKF);
                        lock = overlap; /* for debug output below */
                        break;

                case 2: /* overlap contains lock */
                        /*
                         * Check for common starting point and different types.
                         */
                        if (overlap->lf_type == lock->lf_type) {
                                free(lock, M_LOCKF);
                                lock = overlap; /* for debug output below */
                                break;
                        }
                        if (overlap->lf_start == lock->lf_start) {
                                *prev = lock;
                                lock->lf_next = overlap;
                                overlap->lf_start = lock->lf_end + 1;
                        } else
                                lf_split(overlap, lock);
                        lf_wakelock(overlap);
                        break;

                case 3: /* lock contains overlap */
                        /*
                         * If downgrading lock, others may be able to
                         * acquire it, otherwise take the list.
                         */
                        if (lock->lf_type == F_RDLCK &&
                            overlap->lf_type == F_WRLCK) {
                                lf_wakelock(overlap);
                        } else {
                                while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
                                        ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
                                        TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
                                            lf_block);
                                        TAILQ_INSERT_TAIL(&lock->lf_blkhd,
                                            ltmp, lf_block);
                                        ltmp->lf_next = lock;
                                }
                        }
                        /*
                         * Add the new lock if necessary and delete the overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap->lf_next;
                                prev = &lock->lf_next;
                                needtolink = 0;
                        } else
                                *prev = overlap->lf_next;
                        free(overlap, M_LOCKF);
                        continue;

                case 4: /* overlap starts before lock */
                        /*
                         * Add lock after overlap on the list.
                         */
                        lock->lf_next = overlap->lf_next;
                        overlap->lf_next = lock;
                        overlap->lf_end = lock->lf_start - 1;
                        prev = &lock->lf_next;
                        lf_wakelock(overlap);
                        needtolink = 0;
                        continue;

                case 5: /* overlap ends after lock */
                        /*
                         * Add the new lock before overlap.
                         */
                        if (needtolink) {
                                *prev = lock;
                                lock->lf_next = overlap;
                        }
                        overlap->lf_start = lock->lf_end + 1;
                        lf_wakelock(overlap);
                        break;
                }
                break;
        }
#ifdef LOCKF_DEBUG
        if (lockf_debug & 1) {
                lf_print("lf_setlock: got the lock", lock);
                lf_printlist("lf_setlock", lock);
        }
#endif /* LOCKF_DEBUG */
        return (0);
}

/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wake up anyone we can.
 */
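/*
 * Worked example (added commentary): clearing [20, 29] out of a held
 * lock on [0, 99] is case 2 below; lf_split() shrinks the existing
 * entry to [0, 19] and creates a new entry covering [30, 99].
 */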
static int
lf_clearlock(unlock)
        register struct lockf *unlock;
{
        struct lockf **head = unlock->lf_head;
        register struct lockf *lf = *head;
        struct lockf *overlap, **prev;
        int ovcase;

        if (lf == NOLOCKF)
                return (0);
#ifdef LOCKF_DEBUG
        if (unlock->lf_type != F_UNLCK)
                panic("lf_clearlock: bad type");
        if (lockf_debug & 1)
                lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
        prev = head;
        while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap))) {
                /*
                 * Wakeup the list of locks to be retried.
                 */
                lf_wakelock(overlap);

                switch (ovcase) {

                case 1: /* overlap == lock */
                        *prev = overlap->lf_next;
                        FREE(overlap, M_LOCKF);
                        break;

                case 2: /* overlap contains lock: split it */
                        if (overlap->lf_start == unlock->lf_start) {
                                overlap->lf_start = unlock->lf_end + 1;
                                break;
                        }
                        lf_split(overlap, unlock);
                        overlap->lf_next = unlock->lf_next;
                        break;

                case 3: /* lock contains overlap */
                        *prev = overlap->lf_next;
                        lf = overlap->lf_next;
                        free(overlap, M_LOCKF);
                        continue;

                case 4: /* overlap starts before lock */
                        overlap->lf_end = unlock->lf_start - 1;
                        prev = &overlap->lf_next;
                        lf = overlap->lf_next;
                        continue;

                case 5: /* overlap ends after lock */
                        overlap->lf_start = unlock->lf_end + 1;
                        break;
                }
                break;
        }
#ifdef LOCKF_DEBUG
        if (lockf_debug & 1)
                lf_printlist("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */
        return (0);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
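/*
 * Note (added commentary, describing standard fcntl F_GETLK
 * semantics): on return the caller's flock holds the range, type and
 * owning pid of the first conflicting lock, or has l_type set to
 * F_UNLCK when the requested lock could have been granted.
 */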
static int
lf_getlock(lock, fl)
        register struct lockf *lock;
        register struct flock *fl;
{
        register struct lockf *block;

#ifdef LOCKF_DEBUG
        if (lockf_debug & 1)
                lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

        if ((block = lf_getblock(lock))) {
                fl->l_type = block->lf_type;
                fl->l_whence = SEEK_SET;
                fl->l_start = block->lf_start;
                if (block->lf_end == -1)
                        fl->l_len = 0;
                else
                        fl->l_len = block->lf_end - block->lf_start + 1;
                if (block->lf_flags & F_POSIX)
                        fl->l_pid = ((struct proc *)(block->lf_id))->p_pid;
                else
                        fl->l_pid = -1;
        } else {
                fl->l_type = F_UNLCK;
        }
        return (0);
}

/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf *
lf_getblock(lock)
        register struct lockf *lock;
{
        struct lockf **prev, *overlap, *lf = *(lock->lf_head);
        int ovcase;

        prev = lock->lf_head;
        while ((ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap))) {
                /*
                 * We've found an overlap, see if it blocks us
                 */
                if (lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)
                        return (overlap);
                /*
                 * Nope, point to the next one on the list and
                 * see if it blocks us
                 */
                lf = overlap->lf_next;
        }
        return (NOLOCKF);
}

/*
 * Walk the list of locks for an inode to
 * find an overlapping lock (if any).
 *
 * NOTE: this returns only the FIRST overlapping lock.  There
 *       may be more than one.
 */
static int
lf_findoverlap(lf, lock, type, prev, overlap)
        register struct lockf *lf;
        struct lockf *lock;
        int type;
        struct lockf ***prev;
        struct lockf **overlap;
{
        off_t start, end;

        *overlap = lf;
        if (lf == NOLOCKF)
                return (0);
#ifdef LOCKF_DEBUG
        if (lockf_debug & 2)
                lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
        start = lock->lf_start;
        end = lock->lf_end;
        while (lf != NOLOCKF) {
                if (((type & SELF) && lf->lf_id != lock->lf_id) ||
                    ((type & OTHERS) && lf->lf_id == lock->lf_id)) {
                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }
#ifdef LOCKF_DEBUG
                if (lockf_debug & 2)
                        lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
                /*
                 * OK, check for overlap
                 *
                 * Six cases:
                 *      0) no overlap
                 *      1) overlap == lock
                 *      2) overlap contains lock
                 *      3) lock contains overlap
                 *      4) overlap starts before lock
                 *      5) overlap ends after lock
                 */
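                /*
                 * Worked example (added commentary): with an existing
                 * lf on [10, 19], a request on [10, 19] is case 1,
                 * [12, 15] is case 2, [5, 25] is case 3, [15, 25] is
                 * case 4, [5, 12] is case 5, and [30, 40] is case 0.
                 */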
                if ((lf->lf_end != -1 && start > lf->lf_end) ||
                    (end != -1 && lf->lf_start > end)) {
                        /* Case 0 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("no overlap\n");
#endif /* LOCKF_DEBUG */
                        if ((type & SELF) && end != -1 && lf->lf_start > end)
                                return (0);
                        *prev = &lf->lf_next;
                        *overlap = lf = lf->lf_next;
                        continue;
                }
                if ((lf->lf_start == start) && (lf->lf_end == end)) {
                        /* Case 1 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
                        return (1);
                }
                if ((lf->lf_start <= start) &&
                    (end != -1) &&
                    ((lf->lf_end >= end) || (lf->lf_end == -1))) {
                        /* Case 2 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
                        return (2);
                }
                if (start <= lf->lf_start &&
                    (end == -1 ||
                     (lf->lf_end != -1 && end >= lf->lf_end))) {
                        /* Case 3 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
                        return (3);
                }
                if ((lf->lf_start < start) &&
                    ((lf->lf_end >= start) || (lf->lf_end == -1))) {
                        /* Case 4 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
                        return (4);
                }
                if ((lf->lf_start > start) &&
                    (end != -1) &&
                    ((lf->lf_end > end) || (lf->lf_end == -1))) {
                        /* Case 5 */
#ifdef LOCKF_DEBUG
                        if (lockf_debug & 2)
                                printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
                        return (5);
                }
                panic("lf_findoverlap: default");
        }
        return (0);
}

/*
 * Split a lock and a contained region into
 * two or three locks as necessary.
 */
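/*
 * Worked example (added commentary): splitting a held lock lock1 on
 * [0, 99] around a contained region lock2 on [40, 59] shrinks lock1
 * to [0, 39], links lock2 in for [40, 59], and allocates a new piece
 * covering [60, 99].
 */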
static void
lf_split(lock1, lock2)
        register struct lockf *lock1;
        register struct lockf *lock2;
{
        register struct lockf *splitlock;

#ifdef LOCKF_DEBUG
        if (lockf_debug & 2) {
                lf_print("lf_split", lock1);
                lf_print("splitting from", lock2);
        }
#endif /* LOCKF_DEBUG */
        /*
         * Check to see if splitting into only two pieces.
         */
        if (lock1->lf_start == lock2->lf_start) {
                lock1->lf_start = lock2->lf_end + 1;
                lock2->lf_next = lock1;
                return;
        }
        if (lock1->lf_end == lock2->lf_end) {
                lock1->lf_end = lock2->lf_start - 1;
                lock2->lf_next = lock1->lf_next;
                lock1->lf_next = lock2;
                return;
        }
        /*
         * Make a new lock consisting of the last part of
         * the encompassing lock
         */
        MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK);
        bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock);
        splitlock->lf_start = lock2->lf_end + 1;
        TAILQ_INIT(&splitlock->lf_blkhd);
        lock1->lf_end = lock2->lf_start - 1;
        /*
         * OK, now link it in
         */
        splitlock->lf_next = lock1->lf_next;
        lock2->lf_next = splitlock;
        lock1->lf_next = lock2;
}

/*
 * Wake up everyone blocked on the given lock.
 */
static void
lf_wakelock(listhead)
        struct lockf *listhead;
{
        register struct lockf *wakelock;

        while (!TAILQ_EMPTY(&listhead->lf_blkhd)) {
                wakelock = TAILQ_FIRST(&listhead->lf_blkhd);
                TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block);
                wakelock->lf_next = NOLOCKF;
#ifdef LOCKF_DEBUG
                if (lockf_debug & 2)
                        lf_print("lf_wakelock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
                wakeup((caddr_t)wakelock);
        }
}

#ifdef LOCKF_DEBUG
/*
 * Print out a lock.
 */
void
lf_print(tag, lock)
        char *tag;
        register struct lockf *lock;
{

        printf("%s: lock %p for ", tag, (void *)lock);
        if (lock->lf_flags & F_POSIX)
                printf("proc %ld", (long)((struct proc *)lock->lf_id)->p_pid);
        else
                printf("id %p", (void *)lock->lf_id);
        /* XXX no %qd in kernel.  Truncate. */
        printf(" in ino %lu on dev <%d, %d>, %s, start %ld, end %ld",
            (u_long)lock->lf_inode->i_number,
            major(lock->lf_inode->i_dev),
            minor(lock->lf_inode->i_dev),
            lock->lf_type == F_RDLCK ? "shared" :
            lock->lf_type == F_WRLCK ? "exclusive" :
            lock->lf_type == F_UNLCK ? "unlock" :
            "unknown", (long)lock->lf_start, (long)lock->lf_end);
        if (!TAILQ_EMPTY(&lock->lf_blkhd))
                printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd));
        else
                printf("\n");
}

void
lf_printlist(tag, lock)
        char *tag;
        struct lockf *lock;
{
        register struct lockf *lf, *blk;

        printf("%s: Lock list for ino %lu on dev <%d, %d>:\n",
            tag, (u_long)lock->lf_inode->i_number,
            major(lock->lf_inode->i_dev),
            minor(lock->lf_inode->i_dev));
        for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) {
                printf("\tlock %p for ", (void *)lf);
                if (lf->lf_flags & F_POSIX)
                        printf("proc %ld",
                            (long)((struct proc *)lf->lf_id)->p_pid);
                else
                        printf("id %p", (void *)lf->lf_id);
                /* XXX no %qd in kernel.  Truncate. */
                printf(", %s, start %ld, end %ld",
                    lf->lf_type == F_RDLCK ? "shared" :
                    lf->lf_type == F_WRLCK ? "exclusive" :
                    lf->lf_type == F_UNLCK ? "unlock" :
                    "unknown", (long)lf->lf_start, (long)lf->lf_end);
                TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) {
                        printf("\n\t\tlock request %p for ", (void *)blk);
                        if (blk->lf_flags & F_POSIX)
                                printf("proc %ld",
                                    (long)((struct proc *)blk->lf_id)->p_pid);
                        else
                                printf("id %p", (void *)blk->lf_id);
                        /* XXX no %qd in kernel.  Truncate. */
                        printf(", %s, start %ld, end %ld",
                            blk->lf_type == F_RDLCK ? "shared" :
                            blk->lf_type == F_WRLCK ? "exclusive" :
                            blk->lf_type == F_UNLCK ? "unlock" :
                            "unknown", (long)blk->lf_start,
                            (long)blk->lf_end);
                        if (!TAILQ_EMPTY(&blk->lf_blkhd))
                                panic("lf_printlist: bad list");
                }
                printf("\n");
        }
}
#endif /* LOCKF_DEBUG */