/*
 * Copyright (c) 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Copyright (C) 1997
 *	John S. Dyson.  All rights reserved.
 *
 * This code contains ideas from software contributed to Berkeley by
 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
 * System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_lock.c	8.18 (Berkeley) 5/21/95
 * $FreeBSD: src/sys/kern/kern_lock.c,v 1.31.2.3 2001/12/25 01:44:44 dillon Exp $
 * $DragonFly: src/sys/kern/kern_lock.c,v 1.2 2003/06/17 04:28:41 dillon Exp $
 */

#include "opt_lint.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/systm.h>

/*
 * Locking primitives implementation.
 * Locks provide shared/exclusive synchronization.
 */

#ifdef SIMPLELOCK_DEBUG
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

#if defined(DIAGNOSTIC)
#define LOCK_INLINE
#else
#define LOCK_INLINE __inline
#endif

#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
	LK_SHARE_NONZERO | LK_WAIT_NONZERO)

static int acquire(struct lock *lkp, int extflags, int wanted);
static int apause(struct lock *lkp, int flags);
static int acquiredrain(struct lock *lkp, int extflags);

static LOCK_INLINE void
sharelock(struct lock *lkp, int incr) {
	lkp->lk_flags |= LK_SHARE_NONZERO;
	lkp->lk_sharecount += incr;
}

static LOCK_INLINE void
shareunlock(struct lock *lkp, int decr) {

	KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr"));

	if (lkp->lk_sharecount == decr) {
		lkp->lk_flags &= ~LK_SHARE_NONZERO;
		if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) {
			wakeup(lkp);
		}
		lkp->lk_sharecount = 0;
	} else {
		lkp->lk_sharecount -= decr;
	}
}

/*
 * This is the waitloop optimization: briefly spin in the hope that the
 * wanted flags clear before committing to a sleep.  Note that for this
 * to work, simple_lock and simple_unlock must be real subroutines, not
 * macros or inlines, or compiler optimization can defeat the sampling
 * loop.
 */
static int
apause(struct lock *lkp, int flags)
{
#ifdef SMP
	int i, lock_wait;
#endif

	if ((lkp->lk_flags & flags) == 0)
		return 0;
#ifdef SMP
	for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) {
		simple_unlock(&lkp->lk_interlock);
		for (i = LOCK_SAMPLE_WAIT; i > 0; i--)
			if ((lkp->lk_flags & flags) == 0)
				break;
		simple_lock(&lkp->lk_interlock);
		if ((lkp->lk_flags & flags) == 0)
			return 0;
	}
#endif
	return 1;
}

static int
acquire(struct lock *lkp, int extflags, int wanted) {
	int s, error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
		return EBUSY;
	}

	if (((lkp->lk_flags | extflags) & LK_NOPAUSE) == 0) {
		error = apause(lkp, wanted);
		if (error == 0)
			return 0;
	}

	s = splhigh();
	while ((lkp->lk_flags & wanted) != 0) {
		lkp->lk_flags |= LK_WAIT_NONZERO;
		lkp->lk_waitcount++;
		simple_unlock(&lkp->lk_interlock);
		error = tsleep(lkp, lkp->lk_prio, lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		simple_lock(&lkp->lk_interlock);
		if (lkp->lk_waitcount == 1) {
			lkp->lk_flags &= ~LK_WAIT_NONZERO;
			lkp->lk_waitcount = 0;
		} else {
			lkp->lk_waitcount--;
		}
		if (error) {
			splx(s);
			return error;
		}
		if (extflags & LK_SLEEPFAIL) {
			splx(s);
			return ENOLCK;
		}
	}
	splx(s);
	return 0;
}

/*
 * Set, change, or release a lock.
 *
 * Shared requests increment the shared count.  Exclusive requests set the
 * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
 * accepted shared locks and shared-to-exclusive upgrades to go away.
 */
int
#ifndef	DEBUG_LOCKS
lockmgr(lkp, flags, interlkp, p)
#else
debuglockmgr(lkp, flags, interlkp, p, name, file, line)
#endif
	struct lock *lkp;
	u_int flags;
	struct simplelock *interlkp;
	struct proc *p;
#ifdef	DEBUG_LOCKS
	const char *name;	/* Name of lock function */
	const char *file;	/* Name of file call is from */
	int line;		/* Line number in file */
#endif
{
	int error;
	pid_t pid;
	int extflags;

	error = 0;
	if (p == NULL)
		pid = LK_KERNPROC;
	else
		pid = p->p_pid;

	simple_lock(&lkp->lk_interlock);
	if (flags & LK_INTERLOCK)
		simple_unlock(interlkp);

	extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;

	switch (flags & LK_TYPE_MASK) {

	case LK_SHARED:
		/*
		 * If we are not the exclusive lock holder, we have to block
		 * while there is an exclusive lock holder or while an
		 * exclusive lock request or upgrade request is in progress.
		 *
		 * However, if P_DEADLKTREAT is set, we override exclusive
		 * lock requests or upgrade requests (but not the exclusive
		 * lock itself).
		 */
		if (lkp->lk_lockholder != pid) {
			if (p && (p->p_flag & P_DEADLKTREAT)) {
				error = acquire(
					    lkp,
					    extflags,
					    LK_HAVE_EXCL
					);
			} else {
				error = acquire(
					    lkp,
					    extflags,
					    LK_HAVE_EXCL | LK_WANT_EXCL |
					     LK_WANT_UPGRADE
					);
			}
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(p, 1);
			break;
		}
		/*
		 * We hold an exclusive lock, so downgrade it to shared.
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
		if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
			panic("lockmgr: not holding exclusive lock");
		sharelock(lkp, lkp->lk_exclusivecount);
		lkp->lk_exclusivecount = 0;
		lkp->lk_flags &= ~LK_HAVE_EXCL;
		lkp->lk_lockholder = LK_NOPROC;
		if (lkp->lk_waitcount)
			wakeup((void *)lkp);
		break;

	case LK_EXCLUPGRADE:
		/*
		 * If another process is ahead of us to get an upgrade,
		 * then we want to fail rather than have an intervening
		 * exclusive access.
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
		/* fall into normal upgrade */

	case LK_UPGRADE:
		/*
		 * Upgrade a shared lock to an exclusive one.  If another
		 * shared lock has already requested an upgrade to an
		 * exclusive lock, our shared lock is released and an
		 * exclusive lock is requested (which will be granted
		 * after the upgrade).  If we return an error, the file
		 * will always be unlocked.
		 */
		if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
		shareunlock(lkp, 1);
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
		if ((extflags & LK_NOWAIT) &&
		    ((lkp->lk_flags & LK_WANT_UPGRADE) ||
		     lkp->lk_sharecount > 1)) {
			error = EBUSY;
			break;
		}
		if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
			/*
			 * We are the first shared lock to request an upgrade,
			 * so request upgrade and wait for the shared count to
			 * drop to zero, then take exclusive lock.
			 */
			lkp->lk_flags |= LK_WANT_UPGRADE;
			error = acquire(lkp, extflags, LK_SHARE_NONZERO);
			lkp->lk_flags &= ~LK_WANT_UPGRADE;

			if (error)
				break;
			lkp->lk_flags |= LK_HAVE_EXCL;
			lkp->lk_lockholder = pid;
			if (lkp->lk_exclusivecount != 0)
				panic("lockmgr: non-zero exclusive count");
			lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
			lkp->lk_filename = file;
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			COUNT(p, 1);
			break;
		}
		/*
		 * Someone else has requested upgrade.  Release our shared
		 * lock, awaken the upgrade requestor if we are the last
		 * shared lock, then request an exclusive lock.
		 */
		if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
		    LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		/* fall into exclusive request */

	case LK_EXCLUSIVE:
		if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
			/*
			 * Recursive lock.
			 */
			if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(p, 1);
				break;
			}
		}
		/*
		 * If we are just polling, check to see if we will sleep.
		 */
		if ((extflags & LK_NOWAIT) &&
		    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL |
		     LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
			error = EBUSY;
			break;
		}
		/*
		 * Try to acquire the want_exclusive flag.
		 */
		error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
		if (error)
			break;
		lkp->lk_flags |= LK_WANT_EXCL;
		/*
		 * Wait for shared locks and upgrades to finish.
		 */
		error = acquire(lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
		lkp->lk_flags &= ~LK_WANT_EXCL;
		if (error)
			break;
		lkp->lk_flags |= LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		if (lkp->lk_exclusivecount != 0)
			panic("lockmgr: non-zero exclusive count");
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		COUNT(p, 1);
		break;

	case LK_RELEASE:
		if (lkp->lk_exclusivecount != 0) {
			if (lkp->lk_lockholder != pid &&
			    lkp->lk_lockholder != LK_KERNPROC) {
				panic("lockmgr: pid %d, not %s %d unlocking",
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC) {
				COUNT(p, -1);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
				lkp->lk_exclusivecount = 0;
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
			shareunlock(lkp, 1);
			COUNT(p, -1);
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;

	case LK_DRAIN:
		/*
		 * Check that we do not already hold the lock, as it can
		 * never drain if we do.  Unfortunately, we have no way to
		 * check for holding a shared lock, but at least we can
		 * check for an exclusive one.
		 */
		if (lkp->lk_lockholder == pid)
			panic("lockmgr: draining against myself");

		error = acquiredrain(lkp, extflags);
		if (error)
			break;
		lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
		lkp->lk_lockholder = pid;
		lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
		lkp->lk_filename = file;
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		COUNT(p, 1);
		break;

	default:
		simple_unlock(&lkp->lk_interlock);
		panic("lockmgr: unknown locktype request %d",
		    flags & LK_TYPE_MASK);
		/* NOTREACHED */
	}
	if ((lkp->lk_flags & LK_WAITDRAIN) &&
	    (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
	     LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
		lkp->lk_flags &= ~LK_WAITDRAIN;
		wakeup((void *)&lkp->lk_flags);
	}
	simple_unlock(&lkp->lk_interlock);
	return (error);
}

static int
acquiredrain(struct lock *lkp, int extflags) {
	int error;

	if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
		return EBUSY;
	}

	error = apause(lkp, LK_ALL);
	if (error == 0)
		return 0;

	while (lkp->lk_flags & LK_ALL) {
		lkp->lk_flags |= LK_WAITDRAIN;
		simple_unlock(&lkp->lk_interlock);
		error = tsleep(&lkp->lk_flags, lkp->lk_prio,
		    lkp->lk_wmesg,
		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
		simple_lock(&lkp->lk_interlock);
		if (error)
			return error;
		if (extflags & LK_SLEEPFAIL) {
			return ENOLCK;
		}
	}
	return 0;
}

/*
 * Initialize a lock; required before use.
 */
void
lockinit(lkp, prio, wmesg, timo, flags)
	struct lock *lkp;
	int prio;
	char *wmesg;
	int timo;
	int flags;
{

	simple_lock_init(&lkp->lk_interlock);
	lkp->lk_flags = (flags & LK_EXTFLG_MASK);
	lkp->lk_sharecount = 0;
	lkp->lk_waitcount = 0;
	lkp->lk_exclusivecount = 0;
	lkp->lk_prio = prio;
	lkp->lk_wmesg = wmesg;
	lkp->lk_timo = timo;
	lkp->lk_lockholder = LK_NOPROC;
}

/*
 * Determine the status of a lock.
 */
int
lockstatus(lkp, p)
	struct lock *lkp;
	struct proc *p;
{
	int lock_type = 0;

	simple_lock(&lkp->lk_interlock);
	if (lkp->lk_exclusivecount != 0) {
		if (p == NULL || lkp->lk_lockholder == p->p_pid)
			lock_type = LK_EXCLUSIVE;
		else
			lock_type = LK_EXCLOTHER;
	} else if (lkp->lk_sharecount != 0)
		lock_type = LK_SHARED;
	simple_unlock(&lkp->lk_interlock);
	return (lock_type);
}

/*
 * Determine the number of holders of a lock.
 */
int
lockcount(lkp)
	struct lock *lkp;
{
	int count;

	simple_lock(&lkp->lk_interlock);
	count = lkp->lk_exclusivecount + lkp->lk_sharecount;
	simple_unlock(&lkp->lk_interlock);
	return (count);
}

/*
 * Print out information about state of a lock.  Used by VOP_PRINT
 * routines to display status about contained locks.
 */
void
lockmgr_printinfo(lkp)
	struct lock *lkp;
{

	if (lkp->lk_sharecount)
		printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
		    lkp->lk_sharecount);
	else if (lkp->lk_flags & LK_HAVE_EXCL)
		printf(" lock type %s: EXCL (count %d) by pid %d",
		    lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(SIMPLELOCK_DEBUG) && (MAXCPU == 1 || defined(COMPILING_LINT))
#include <sys/kernel.h>
#include <sys/sysctl.h>

static int lockpausetime = 0;
SYSCTL_INT(_debug, OID_AUTO, lockpausetime, CTLFLAG_RW, &lockpausetime, 0, "");

static int simplelockrecurse;

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simplelock *alp;
{

	alp->lock_data = 0;
}

void
_simple_lock(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 1) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_lock: lock held", id, l);
		printf("%s:%d: simple_lock: lock held\n", id, l);
		if (lockpausetime == 1) {
			Debugger("simple_lock");
			/* BACKTRACE(curproc); */
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_lock: lock held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
}

int
_simple_lock_try(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (alp->lock_data)
		return (0);
	if (simplelockrecurse)
		return (1);
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
	return (1);
}

void
_simple_unlock(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 0) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_unlock: lock not held", id, l);
		printf("%s:%d: simple_unlock: lock not held\n", id, l);
		if (lockpausetime == 1) {
			Debugger("simple_unlock");
			/* BACKTRACE(curproc); */
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_unlock: lock not held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 0;
	if (curproc)
		curproc->p_simple_locks--;
}
#elif defined(SIMPLELOCK_DEBUG)
#error "SIMPLELOCK_DEBUG is not compatible with SMP!"
#endif /* SIMPLELOCK_DEBUG && MAXCPU == 1 */