Commit | Line | Data |
---|---|---|
b272101a | 1 | /* |
984263bc MD |
2 | * Copyright (c) 1995 |
3 | * The Regents of the University of California. All rights reserved. | |
3b6a19b2 | 4 | * Copyright (c) 2013-2017 |
05eacabc | 5 | * The DragonFly Project. All rights reserved. |
984263bc MD |
6 | * |
7 | * This code contains ideas from software contributed to Berkeley by | |
8 | * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating | |
9 | * System project at Carnegie-Mellon University. | |
10 | * | |
3b6a19b2 MD |
11 | * This code is derived from software contributed to The DragonFly Project |
12 | * by Matthew Dillon <dillon@backplane.com> | |
13 | * | |
984263bc MD |
14 | * Redistribution and use in source and binary forms, with or without |
15 | * modification, are permitted provided that the following conditions | |
16 | * are met: | |
17 | * 1. Redistributions of source code must retain the above copyright | |
18 | * notice, this list of conditions and the following disclaimer. | |
19 | * 2. Redistributions in binary form must reproduce the above copyright | |
20 | * notice, this list of conditions and the following disclaimer in the | |
21 | * documentation and/or other materials provided with the distribution. | |
2c64e990 | 22 | * 3. Neither the name of the University nor the names of its contributors |
984263bc MD |
23 | * may be used to endorse or promote products derived from this software |
24 | * without specific prior written permission. | |
25 | * | |
26 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
27 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
28 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
29 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
30 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
31 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
32 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
33 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
34 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
35 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
36 | * SUCH DAMAGE. | |
984263bc MD |
37 | */ |
38 | ||
05220613 MD |
39 | #ifndef _SYS_LOCK_H_ |
40 | #define _SYS_LOCK_H_ | |
41 | ||
42 | /* | |
43 | * A number of third party programs #include <sys/lock.h> for no good | |
b272101a | 44 | * reason. Don't actually include anything unless we are the kernel. |
05220613 MD |
45 | */ |
46 | #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES) | |
984263bc | 47 | |
984263bc | 48 | #include <machine/lock.h> |
8a8d5d85 MD |
49 | #ifndef _SYS_THREAD_H_ |
50 | #include <sys/thread.h> /* lwkt_token */ | |
51 | #endif | |
16523a43 MD |
52 | #ifndef _SYS_SPINLOCK_H_ |
53 | #include <sys/spinlock.h> | |
54 | #endif | |
984263bc MD |
55 | |
56 | /* | |
57 | * The general lock structure. Provides for multiple shared locks, | |
58 | * upgrading from shared to exclusive, and sleeping until the lock | |
8a8d5d85 | 59 | * can be gained. |
3b6a19b2 MD |
60 | * |
 * NOTE: We don't __cachealign struct lock; it's too much bloat.  Users
 * of struct lock may be able to arrange it within greater structures
 * in more SMP-friendly ways.
984263bc | 64 | */ |
dadab5e9 MD |
65 | struct thread; |
66 | ||
984263bc | 67 | struct lock { |
984263bc | 68 | u_int lk_flags; /* see below */ |
984263bc | 69 | int lk_timo; /* maximum sleep time (for tsleep) */ |
3b6a19b2 | 70 | uint64_t lk_count; /* see LKC_* bits */ |
05eacabc | 71 | const char *lk_wmesg; /* resource sleeping (for tsleep) */ |
dadab5e9 | 72 | struct thread *lk_lockholder; /* thread of excl lock holder */ |
984263bc | 73 | }; |
05eacabc | 74 | |
984263bc MD |
75 | /* |
76 | * Lock request types: | |
4e3d9e11 MD |
77 | * |
78 | * LK_SHARED | |
79 | * Get one of many possible shared locks. If a process holding an | |
80 | * exclusive lock requests a shared lock, the exclusive lock(s) will | |
81 | * be downgraded to shared locks. | |
82 | * | |
83 | * LK_EXCLUSIVE | |
84 | * Stop further shared locks, when they are cleared, grant a pending | |
85 | * upgrade if it exists, then grant an exclusive lock. Only one exclusive | |
86 | * lock may exist at a time, except that a process holding an exclusive | |
87 | * lock may get additional exclusive locks if it explicitly sets the | |
195008ae | 88 | * LK_CANRECURSE flag in the lock request, or if the LK_CANRECURSE flag |
4e3d9e11 MD |
89 | * was set when the lock was initialized. |
90 | * | |
91 | * LK_UPGRADE | |
92 | * The process must hold a shared lock that it wants to have upgraded | |
93 | * to an exclusive lock. Other processes may get exclusive access to | |
94 | * the resource between the time that the upgrade is requested and the | |
95 | * time that it is granted. | |
96 | * | |
97 | * LK_EXCLUPGRADE | |
98 | * the process must hold a shared lock that it wants to have upgraded | |
99 | * to an exclusive lock. If the request succeeds, no other processes | |
100 | * will have gotten exclusive access to the resource between the time | |
101 | * that the upgrade is requested and the time that it is granted. | |
102 | * However, if another process has already requested an upgrade, the | |
103 | * request will fail (see error returns below). | |
104 | * | |
105 | * LK_DOWNGRADE | |
106 | * The process must hold an exclusive lock that it wants to have | |
107 | * downgraded to a shared lock. If the process holds multiple (recursive) | |
108 | * exclusive locks, they will all be downgraded to shared locks. | |
109 | * | |
110 | * LK_RELEASE | |
111 | * Release one instance of a lock. | |
112 | * | |
113 | * LK_CANCEL_BEG | |
114 | * The current exclusive lock holder can cancel any blocked lock requests, | |
 * or any new requests, whose callers specified LK_CANCELABLE.  They will
 * receive an ENOLCK error code.  Cancel beg/end does not stack.
117 | * | |
118 | * The cancel command stays in effect until the exclusive lock holder | |
119 | * releases the last count on the lock or issues a LK_CANCEL_END command. | |
120 | * | |
121 | * LK_CANCEL_END | |
122 | * The current exclusive lock holder can stop canceling new requests | |
 * whose callers specify LK_CANCELABLE.  The exclusive lock is maintained.
124 | * | |
125 | * Note that the last release of the exclusive lock will also | |
126 | * automatically end cancel mode. | |
127 | * | |
128 | * | |
129 | * --- | |
130 | * | |
984263bc MD |
131 | * LK_EXCLOTHER - return for lockstatus(). Used when another process |
132 | * holds the lock exclusively. | |
133 | * | |
134 | * These are flags that are passed to the lockmgr routine. | |
135 | */ | |
136 | #define LK_TYPE_MASK 0x0000000f /* type of lock sought */ | |
137 | #define LK_SHARED 0x00000001 /* shared lock */ | |
138 | #define LK_EXCLUSIVE 0x00000002 /* exclusive lock */ | |
139 | #define LK_UPGRADE 0x00000003 /* shared-to-exclusive upgrade */ | |
140 | #define LK_EXCLUPGRADE 0x00000004 /* first shared-to-exclusive upgrade */ | |
141 | #define LK_DOWNGRADE 0x00000005 /* exclusive-to-shared downgrade */ | |
142 | #define LK_RELEASE 0x00000006 /* release any type of lock */ | |
05eacabc | 143 | #define LK_WAITUPGRADE 0x00000007 |
984263bc | 144 | #define LK_EXCLOTHER 0x00000008 /* other process holds lock */ |
4e3d9e11 MD |
145 | #define LK_CANCEL_BEG 0x00000009 /* cancel other requests */ |
146 | #define LK_CANCEL_END 0x0000000a /* stop canceling other requests */ | |
05eacabc MD |
147 | |
148 | /* | |
149 | * lk_count bit fields. | |
150 | * | |
4e3d9e11 MD |
 * Positive count is exclusive, negative count is shared.  The count field
 * must be large enough to accommodate all possible threads.
05eacabc | 153 | */ |
3b6a19b2 MD |
154 | #define LKC_RESERVED8 0x0000000080000000LU /* (DNU, insn optimization) */ |
155 | #define LKC_EXREQ 0x0000000040000000LU /* waiting for excl lock */ | |
156 | #define LKC_SHARED 0x0000000020000000LU /* shared lock(s) granted */ | |
157 | #define LKC_UPREQ 0x0000000010000000LU /* waiting for upgrade */ | |
158 | #define LKC_EXREQ2 0x0000000008000000LU /* multi-wait for EXREQ */ | |
159 | #define LKC_CANCEL 0x0000000004000000LU /* cancel in effect */ | |
160 | #define LKC_XMASK 0x0000000003FFFFFFLU | |
161 | #define LKC_SMASK 0xFFFFFFFF00000000LU | |
162 | #define LKC_SCOUNT 0x0000000100000000LU | |
163 | #define LKC_SSHIFT 32 | |
05eacabc | 164 | |
984263bc MD |
165 | /* |
166 | * External lock flags. | |
167 | * | |
168 | * The first three flags may be set in lock_init to set their mode permanently, | |
4a3ab83d | 169 | * or passed in as arguments to the lock manager. |
984263bc | 170 | */ |
5b49787b | 171 | #define LK_EXTFLG_MASK 0x070000F0 /* mask of external flags */ |
984263bc MD |
172 | #define LK_NOWAIT 0x00000010 /* do not sleep to await lock */ |
173 | #define LK_SLEEPFAIL 0x00000020 /* sleep, then return failure */ | |
174 | #define LK_CANRECURSE 0x00000040 /* allow recursive exclusive lock */ | |
5b49787b | 175 | #define LK_NOCOLLSTATS 0x00000080 /* v_lock_coll not applicable */ |
4e3d9e11 | 176 | #define LK_CANCELABLE 0x01000000 /* blocked caller can be canceled */ |
984263bc | 177 | #define LK_TIMELOCK 0x02000000 |
f2770c70 | 178 | #define LK_PCATCH 0x04000000 /* timelocked with signal catching */ |
05eacabc | 179 | |
984263bc MD |
180 | /* |
181 | * Control flags | |
182 | * | |
183 | * Non-persistent external flags. | |
184 | */ | |
b458d1ab | 185 | #define LK_FAILRECLAIM 0x00010000 /* vn_lock: allowed to fail on reclaim */ |
984263bc | 186 | #define LK_RETRY 0x00020000 /* vn_lock: retry until locked */ |
b458d1ab MD |
187 | #define LK_UNUSED40000 0x00040000 |
188 | #define LK_UNUSED80000 0x00080000 | |
984263bc | 189 | |
984263bc MD |
190 | /* |
191 | * Lock return status. | |
192 | * | |
193 | * Successfully obtained locks return 0. Locks will always succeed | |
194 | * unless one of the following is true: | |
195 | * LK_FORCEUPGRADE is requested and some other process has already | |
196 | * requested a lock upgrade (returns EBUSY). | |
197 | * LK_WAIT is set and a sleep would be required (returns EBUSY). | |
198 | * LK_SLEEPFAIL is set and a sleep was done (returns ENOLCK). | |
 * PCATCH is set in lock priority and a signal arrives (returns
 * either EINTR or ERESTART if the system call is to be restarted).
201 | * Non-null lock timeout and timeout expires (returns EWOULDBLOCK). | |
202 | * A failed lock attempt always returns a non-zero error value. No lock | |
203 | * is held after an error return (in particular, a failed LK_UPGRADE | |
204 | * or LK_FORCEUPGRADE will have released its shared access lock). | |
205 | */ | |
206 | ||
207 | /* | |
208 | * Indicator that no process holds exclusive lock | |
209 | */ | |
dadab5e9 | 210 | #define LK_KERNTHREAD ((struct thread *)-2) |
984263bc | 211 | |
05220613 MD |
212 | #ifdef _KERNEL |
213 | ||
984263bc MD |
214 | void dumplockinfo(struct lock *lkp); |
215 | struct proc; | |
216 | ||
5d101ab9 FT |
217 | struct lock_args { |
218 | struct lock *la_lock; | |
219 | const char *la_desc; | |
124eeca9 | 220 | int la_flags; |
5d101ab9 FT |
221 | }; |
222 | ||
a78a0988 SZ |
223 | #define LOCK_INITIALIZER(wmesg, timo, flags) \ |
224 | { \ | |
225 | .lk_flags = ((flags) & LK_EXTFLG_MASK), \ | |
3b6a19b2 | 226 | .lk_timo = (timo), \ |
a78a0988 SZ |
227 | .lk_count = 0, \ |
228 | .lk_wmesg = wmesg, \ | |
3b6a19b2 | 229 | .lk_lockholder = NULL \ |
a78a0988 SZ |
230 | } |
231 | ||
55a2ee33 MP |
232 | void lockinit (struct lock *, const char *wmesg, int timo, int flags); |
233 | void lockreinit (struct lock *, const char *wmesg, int timo, int flags); | |
8fc3c98f | 234 | void lockuninit(struct lock *); |
5d101ab9 | 235 | void lock_sysinit(struct lock_args *); |
b1793cc6 MD |
236 | int lockmgr_shared (struct lock *, u_int flags); |
237 | int lockmgr_exclusive (struct lock *, u_int flags); | |
238 | int lockmgr_downgrade (struct lock *, u_int flags); | |
239 | int lockmgr_upgrade (struct lock *, u_int flags); | |
240 | int lockmgr_release (struct lock *, u_int flags); | |
241 | int lockmgr_cancel_beg (struct lock *, u_int flags); | |
242 | int lockmgr_cancel_end (struct lock *, u_int flags); | |
df4f70a6 | 243 | void lockmgr_kernproc (struct lock *); |
b153f746 RG |
244 | void lockmgr_printinfo (struct lock *); |
245 | int lockstatus (struct lock *, struct thread *); | |
43903f4c | 246 | int lockowned (struct lock *); |
984263bc | 247 | |
124eeca9 | 248 | #define LOCK_SYSINIT(name, lock, desc, flags) \ |
5d101ab9 FT |
249 | static struct lock_args name##_args = { \ |
250 | (lock), \ | |
124eeca9 FT |
251 | (desc), \ |
252 | (flags) \ | |
5d101ab9 FT |
253 | }; \ |
254 | SYSINIT(name##_lock_sysinit, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, \ | |
b1793cc6 MD |
255 | lock_sysinit, &name##_args); \ |
256 | SYSUNINIT(name##_lock_sysuninit, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, \ | |
5d101ab9 FT |
257 | lockuninit, (lock)) |
258 | ||
b1793cc6 MD |
259 | /* |
260 | * Most lockmgr() calls pass a constant flags parameter which | |
261 | * we can optimize-out with an inline. | |
262 | */ | |
263 | static __inline | |
264 | int | |
265 | lockmgr(struct lock *lkp, u_int flags) | |
266 | { | |
b272101a | 267 | switch (flags & LK_TYPE_MASK) { |
b1793cc6 MD |
268 | case LK_SHARED: |
269 | return lockmgr_shared(lkp, flags); | |
270 | case LK_EXCLUSIVE: | |
271 | return lockmgr_exclusive(lkp, flags); | |
272 | case LK_DOWNGRADE: | |
273 | return lockmgr_downgrade(lkp, flags); | |
274 | case LK_EXCLUPGRADE: | |
275 | case LK_UPGRADE: | |
276 | return lockmgr_upgrade(lkp, flags); | |
277 | case LK_RELEASE: | |
278 | return lockmgr_release(lkp, flags); | |
279 | case LK_CANCEL_BEG: | |
280 | return lockmgr_cancel_beg(lkp, flags); | |
281 | case LK_CANCEL_END: | |
282 | return lockmgr_cancel_end(lkp, flags); | |
283 | default: | |
284 | panic("lockmgr: unknown locktype request %d", | |
285 | flags & LK_TYPE_MASK); | |
286 | return EINVAL; /* NOT REACHED */ | |
287 | } | |
288 | } | |
289 | ||
3b6a19b2 MD |
290 | /* |
291 | * Returns non-zero if the lock is in-use. Cannot be used to count | |
292 | * refs on a lock (refs cannot be safely counted due to the use of | |
293 | * atomic_fetchadd_int() for shared locks. | |
294 | */ | |
295 | static __inline | |
296 | int | |
297 | lockinuse(struct lock *lkp) | |
298 | { | |
299 | return ((lkp->lk_count & (LKC_SMASK | LKC_XMASK)) != 0); | |
300 | } | |
301 | ||
a9ea4065 SW |
302 | /* |
303 | * Returns true if the lock was acquired. Can be used to port | |
304 | * FreeBSD's mtx_trylock() and similar functions. | |
305 | */ | |
306 | static __inline | |
307 | boolean_t | |
308 | lockmgr_try(struct lock *lkp, u_int flags) | |
309 | { | |
310 | return (lockmgr(lkp, flags | LK_NOWAIT) == 0); | |
311 | } | |
312 | ||
32d04ef8 MD |
313 | /* |
314 | * Returns true if the lock is exclusively held by anyone | |
315 | */ | |
316 | static __inline | |
317 | boolean_t | |
318 | lockmgr_anyexcl(struct lock *lkp) | |
319 | { | |
320 | return ((lkp->lk_count & LKC_XMASK) != 0); | |
321 | } | |
322 | ||
323 | static __inline | |
324 | boolean_t | |
325 | lockmgr_oneexcl(struct lock *lkp) | |
326 | { | |
327 | return ((lkp->lk_count & LKC_XMASK) == 1); | |
328 | } | |
329 | ||
330 | static __inline | |
331 | boolean_t | |
332 | lockmgr_exclpending(struct lock *lkp) | |
333 | { | |
334 | return ((lkp->lk_count & (LKC_EXREQ | LKC_EXREQ2)) != 0); | |
335 | } | |
336 | ||
05220613 MD |
337 | #endif /* _KERNEL */ |
338 | #endif /* _KERNEL || _KERNEL_STRUCTURES */ | |
339 | #endif /* _SYS_LOCK_H_ */ |