hammer2 - locking revamp
sys/vfs/hammer2/hammer2_ccms.c
/*
 * Copyright (c) 2006,2012-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The Cache Coherency Management System (CCMS)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/objcache.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <machine/limits.h>

#include <sys/spinlock2.h>

#include "hammer2_ccms.h"
#include "hammer2.h"

int ccms_debug = 0;

/*
 * Initialize a CST (cache state) structure, including its embedded
 * spinlock.
 */
void
ccms_cst_init(ccms_cst_t *cst)
{
        bzero(cst, sizeof(*cst));
        spin_init(&cst->spin, "ccmscst");
}

/*
 * Uninitialize a CST.  The lock must no longer be held.
 */
void
ccms_cst_uninit(ccms_cst_t *cst)
{
        KKASSERT(cst->count == 0);
        if (cst->state != CCMS_STATE_INVALID) {
                /* XXX */
        }
}
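
/*
 * Usage sketch (illustrative only, not part of the original file, hence
 * disabled with #if 0): the typical lifecycle of a CST.  The function
 * name example_cst_lifecycle is hypothetical.
 */
#if 0
static void
example_cst_lifecycle(void)
{
        ccms_cst_t cst;

        ccms_cst_init(&cst);
        ccms_thread_lock(&cst, CCMS_STATE_EXCLUSIVE);
        /* ... access the state the CST protects ... */
        ccms_thread_unlock(&cst);
        ccms_cst_uninit(&cst);
}
#endif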

/************************************************************************
 *                          CST SUPPORT FUNCTIONS                       *
 ************************************************************************/

/*
 * Acquire local cache state & lock.  If the current thread already holds
 * the lock exclusively, we bump the exclusive count even if the thread is
 * trying to get a shared lock.
 */
void
ccms_thread_lock(ccms_cst_t *cst, ccms_state_t state)
{
        /*
         * Regardless of the type of lock requested, if the current thread
         * already holds an exclusive lock we bump the exclusive count and
         * return.  This requires no spinlock.
         */
        LOCKENTER;
        if (cst->count < 0 && cst->td == curthread) {
                --cst->count;
                return;
        }

        /*
         * Otherwise use the spinlock to interlock the operation and sleep
         * as necessary.
         */
        hammer2_spin_ex(&cst->spin);
        if (state == CCMS_STATE_SHARED) {
                while (cst->count < 0 || cst->upgrade) {
                        cst->blocked = 1;
                        ssleep(cst, &cst->spin, 0, "ccmslck", hz);
                }
                ++cst->count;
                KKASSERT(cst->td == NULL);
        } else if (state == CCMS_STATE_EXCLUSIVE) {
                while (cst->count != 0 || cst->upgrade) {
                        cst->blocked = 1;
                        ssleep(cst, &cst->spin, 0, "ccmslck", hz);
                }
                cst->count = -1;
                cst->td = curthread;
        } else {
                hammer2_spin_unex(&cst->spin);
                panic("ccms_thread_lock: bad state %d\n", state);
        }
        hammer2_spin_unex(&cst->spin);
}
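
/*
 * Usage sketch (illustrative only, disabled): because an exclusive
 * holder simply bumps the exclusive count, a recursive acquisition
 * succeeds even when the inner request asks for a shared lock.  Each
 * acquisition must be paired with a ccms_thread_unlock().  The function
 * name example_recursive_lock is hypothetical.
 */
#if 0
static void
example_recursive_lock(ccms_cst_t *cst)
{
        ccms_thread_lock(cst, CCMS_STATE_EXCLUSIVE);    /* count = -1 */
        ccms_thread_lock(cst, CCMS_STATE_SHARED);       /* recurses, -2 */
        ccms_thread_unlock(cst);                        /* back to -1 */
        ccms_thread_unlock(cst);                        /* released */
}
#endif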

/*
 * Same as ccms_thread_lock() but the acquisition is non-blocking.  Returns
 * 0 on success, EBUSY on failure.
 */
int
ccms_thread_lock_nonblock(ccms_cst_t *cst, ccms_state_t state)
{
        if (cst->count < 0 && cst->td == curthread) {
                --cst->count;
                LOCKENTER;
                return(0);
        }

        hammer2_spin_ex(&cst->spin);
        if (state == CCMS_STATE_SHARED) {
                if (cst->count < 0 || cst->upgrade) {
                        hammer2_spin_unex(&cst->spin);
                        return (EBUSY);
                }
                ++cst->count;
                KKASSERT(cst->td == NULL);
        } else if (state == CCMS_STATE_EXCLUSIVE) {
                if (cst->count != 0 || cst->upgrade) {
                        hammer2_spin_unex(&cst->spin);
                        return (EBUSY);
                }
                cst->count = -1;
                cst->td = curthread;
        } else {
                hammer2_spin_unex(&cst->spin);
                panic("ccms_thread_lock_nonblock: bad state %d\n", state);
        }
        hammer2_spin_unex(&cst->spin);
        LOCKENTER;
        return(0);
}
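
/*
 * Usage sketch (illustrative only, disabled): opportunistic acquisition
 * with a fallback to the blocking path on EBUSY.  The function name
 * example_try_lock is hypothetical.
 */
#if 0
static void
example_try_lock(ccms_cst_t *cst)
{
        if (ccms_thread_lock_nonblock(cst, CCMS_STATE_SHARED) == EBUSY) {
                /* could not get the lock immediately, block instead */
                ccms_thread_lock(cst, CCMS_STATE_SHARED);
        }
        /* ... read the shared state ... */
        ccms_thread_unlock(cst);
}
#endif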

/*
 * Temporarily release a held thread lock, returning the state it was
 * held in so the caller can later re-acquire the lock in the same state
 * via ccms_thread_lock_temp_restore().  Returns CCMS_STATE_INVALID if
 * the lock was not held.
 */
ccms_state_t
ccms_thread_lock_temp_release(ccms_cst_t *cst)
{
        if (cst->count < 0) {
                ccms_thread_unlock(cst);
                return(CCMS_STATE_EXCLUSIVE);
        }
        if (cst->count > 0) {
                ccms_thread_unlock(cst);
                return(CCMS_STATE_SHARED);
        }
        return (CCMS_STATE_INVALID);
}

/*
 * Re-acquire a lock previously released with
 * ccms_thread_lock_temp_release(), in its original state.
 */
void
ccms_thread_lock_temp_restore(ccms_cst_t *cst, ccms_state_t ostate)
{
        ccms_thread_lock(cst, ostate);
}
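
/*
 * Usage sketch (illustrative only, disabled): dropping the lock across
 * an operation that must not hold it, then restoring the prior state.
 * The function name example_temp_release is hypothetical.
 */
#if 0
static void
example_temp_release(ccms_cst_t *cst)
{
        ccms_state_t ostate;

        ccms_thread_lock(cst, CCMS_STATE_SHARED);
        /* ... */
        ostate = ccms_thread_lock_temp_release(cst);
        /* ... blocking operation that must not hold the lock ... */
        ccms_thread_lock_temp_restore(cst, ostate);
        /* ... */
        ccms_thread_unlock(cst);
}
#endif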

/*
 * Temporarily upgrade a thread lock for making local structural changes.
 * No new shared or exclusive locks can be acquired by others while we are
 * upgrading, but other upgraders are allowed.
 */
ccms_state_t
ccms_thread_lock_upgrade(ccms_cst_t *cst)
{
        /*
         * Nothing to do if already exclusive
         */
        if (cst->count < 0) {
                KKASSERT(cst->td == curthread);
                return(CCMS_STATE_EXCLUSIVE);
        }

        /*
         * Convert a shared lock to exclusive.
         */
        if (cst->count > 0) {
                hammer2_spin_ex(&cst->spin);
                ++cst->upgrade;
                --cst->count;
                while (cst->count) {
                        cst->blocked = 1;
                        ssleep(cst, &cst->spin, 0, "ccmsupg", hz);
                }
                cst->count = -1;
                cst->td = curthread;
                hammer2_spin_unex(&cst->spin);
                return(CCMS_STATE_SHARED);
        }
        panic("ccms_thread_lock_upgrade: not locked");
        /* NOT REACHED */
        return(0);
}
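
/*
 * Usage sketch (illustrative only, disabled): upgrading a shared lock
 * around a structural modification, then downgrading back to the state
 * recorded in ostate.  The function name example_upgrade is hypothetical.
 */
#if 0
static void
example_upgrade(ccms_cst_t *cst)
{
        ccms_state_t ostate;

        ccms_thread_lock(cst, CCMS_STATE_SHARED);
        ostate = ccms_thread_lock_upgrade(cst);
        /* ... make structural changes under the exclusive lock ... */
        ccms_thread_lock_downgrade(cst, ostate);
        /* ... continue under the shared lock ... */
        ccms_thread_unlock(cst);
}
#endif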

/*
 * Undo an upgrade performed by ccms_thread_lock_upgrade(), returning
 * the lock to the state recorded in ostate.  If the lock was shared
 * before the upgrade it is converted back to shared and any blocked
 * waiters are woken; if it was already exclusive there is nothing to do.
 */
void
ccms_thread_lock_downgrade(ccms_cst_t *cst, ccms_state_t ostate)
{
        if (ostate == CCMS_STATE_SHARED) {
                KKASSERT(cst->td == curthread);
                KKASSERT(cst->count == -1);
                hammer2_spin_ex(&cst->spin);
                --cst->upgrade;
                cst->count = 1;
                cst->td = NULL;
                if (cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                } else {
                        hammer2_spin_unex(&cst->spin);
                }
        }
        /* else nothing to do if excl->excl */
}

/*
 * Release a local thread lock
 */
void
ccms_thread_unlock(ccms_cst_t *cst)
{
        LOCKEXIT;
        if (cst->count < 0) {
                /*
                 * Exclusive
                 */
                KKASSERT(cst->td == curthread);
                if (cst->count < -1) {
                        ++cst->count;
                        return;
                }
                hammer2_spin_ex(&cst->spin);
                KKASSERT(cst->count == -1);
                cst->count = 0;
                cst->td = NULL;
                if (cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                        return;
                }
                hammer2_spin_unex(&cst->spin);
        } else if (cst->count > 0) {
                /*
                 * Shared
                 */
                hammer2_spin_ex(&cst->spin);
                if (--cst->count == 0 && cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                        return;
                }
                hammer2_spin_unex(&cst->spin);
        } else {
                panic("ccms_thread_unlock: bad zero count\n");
        }
}

/*
 * Assign ownership of an exclusively-held lock to the current thread,
 * typically after the lock has been handed off from another thread.
 */
void
ccms_thread_lock_setown(ccms_cst_t *cst)
{
        KKASSERT(cst->count < 0);
        cst->td = curthread;
}

/*
 * Release a previously upgraded local thread lock
 */
void
ccms_thread_unlock_upgraded(ccms_cst_t *cst, ccms_state_t ostate)
{
        if (ostate == CCMS_STATE_SHARED) {
                LOCKEXIT;
                KKASSERT(cst->td == curthread);
                KKASSERT(cst->count == -1);
                hammer2_spin_ex(&cst->spin);
                --cst->upgrade;
                cst->count = 0;
                cst->td = NULL;
                if (cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                } else {
                        hammer2_spin_unex(&cst->spin);
                }
        } else {
                ccms_thread_unlock(cst);
        }
}
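
/*
 * Usage sketch (illustrative only, disabled): releasing an upgraded
 * lock in one step instead of downgrading and then unlocking.  The
 * function name example_unlock_upgraded is hypothetical.
 */
#if 0
static void
example_unlock_upgraded(ccms_cst_t *cst)
{
        ccms_state_t ostate;

        ccms_thread_lock(cst, CCMS_STATE_SHARED);
        ostate = ccms_thread_lock_upgrade(cst);
        /* ... structural changes ... */
        ccms_thread_unlock_upgraded(cst, ostate);       /* fully released */
}
#endif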

/*
 * Return non-zero if the current thread holds the lock exclusively.
 */
int
ccms_thread_lock_owned(ccms_cst_t *cst)
{
        return(cst->count < 0 && cst->td == curthread);
}
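
/*
 * Usage sketch (illustrative only, disabled): asserting lock ownership
 * on entry to a function that requires the caller to hold the lock
 * exclusively.  The function name example_requires_lock is hypothetical.
 */
#if 0
static void
example_requires_lock(ccms_cst_t *cst)
{
        KKASSERT(ccms_thread_lock_owned(cst));
        /* ... code that assumes exclusive access ... */
}
#endif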