/*
 * Copyright (c) 2006,2012-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The Cache Coherency Management System (CCMS)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/objcache.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/spinlock2.h>

#include "hammer2_ccms.h"
#include "hammer2.h"

void
ccms_cst_init(ccms_cst_t *cst)
{
        bzero(cst, sizeof(*cst));
        spin_init(&cst->spin, "ccmscst");
}

void
ccms_cst_uninit(ccms_cst_t *cst)
{
        KKASSERT(cst->count == 0);
        if (cst->state != CCMS_STATE_INVALID) {
                /* XXX */
        }
}

/************************************************************************
 *                        CST SUPPORT FUNCTIONS                         *
 ************************************************************************/
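
/*
 * Quick reference for the CST fields, summarized from the code below
 * (hammer2_ccms.h holds the authoritative definitions):
 *
 *      count < 0       held exclusively by cst->td; -count is the
 *                      recursion depth.
 *      count > 0       held shared by (count) threads.
 *      count == 0      not held.
 *
 * cst->upgrade counts in-progress shared->exclusive upgrades and stalls
 * new acquisitions while non-zero.  cst->blocked indicates that at least
 * one sleeper needs a wakeup() when the lock state changes.
 */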
/*
 * Acquire local cache state & lock.  If the current thread already holds
 * the lock exclusively we bump the exclusive count, even if the thread is
 * trying to get a shared lock.
 */
void
ccms_thread_lock(ccms_cst_t *cst, ccms_state_t state)
{
        /*
         * Regardless of the type of lock requested if the current thread
         * already holds an exclusive lock we bump the exclusive count and
         * return.  This requires no spinlock.
         */
        if (cst->count < 0 && cst->td == curthread) {
                --cst->count;
                return;
        }

        /*
         * Otherwise use the spinlock to interlock the operation and sleep
         * as necessary.
         */
        hammer2_spin_ex(&cst->spin);
        if (state == CCMS_STATE_SHARED) {
                while (cst->count < 0 || cst->upgrade) {
                        cst->blocked = 1;
                        ssleep(cst, &cst->spin, 0, "ccmslck", hz);
                }
                ++cst->count;
                KKASSERT(cst->td == NULL);
        } else if (state == CCMS_STATE_EXCLUSIVE) {
                while (cst->count != 0 || cst->upgrade) {
                        cst->blocked = 1;
                        ssleep(cst, &cst->spin, 0, "ccmslck", hz);
                }
                cst->count = -1;
                cst->td = curthread;
        } else {
                hammer2_spin_unex(&cst->spin);
                panic("ccms_thread_lock: bad state %d\n", state);
        }
        hammer2_spin_unex(&cst->spin);
}

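/*
 * Illustrative call sequence (hypothetical "ip" structure embedding a
 * ccms_cst_t, not taken from a real caller):
 *
 *      ccms_thread_lock(&ip->cst, CCMS_STATE_EXCLUSIVE);
 *      ccms_thread_lock(&ip->cst, CCMS_STATE_SHARED);
 *      ccms_thread_unlock(&ip->cst);
 *      ccms_thread_unlock(&ip->cst);
 *
 * The second lock call recurses on the held exclusive lock rather than
 * acquiring a shared lock, so both unlocks release exclusive counts.
 */
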
/*
 * Same as ccms_thread_lock() but acquires the lock non-blocking.  Returns
 * 0 on success, EBUSY on failure.
 */
int
ccms_thread_lock_nonblock(ccms_cst_t *cst, ccms_state_t state)
{
        if (cst->count < 0 && cst->td == curthread) {
                --cst->count;
                return(0);
        }

        hammer2_spin_ex(&cst->spin);
        if (state == CCMS_STATE_SHARED) {
                if (cst->count < 0 || cst->upgrade) {
                        hammer2_spin_unex(&cst->spin);
                        return(EBUSY);
                }
                ++cst->count;
                KKASSERT(cst->td == NULL);
        } else if (state == CCMS_STATE_EXCLUSIVE) {
                if (cst->count != 0 || cst->upgrade) {
                        hammer2_spin_unex(&cst->spin);
                        return(EBUSY);
                }
                cst->count = -1;
                cst->td = curthread;
        } else {
                hammer2_spin_unex(&cst->spin);
                panic("ccms_thread_lock_nonblock: bad state %d\n", state);
        }
        hammer2_spin_unex(&cst->spin);
        return(0);
}

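/*
 * Illustrative non-blocking pattern (hypothetical caller):
 *
 *      if (ccms_thread_lock_nonblock(&ip->cst, CCMS_STATE_EXCLUSIVE)) {
 *              (EBUSY returned, fall back to the blocking path)
 *              ccms_thread_lock(&ip->cst, CCMS_STATE_EXCLUSIVE);
 *      }
 */

/*
 * Temporarily release a held lock, returning the state needed to
 * re-acquire it later via ccms_thread_lock_temp_restore().  Returns
 * CCMS_STATE_INVALID if no lock was held.
 */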
ccms_state_t
ccms_thread_lock_temp_release(ccms_cst_t *cst)
{
        if (cst->count < 0) {
                ccms_thread_unlock(cst);
                return(CCMS_STATE_EXCLUSIVE);
        }
        if (cst->count > 0) {
                ccms_thread_unlock(cst);
                return(CCMS_STATE_SHARED);
        }
        return (CCMS_STATE_INVALID);
}

void
ccms_thread_lock_temp_restore(ccms_cst_t *cst, ccms_state_t ostate)
{
        ccms_thread_lock(cst, ostate);
}

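/*
 * Illustrative release/restore pairing (hypothetical caller):
 *
 *      ccms_state_t ostate;
 *
 *      ostate = ccms_thread_lock_temp_release(&ip->cst);
 *      (perform an operation that must not hold the lock)
 *      ccms_thread_lock_temp_restore(&ip->cst, ostate);
 */
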
/*
 * Temporarily upgrade a thread lock for making local structural changes.
 * No new shared or exclusive locks can be acquired by others while we are
 * upgrading, but other upgraders are allowed.
 */
ccms_state_t
ccms_thread_lock_upgrade(ccms_cst_t *cst)
{
        /*
         * Nothing to do if already exclusive
         */
        if (cst->count < 0) {
                KKASSERT(cst->td == curthread);
                return(CCMS_STATE_EXCLUSIVE);
        }

        /*
         * Convert a shared lock to exclusive.
         */
        if (cst->count > 0) {
                hammer2_spin_ex(&cst->spin);
                ++cst->upgrade;
                --cst->count;
                while (cst->count) {
                        cst->blocked = 1;
                        ssleep(cst, &cst->spin, 0, "ccmsupg", hz);
                }
                cst->count = -1;
                cst->td = curthread;
                hammer2_spin_unex(&cst->spin);
                return(CCMS_STATE_SHARED);
        }
        panic("ccms_thread_lock_upgrade: not locked");
        /* NOT REACHED */
        return(CCMS_STATE_EXCLUSIVE);
}

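/*
 * Illustrative upgrade pairing (hypothetical caller):
 *
 *      ccms_state_t ostate;
 *
 *      ostate = ccms_thread_lock_upgrade(&ip->cst);
 *      (make local structural changes)
 *      ccms_thread_lock_downgrade(&ip->cst, ostate);
 */

/*
 * Downgrade an upgraded lock back to its pre-upgrade state.  ostate must
 * be the value returned by the matching ccms_thread_lock_upgrade() call;
 * nothing needs to be done when the lock was already exclusive.
 */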
void
ccms_thread_lock_downgrade(ccms_cst_t *cst, ccms_state_t ostate)
{
        if (ostate == CCMS_STATE_SHARED) {
                KKASSERT(cst->td == curthread);
                KKASSERT(cst->count == -1);
                hammer2_spin_ex(&cst->spin);
                --cst->upgrade;
                cst->count = 1;
                cst->td = NULL;
                if (cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                } else {
                        hammer2_spin_unex(&cst->spin);
                }
        }
        /* else nothing to do if excl->excl */
}

/*
 * Release a local thread lock
 */
void
ccms_thread_unlock(ccms_cst_t *cst)
{
        if (cst->count < 0) {
                /*
                 * Exclusive
                 */
                KKASSERT(cst->td == curthread);
                if (cst->count < -1) {
                        ++cst->count;
                        return;
                }
                hammer2_spin_ex(&cst->spin);
                KKASSERT(cst->count == -1);
                cst->count = 0;
                cst->td = NULL;
                if (cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                        return;
                }
                hammer2_spin_unex(&cst->spin);
        } else if (cst->count > 0) {
                /*
                 * Shared
                 */
                hammer2_spin_ex(&cst->spin);
                if (--cst->count == 0 && cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                        return;
                }
                hammer2_spin_unex(&cst->spin);
        } else {
                panic("ccms_thread_unlock: bad zero count\n");
        }
}

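/*
 * Set the current thread as the owner of an exclusively held lock,
 * e.g. when exclusive ownership is handed off between threads.
 */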
void
ccms_thread_lock_setown(ccms_cst_t *cst)
{
        KKASSERT(cst->count < 0);
        cst->td = curthread;
}

/*
 * Release a previously upgraded local thread lock
 */
void
ccms_thread_unlock_upgraded(ccms_cst_t *cst, ccms_state_t ostate)
{
        if (ostate == CCMS_STATE_SHARED) {
                KKASSERT(cst->td == curthread);
                KKASSERT(cst->count == -1);
                hammer2_spin_ex(&cst->spin);
                --cst->upgrade;
                cst->count = 0;
                cst->td = NULL;
                if (cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                } else {
                        hammer2_spin_unex(&cst->spin);
                }
        } else {
                ccms_thread_unlock(cst);
        }
}

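/*
 * Return non-zero if the current thread holds the lock exclusively,
 * typically for use in assertions (hypothetical "ip" as above):
 *
 *      KKASSERT(ccms_thread_lock_owned(&ip->cst));
 */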
int
ccms_thread_lock_owned(ccms_cst_t *cst)
{
        return(cst->count < 0 && cst->td == curthread);
}