/*
 * Copyright (c) 2006,2012-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The Cache Coherency Management System (CCMS)
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <machine/limits.h>

#include <sys/spinlock2.h>

#include "hammer2_ccms.h"
#include "hammer2.h"
void
ccms_cst_init(ccms_cst_t *cst)
{
        bzero(cst, sizeof(*cst));
        hammer2_spin_init(&cst->spin, "ccmscst");
}

void
ccms_cst_uninit(ccms_cst_t *cst)
{
        KKASSERT(cst->count == 0);
        if (cst->state != CCMS_STATE_INVALID) {
                /* XXX */
        }
}
/************************************************************************
 *                        CST SUPPORT FUNCTIONS                         *
 ************************************************************************/
/*
 * Acquire local cache state & lock.  If the current thread already holds
 * the lock exclusively we bump the exclusive count, even if the thread is
 * trying to get a shared lock.
 */
void
ccms_thread_lock(ccms_cst_t *cst, ccms_state_t state)
{
        /*
         * Regardless of the type of lock requested, if the current thread
         * already holds an exclusive lock we bump the exclusive count and
         * return.  This requires no spinlock.
         */
        if (cst->count < 0 && cst->td == curthread) {
                --cst->count;
                return;
        }

        /*
         * Otherwise use the spinlock to interlock the operation and sleep
         * as necessary.
         */
        hammer2_spin_ex(&cst->spin);
        if (state == CCMS_STATE_SHARED) {
                while (cst->count < 0 || cst->upgrade) {
                        cst->blocked = 1;
                        ssleep(cst, &cst->spin, 0, "ccmslck", hz);
                }
                ++cst->count;
                KKASSERT(cst->td == NULL);
        } else if (state == CCMS_STATE_EXCLUSIVE) {
                while (cst->count != 0 || cst->upgrade) {
                        cst->blocked = 1;
                        ssleep(cst, &cst->spin, 0, "ccmslck", hz);
                }
                cst->count = -1;
                cst->td = curthread;
        } else {
                hammer2_spin_unex(&cst->spin);
                panic("ccms_thread_lock: bad state %d\n", state);
        }
        hammer2_spin_unex(&cst->spin);
}
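
/*
 * Illustrative sketch, not part of the original source: because an
 * exclusive holder simply bumps the exclusive count, a thread that owns
 * the CST exclusively can re-lock it (even as SHARED) without deadlock.
 * The function name is hypothetical.
 */
#if 0
static void
example_recursive_lock(ccms_cst_t *cst)
{
        ccms_thread_lock(cst, CCMS_STATE_EXCLUSIVE);    /* count = -1 */
        ccms_thread_lock(cst, CCMS_STATE_SHARED);       /* nests, count = -2 */
        ccms_thread_unlock(cst);                        /* count = -1 */
        ccms_thread_unlock(cst);                        /* fully released */
}
#endif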
/*
 * Same as ccms_thread_lock() but acquires the lock non-blocking.  Returns
 * 0 on success, EBUSY on failure.
 */
int
ccms_thread_lock_nonblock(ccms_cst_t *cst, ccms_state_t state)
{
        if (cst->count < 0 && cst->td == curthread) {
                --cst->count;
                return(0);
        }

        hammer2_spin_ex(&cst->spin);
        if (state == CCMS_STATE_SHARED) {
                if (cst->count < 0 || cst->upgrade) {
                        hammer2_spin_unex(&cst->spin);
                        return(EBUSY);
                }
                ++cst->count;
                KKASSERT(cst->td == NULL);
        } else if (state == CCMS_STATE_EXCLUSIVE) {
                if (cst->count != 0 || cst->upgrade) {
                        hammer2_spin_unex(&cst->spin);
                        return(EBUSY);
                }
                cst->count = -1;
                cst->td = curthread;
        } else {
                hammer2_spin_unex(&cst->spin);
                panic("ccms_thread_lock_nonblock: bad state %d\n", state);
        }
        hammer2_spin_unex(&cst->spin);
        return(0);
}
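
/*
 * Illustrative sketch, not part of the original source: a typical
 * non-blocking acquisition with an EBUSY fallback.  The function name is
 * hypothetical.
 */
#if 0
static int
example_trylock(ccms_cst_t *cst)
{
        if (ccms_thread_lock_nonblock(cst, CCMS_STATE_EXCLUSIVE) != 0)
                return(EBUSY);          /* caller retries or backs off */
        /* ... modify the protected structure ... */
        ccms_thread_unlock(cst);
        return(0);
}
#endif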
/*
 * Temporarily release the lock, returning the state it was held in so
 * that ccms_thread_lock_temp_restore() can reacquire it.
 */
ccms_state_t
ccms_thread_lock_temp_release(ccms_cst_t *cst)
{
        if (cst->count < 0) {
                ccms_thread_unlock(cst);
                return(CCMS_STATE_EXCLUSIVE);
        }
        if (cst->count > 0) {
                ccms_thread_unlock(cst);
                return(CCMS_STATE_SHARED);
        }
        return(CCMS_STATE_INVALID);
}

void
ccms_thread_lock_temp_restore(ccms_cst_t *cst, ccms_state_t ostate)
{
        ccms_thread_lock(cst, ostate);
}
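
/*
 * Illustrative sketch, not part of the original source: the temp
 * release/restore pair brackets an operation that must run with the CST
 * unlocked, such as a blocking allocation.  The function name is
 * hypothetical.
 */
#if 0
static void
example_temp_release(ccms_cst_t *cst)
{
        ccms_state_t ostate;

        ostate = ccms_thread_lock_temp_release(cst);
        /* ... blocking operation with the CST unlocked ... */
        ccms_thread_lock_temp_restore(cst, ostate);
}
#endif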
/*
 * Temporarily upgrade a thread lock for making local structural changes.
 * No new shared or exclusive locks can be acquired by others while we are
 * upgrading, but other upgraders are allowed.
 */
ccms_state_t
ccms_thread_lock_upgrade(ccms_cst_t *cst)
{
        /*
         * Nothing to do if already exclusive
         */
        if (cst->count < 0) {
                KKASSERT(cst->td == curthread);
                return(CCMS_STATE_EXCLUSIVE);
        }

        /*
         * Convert a shared lock to exclusive.
         */
        if (cst->count > 0) {
                hammer2_spin_ex(&cst->spin);
                ++cst->upgrade;
                --cst->count;
                while (cst->count) {
                        cst->blocked = 1;
                        ssleep(cst, &cst->spin, 0, "ccmsupg", hz);
                }
                cst->count = -1;
                cst->td = curthread;
                hammer2_spin_unex(&cst->spin);
                return(CCMS_STATE_SHARED);
        }
        panic("ccms_thread_lock_upgrade: not locked");
        /* NOT REACHED */
        return(CCMS_STATE_EXCLUSIVE);
}
/*
 * Undo an upgrade obtained with ccms_thread_lock_upgrade(), returning
 * the lock to its previous state.
 */
void
ccms_thread_lock_downgrade(ccms_cst_t *cst, ccms_state_t ostate)
{
        if (ostate == CCMS_STATE_SHARED) {
                KKASSERT(cst->td == curthread);
                KKASSERT(cst->count == -1);
                hammer2_spin_ex(&cst->spin);
                --cst->upgrade;
                cst->count = 1;
                cst->td = NULL;
                if (cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                } else {
                        hammer2_spin_unex(&cst->spin);
                }
        }
        /* else nothing to do if excl->excl */
}
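
/*
 * Illustrative sketch, not part of the original source: a shared holder
 * upgrades around a structural change and then downgrades back to its
 * original state.  The function name is hypothetical.
 */
#if 0
static void
example_upgrade_downgrade(ccms_cst_t *cst)
{
        ccms_state_t ostate;

        ccms_thread_lock(cst, CCMS_STATE_SHARED);
        ostate = ccms_thread_lock_upgrade(cst);  /* returns CCMS_STATE_SHARED */
        /* ... make structural changes under the exclusive lock ... */
        ccms_thread_lock_downgrade(cst, ostate); /* back to shared */
        ccms_thread_unlock(cst);
}
#endif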
/*
 * Release a local thread lock
 */
void
ccms_thread_unlock(ccms_cst_t *cst)
{
        if (cst->count < 0) {
                /*
                 * Exclusive
                 */
                KKASSERT(cst->td == curthread);
                if (cst->count < -1) {
                        ++cst->count;
                        return;
                }
                hammer2_spin_ex(&cst->spin);
                KKASSERT(cst->count == -1);
                cst->count = 0;
                cst->td = NULL;
                if (cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                        return;
                }
                hammer2_spin_unex(&cst->spin);
        } else if (cst->count > 0) {
                /*
                 * Shared
                 */
                hammer2_spin_ex(&cst->spin);
                if (--cst->count == 0 && cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                        return;
                }
                hammer2_spin_unex(&cst->spin);
        } else {
                panic("ccms_thread_unlock: bad zero count\n");
        }
}
/*
 * Set ownership of an exclusively held lock to the current thread.
 */
void
ccms_thread_lock_setown(ccms_cst_t *cst)
{
        KKASSERT(cst->count < 0);
        cst->td = curthread;
}
/*
 * Release a previously upgraded local thread lock
 */
void
ccms_thread_unlock_upgraded(ccms_cst_t *cst, ccms_state_t ostate)
{
        if (ostate == CCMS_STATE_SHARED) {
                KKASSERT(cst->td == curthread);
                KKASSERT(cst->count == -1);
                hammer2_spin_ex(&cst->spin);
                --cst->upgrade;
                cst->count = 0;
                cst->td = NULL;
                if (cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                } else {
                        hammer2_spin_unex(&cst->spin);
                }
        } else {
                ccms_thread_unlock(cst);
        }
}
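
/*
 * Illustrative sketch, not part of the original source: when the lock
 * should be dropped outright after an upgrade, unlock_upgraded releases
 * it in one step instead of downgrading first.  The function name is
 * hypothetical.
 */
#if 0
static void
example_unlock_upgraded(ccms_cst_t *cst)
{
        ccms_state_t ostate;

        ccms_thread_lock(cst, CCMS_STATE_SHARED);
        ostate = ccms_thread_lock_upgrade(cst);
        /* ... structural changes ... */
        ccms_thread_unlock_upgraded(cst, ostate);       /* fully released */
}
#endif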
/*
 * Return non-zero if the current thread holds the lock exclusively.
 */
int
ccms_thread_lock_owned(ccms_cst_t *cst)
{
        return(cst->count < 0 && cst->td == curthread);
}
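
/*
 * Illustrative sketch, not part of the original source: the ownership
 * test is typically used in assertions guarding code paths that require
 * the exclusive lock.  The function name is hypothetical.
 */
#if 0
static void
example_assert_owned(ccms_cst_t *cst)
{
        KKASSERT(ccms_thread_lock_owned(cst));
        /* ... code that requires the caller to hold the lock exclusively ... */
}
#endif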