2 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
3 * Copyright (c) 2005 Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * $DragonFly: src/lib/libthread_xu/thread/thr_umtx.c,v 1.2 2005/03/15 11:24:23 davidxu Exp $
32 * Part of this code is derived from /usr/src/test/debug/umtx.c.
40 #include "thr_private.h"
42 static int get_contested(volatile umtx_t *mtx, int timo);

/*
 * Acquire a userland mutex.
 *
 * Fast path: when the UMTX_LOCKED bit is clear, attempt to set it with a
 * single compare-and-set.  Slow path: the lock is held, so atomically bump
 * the contested count (v + 1) to announce a waiter, then block in
 * get_contested() until the lock can be taken.
 *
 * NOTE(review): this extract elides interior lines (loop header, variable
 * declarations, return paths); only the visible statements are described.
 * timo is presumably a microsecond timeout forwarded to umtx_sleep(), with
 * 0 meaning wait indefinitely — confirm against get_contested() below.
 */
45 __thr_umtx_lock(volatile umtx_t *mtx, int timo)
52 if ((v & UMTX_LOCKED) == 0) {
53 /* not locked, attempt to lock. */
54 if (atomic_cmpset_acq_int(mtx, v, v | UMTX_LOCKED)) {
60 * Locked, bump the contested count and obtain
61 * the contested mutex.
63 if (atomic_cmpset_acq_int(mtx, v, v + 1)) {
64 ret = get_contested(mtx, timo);
/*
 * Block until a contested mutex can be acquired.
 *
 * Called after __thr_umtx_lock() has registered this thread in the
 * mutex word's contested count.  Loop invariant (asserted below): our
 * contesting count is still present in the non-lock bits of *mtx.
 *
 * If the lock bit clears, one cmpset simultaneously removes our count
 * and sets UMTX_LOCKED ((v - 1) | UMTX_LOCKED).  If a previous
 * umtx_sleep() returned an error (e.g., timeout), remove our count
 * (v - 1) and bail out.  Otherwise sleep on the mutex word: timo == 0
 * sleeps without a timeout, non-zero passes the timeout through and the
 * error return from umtx_sleep() is checked.
 *
 * NOTE(review): interior lines are elided in this extract; the exact
 * control flow between these statements cannot be confirmed from here.
 */
74 get_contested(volatile umtx_t *mtx, int timo)
81 assert(v & ~UMTX_LOCKED); /* our contesting count still there */
82 if ((v & UMTX_LOCKED) == 0) {
84 * Not locked, attempt to remove our contested
85 * count and lock at the same time.
87 if (atomic_cmpset_acq_int(mtx, v, (v - 1) | UMTX_LOCKED)) {
93 * Retried after resuming from umtx_sleep, try to leave if there
94 * was an error, e.g., timeout.
97 if (atomic_cmpset_acq_int(mtx, v, v - 1))
104 * Still locked, sleep and try again.
107 umtx_sleep(mtx, v, 0);
109 if (umtx_sleep(mtx, v, timo) < 0) {
/*
 * Release a userland mutex held by the caller.
 *
 * Uncontested case (word is exactly UMTX_LOCKED): cmpset it to 0 and
 * return.  Contested case: clear only the lock bit, leaving the waiter
 * count intact, and (per the comment below) wake one sleeper.  The wakeup
 * may race a fresh locker; a woken waiter that loses simply re-sleeps in
 * get_contested().
 *
 * NOTE(review): lines are elided in this extract — the retry loop and the
 * umtx_wakeup() call implied by the comment are not visible here.
 */
121 __thr_umtx_unlock(volatile umtx_t *mtx)
127 assert(v & UMTX_LOCKED); /* we still have it locked */
128 if (v == UMTX_LOCKED) {
130 * We hold an uncontested lock, try to set to an unlocked
133 if (atomic_cmpset_acq_int(mtx, UMTX_LOCKED, 0))
137 * We hold a contested lock, unlock and wakeup exactly
138 * one sleeper. It is possible for this to race a new
139 * thread obtaining a lock, in which case any contested
140 * sleeper we wake up will simply go back to sleep.
142 if (atomic_cmpset_acq_int(mtx, v, v & ~UMTX_LOCKED)) {
/*
 * Acquire a userland mutex with an absolute-style timeout built from a
 * relative one.
 *
 * Rejects non-positive timeouts up front, computes the wall-clock
 * deadline (ts = now + timeout), then repeatedly calls __thr_umtx_lock()
 * with the remaining time converted to microseconds.  After each
 * ETIMEDOUT it recomputes the remainder (ts2 = ts - now) and gives up
 * once the deadline has passed.
 *
 * NOTE(review): elided lines hide the loop structure and return values;
 * the timo conversion visibly uses only tv_nsec / 1000 — the tv_sec
 * contribution is presumably on an elided line. Confirm before relying
 * on this description.
 */
151 __thr_umtx_timedlock(volatile umtx_t *mtx, const struct timespec *timeout)
153 struct timespec ts, ts2, ts3;
156 if ((timeout->tv_sec < 0) ||
157 (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))
160 /* XXX should use a monotonic clock; CLOCK_REALTIME is affected by wall-clock steps */
161 clock_gettime(CLOCK_REALTIME, &ts);
162 TIMESPEC_ADD(&ts, &ts, timeout);
167 timo = (int)(ts2.tv_nsec / 1000);
173 ret = __thr_umtx_lock(mtx, timo);
174 if (ret != ETIMEDOUT)
176 clock_gettime(CLOCK_REALTIME, &ts3);
177 TIMESPEC_SUB(&ts2, &ts, &ts3);
178 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
/*
 * Wait on a umtx word until it no longer holds the expected value 'exp',
 * optionally bounded by a (relative) timeout evaluated against 'clockid'.
 *
 * NULL timeout: sleep indefinitely (umtx_sleep with timo 0) and inspect
 * errno on failure.  Otherwise: reject non-positive timeouts, compute the
 * deadline (ts = now + timeout), and loop sleeping in bounded slices;
 * EBUSY (value changed) and EINTR get distinct handling on elided lines,
 * and the remaining time (ts2 = ts - now) is rechecked after each wakeup,
 * returning once the deadline passes.
 *
 * NOTE(review): elided lines hide the loop structure, return values, and
 * the tv_sec part of the timo conversion (only tv_nsec / 1000 is visible)
 * — confirm against the full source.
 */
187 _thr_umtx_wait(volatile umtx_t *mtx, int exp, const struct timespec *timeout,
190 struct timespec ts, ts2, ts3;
196 if (timeout == NULL) {
197 if (umtx_sleep(mtx, exp, 0) < 0) {
204 if ((timeout->tv_sec < 0) ||
205 (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))
208 clock_gettime(clockid, &ts);
209 TIMESPEC_ADD(&ts, &ts, timeout);
214 timo = (int)(ts2.tv_nsec / 1000);
220 if (umtx_sleep(mtx, exp, timo) < 0) {
221 if (errno == EBUSY) {
224 } else if (errno == EINTR) {
229 clock_gettime(clockid, &ts3);
230 TIMESPEC_SUB(&ts2, &ts, &ts3);
231 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
/*
 * Wake up to 'count' threads sleeping in umtx_sleep() on this mutex word.
 * Thin wrapper around the kernel's umtx_wakeup() primitive.
 */
239 void _thr_umtx_wake(volatile umtx_t *mtx, int count)
241 umtx_wakeup(mtx, count);