/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2005 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#include "thr_private.h"
/*
 * Compiler memory barrier: forbids the compiler from caching or
 * reordering memory accesses across this point.  It emits no
 * instructions and is NOT a CPU fence.
 */
#define cpu_ccfence()	__asm __volatile("" : : : "memory")
40 * This function is used to acquire a contested lock.
42 * A *mtx value of 1 indicates locked normally.
43 * A *mtx value of 2 indicates locked and contested.
46 __thr_umtx_lock(volatile umtx_t *mtx, int id, int timo)
48 int v, errval, ret = 0;
56 if (atomic_cmpset_acq_int(mtx, 0, id)) {
61 if ((v & 0x40000000) ||
62 atomic_cmpset_acq_int(mtx, v, v|0x40000000)) {
64 _umtx_sleep_err(mtx, v|0x40000000, timo);
65 } else if ((errval = _umtx_sleep_err(mtx, v|0x40000000, timo)) > 0) {
66 if (errval == EAGAIN) {
67 if (atomic_cmpset_acq_int(mtx, 0, id))
80 * Release a mutex. A contested mutex has a value
81 * of 2, an uncontested mutex has a value of 1.
84 __thr_umtx_unlock(volatile umtx_t *mtx, int id)
92 if (atomic_cmpset_acq_int(mtx, v, 0)) {
94 _umtx_wakeup_err(mtx, 1);
95 THR_ASSERT((v & 0x3FFFFFFF) == id,
96 "thr_umtx_unlock: wrong owner");
103 * Low level timed umtx lock. This function must never return
107 __thr_umtx_timedlock(volatile umtx_t *mtx, int id,
108 const struct timespec *timeout)
110 struct timespec ts, ts2, ts3;
113 if ((timeout->tv_sec < 0) ||
114 (timeout->tv_sec == 0 && timeout->tv_nsec <= 0)) {
118 /* XXX there should have MONO timer! */
119 clock_gettime(CLOCK_REALTIME, &ts);
120 TIMESPEC_ADD(&ts, &ts, timeout);
127 timo = (int)(ts2.tv_nsec / 1000);
133 ret = __thr_umtx_lock(mtx, id, timo);
134 if (ret != EINTR && ret != ETIMEDOUT)
136 clock_gettime(CLOCK_REALTIME, &ts3);
137 TIMESPEC_SUB(&ts2, &ts, &ts3);
138 if (ts2.tv_sec < 0 ||
139 (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
148 _thr_umtx_wait(volatile umtx_t *mtx, int exp, const struct timespec *timeout,
151 struct timespec ts, ts2, ts3;
152 int timo, errval, ret = 0;
158 if (timeout == NULL) {
160 * NOTE: If no timeout, EINTR cannot be returned. Ignore
163 while ((errval = _umtx_sleep_err(mtx, exp, 10000000)) > 0) {
167 if (errval == ETIMEDOUT || errval == EWOULDBLOCK) {
170 "thr_umtx_wait: FAULT VALUE CHANGE "
171 "%d -> %d oncond %p\n",
183 * Timed waits can return EINTR
185 if ((timeout->tv_sec < 0) ||
186 (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))
189 clock_gettime(clockid, &ts);
190 TIMESPEC_ADD(&ts, &ts, timeout);
195 timo = (int)(ts2.tv_nsec / 1000);
202 if ((errval = _umtx_sleep_err(mtx, exp, timo)) > 0) {
203 if (errval == EBUSY) {
207 if (errval == EINTR) {
213 clock_gettime(clockid, &ts3);
214 TIMESPEC_SUB(&ts2, &ts, &ts3);
215 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
224 _thr_umtx_wake(volatile umtx_t *mtx, int count)
226 _umtx_wakeup_err(mtx, count);