2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2000 BSDi
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * $FreeBSD: src/sys/dev/acpica/Osd/OsdSynch.c,v 1.21 2004/05/05 20:07:52 njl Exp $
28 * $DragonFly: src/sys/dev/acpica5/Osd/OsdSynch.c,v 1.11 2007/01/25 15:12:06 y0netan1 Exp $
32 * 6.1 : Mutual Exclusion and Synchronisation
40 #include <sys/kernel.h>
41 #include <sys/malloc.h>
42 #include <sys/sysctl.h>
44 #include <sys/thread.h>
45 #include <sys/thread2.h>
46 #include <sys/spinlock2.h>
/* ACPICA component/module identification used by the debug tracing macros. */
48 #define _COMPONENT ACPI_OS_SERVICES
49 ACPI_MODULE_NAME("SYNCH")
/* Kernel malloc type for all semaphore/spinlock allocations in this file. */
51 MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");
/* Convenience wrappers around the per-semaphore spinlock (as_spin). */
53 #define AS_LOCK(as) spin_lock_wr(&(as)->as_spin)
54 #define AS_UNLOCK(as) spin_unlock_wr(&(as)->as_spin)
58 * Simple counting semaphore implemented using a mutex. (Subsequently used
59 * in the OSI code to implement a mutex. Go figure.)
61 struct acpi_semaphore {
/* Spinlock protecting the counters in this structure (see AS_LOCK/AS_UNLOCK). */
62 struct spinlock as_spin;
/* NOTE(review): the remaining members referenced elsewhere in this file
 * (as_units, as_maxunits, as_pendings, as_resetting, as_timeouts) and the
 * closing brace are not visible in this chunk -- confirm against the full
 * source. */
70 #ifndef ACPI_NO_SEMAPHORES
/* Cap on the number of threads allowed to block on one semaphore at a time. */
71 #ifndef ACPI_SEMAPHORES_MAX_PENDING
72 #define ACPI_SEMAPHORES_MAX_PENDING 4
/* Debug knob: settable as a loader tunable and read-write via sysctl. */
74 static int acpi_semaphore_debug = 0;
75 TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
76 SYSCTL_DECL(_debug_acpi);
77 SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
78 &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
79 #endif /* !ACPI_NO_SEMAPHORES */
/*
 * Create a counting semaphore with MaxUnits capacity and InitialUnits
 * available units; the opaque handle is returned through *OutHandle.
 * Returns AE_BAD_PARAMETER for a NULL OutHandle or InitialUnits > MaxUnits,
 * AE_OK otherwise.
 *
 * FIX(review): the ACPI_DEBUG_PRINT format reads "max %d, initial %d" but
 * the arguments were passed as (InitialUnits, MaxUnits); the argument order
 * is corrected to match the message text.
 *
 * NOTE(review): the return-type line, braces, and the #else arm for
 * ACPI_NO_SEMAPHORES builds (which should enclose the second *OutHandle
 * assignment) are not visible in this chunk -- confirm against the full
 * source.
 */
82 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
83 ACPI_HANDLE *OutHandle)
85 #ifndef ACPI_NO_SEMAPHORES
86 struct acpi_semaphore *as;
88 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
/* Validate caller-supplied parameters before allocating anything. */
90 if (OutHandle == NULL)
91 return_ACPI_STATUS (AE_BAD_PARAMETER);
92 if (InitialUnits > MaxUnits)
93 return_ACPI_STATUS (AE_BAD_PARAMETER);
/* M_ZERO so all counters start at zero; M_INTWAIT allows early boot use. */
95 as = kmalloc(sizeof(*as), M_ACPISEM, M_INTWAIT | M_ZERO);
97 spin_init(&as->as_spin);
98 as->as_units = InitialUnits;
99 as->as_maxunits = MaxUnits;
100 as->as_pendings = as->as_resetting = as->as_timeouts = 0;
102 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
103 "created semaphore %p max %d, initial %d\n",
104 as, MaxUnits, InitialUnits));
106 *OutHandle = (ACPI_HANDLE)as;
/* NOTE(review): line below belongs to the (missing) ACPI_NO_SEMAPHORES
 * #else arm -- it is dead code in the !ACPI_NO_SEMAPHORES build. */
108 *OutHandle = (ACPI_HANDLE)OutHandle;
109 #endif /* !ACPI_NO_SEMAPHORES */
111 return_ACPI_STATUS (AE_OK);
/*
 * Destroy a semaphore previously created by AcpiOsCreateSemaphore,
 * releasing its spinlock and memory. Always returns AE_OK.
 * NOTE(review): no NULL-handle check is visible in this chunk -- confirm
 * whether callers guarantee a valid handle.
 */
115 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
117 #ifndef ACPI_NO_SEMAPHORES
118 struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
120 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
122 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
123 spin_uninit(&as->as_spin);
124 kfree(as, M_ACPISEM);
125 #endif /* !ACPI_NO_SEMAPHORES */
127 return_ACPI_STATUS (AE_OK);
/*
 * Wait for Units units on semaphore Handle, blocking for up to Timeout
 * milliseconds (ACPI_WAIT_FOREVER is approximated by the maximum 0xffff ms
 * timeout, per the comment below). Returns AE_OK on success, AE_TIME on
 * timeout or while a reset is in progress, AE_BAD_PARAMETER on a bad handle.
 *
 * FIX(review): four calls -- getmicrouptime()/timevaladd()/timevalsub() --
 * contained "&#164;ttv" mojibake where the HTML entity "&curren" swallowed
 * the start of "&currenttv"; the intended argument &currenttv is restored.
 *
 * NOTE(review): the function head, braces, AS_LOCK/AS_UNLOCK calls and the
 * retry-loop head are not visible in this chunk -- confirm against the
 * full source.
 */
131 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
133 #ifndef ACPI_NO_SEMAPHORES
135 struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
137 struct timeval timeouttv, currenttv, timelefttv;
140 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
143 return_ACPI_STATUS (AE_BAD_PARAMETER);
146 return_ACPI_STATUS (AE_OK);
/* Too many timeouts: assume lost wakeups, refill to capacity and start a
 * reset cycle so pending waiters drain with AE_TIME. */
149 if (as->as_units < Units && as->as_timeouts > 10) {
150 kprintf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
152 as->as_units = as->as_maxunits;
154 as->as_resetting = 1;
158 return_ACPI_STATUS (AE_TIME);
/* While a reset is in progress, new waiters fail immediately. */
161 if (as->as_resetting)
162 return_ACPI_STATUS (AE_TIME);
165 /* a timeout of ACPI_WAIT_FOREVER means "forever" */
166 if (Timeout == ACPI_WAIT_FOREVER) {
168 timeouttv.tv_sec = ((0xffff/1000) + 1); /* cf. ACPI spec */
169 timeouttv.tv_usec = 0;
171 /* compute timeout using microseconds per tick */
172 tmo = (Timeout * 1000) / (1000000 / hz);
175 timeouttv.tv_sec = Timeout / 1000;
176 timeouttv.tv_usec = (Timeout % 1000) * 1000;
179 /* calculate timeout value in timeval */
180 getmicrouptime(&currenttv);
181 timevaladd(&timeouttv, &currenttv);
184 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
185 "get %d units from semaphore %p (has %d), timeout %d\n",
186 Units, as, as->as_units, Timeout));
/* Unlimited semaphores never block. */
188 if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
/* Fast path: enough units available, take them and go. */
192 if (as->as_units >= Units) {
193 as->as_units -= Units;
198 /* limit number of pending treads */
199 if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
204 /* if timeout values of zero is specified, return immediately */
210 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
211 "semaphore blocked, calling ssleep(%p, %p, %d, \"acsem\", %d)\n",
212 as, &as->as_spin, PCATCH, tmo));
216 if (acpi_semaphore_debug) {
217 kprintf("%s: Sleep %jd, pending %jd, semaphore %p, thread %jd\n",
218 __func__, (intmax_t)Timeout,
219 (intmax_t)as->as_pendings, as,
220 (intmax_t)AcpiOsGetThreadId());
/* ssleep drops as_spin while blocked and re-acquires it on wakeup. */
223 rv = ssleep(as, &as->as_spin, PCATCH, "acsem", tmo);
228 if (as->as_resetting) {
229 /* semaphore reset, return immediately */
/* Last pending waiter clears the reset flag. */
230 if (as->as_pendings == 0) {
231 as->as_resetting = 0;
238 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "ssleep(%d) returned %d\n", tmo, rv));
239 if (rv == EWOULDBLOCK) {
244 /* check if we already awaited enough */
245 timelefttv = timeouttv;
246 getmicrouptime(&currenttv);
247 timevalsub(&timelefttv, &currenttv);
248 if (timelefttv.tv_sec < 0) {
249 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
255 /* adjust timeout for the next sleep */
256 tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
261 if (acpi_semaphore_debug) {
262 kprintf("%s: Wakeup timeleft(%ju, %ju), tmo %ju, sem %p, thread %jd\n",
264 (intmax_t)timelefttv.tv_sec, (intmax_t)timelefttv.tv_usec,
265 (intmax_t)tmo, as, (intmax_t)AcpiOsGetThreadId());
269 if (acpi_semaphore_debug) {
270 if (result == AE_TIME && Timeout > 0) {
271 kprintf("%s: Timeout %d, pending %d, semaphore %p\n",
272 __func__, Timeout, as->as_pendings, as);
274 if (result == AE_OK && (as->as_timeouts > 0 || as->as_pendings > 0)) {
275 kprintf("%s: Acquire %d, units %d, pending %d, sem %p, thread %jd\n",
276 __func__, Units, as->as_units, as->as_pendings, as,
277 (intmax_t)AcpiOsGetThreadId());
281 if (result == AE_TIME)
287 return_ACPI_STATUS (result);
289 return_ACPI_STATUS (AE_OK);
290 #endif /* !ACPI_NO_SEMAPHORES */
/*
 * Return Units units to the semaphore, clamping at as_maxunits for
 * limited semaphores. Returns AE_OK (AE_BAD_PARAMETER path visible at
 * line 303).
 * NOTE(review): the condition guarding the AE_BAD_PARAMETER return and the
 * wakeup/unlock lines are not visible in this chunk -- confirm against the
 * full source.
 */
294 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
296 #ifndef ACPI_NO_SEMAPHORES
297 struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
300 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
303 return_ACPI_STATUS(AE_BAD_PARAMETER);
306 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
307 "return %d units to semaphore %p (has %d)\n",
308 Units, as, as->as_units));
/* Only limited semaphores track a unit count; clamp to the maximum. */
309 if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
310 as->as_units += Units;
311 if (as->as_units > as->as_maxunits)
312 as->as_units = as->as_maxunits;
315 if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
316 kprintf("%s: Release %d, units %d, pending %d, semaphore %p, thread %jd\n",
317 __func__, Units, as->as_units, as->as_pendings, as,
318 (intmax_t)AcpiOsGetThreadId());
323 #endif /* !ACPI_NO_SEMAPHORES */
325 return_ACPI_STATUS (AE_OK);
/* Spinlock wrapper handed back to ACPICA as an ACPI_SPINLOCK handle. */
328 struct acpi_spinlock {
329 struct spinlock lock;
330 #ifdef ACPI_DEBUG_LOCKS
/* NOTE(review): debug members (owner, func, line -- referenced by the lock
 * routines below) and the closing brace are not visible in this chunk. */
/*
 * Allocate and initialize a spinlock; the handle is returned via
 * *OutHandle. Returns AE_BAD_PARAMETER for a NULL OutHandle.
 * NOTE(review): the declaration of 'spin', the *OutHandle assignment and
 * the return are not visible in this chunk.
 */
338 AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
342 if (OutHandle == NULL)
343 return (AE_BAD_PARAMETER);
344 spin = kmalloc(sizeof(*spin), M_ACPISEM, M_INTWAIT|M_ZERO);
345 spin_init(&spin->lock);
346 #ifdef ACPI_DEBUG_LOCKS
/* Tear down and free a spinlock created by AcpiOsCreateLock. */
356 AcpiOsDeleteLock (ACPI_SPINLOCK Spin)
360 spin_uninit(&Spin->lock);
361 kfree(Spin, M_ACPISEM);
365 * OS-dependent locking primitives. These routines should be able to be
366 * called from an interrupt-handler or cpu_idle thread.
368 * NB: some of ACPI-CA functions with locking flags, say AcpiSetRegister(),
369 * are changed to unconditionally call AcpiOsAcquireLock/AcpiOsReleaseLock.
/*
 * Acquire an ACPI spinlock. With ACPI_DEBUG_LOCKS the caller's func/line
 * are taken as extra parameters so recursive acquisition can be reported
 * with the previous owner's location.
 * NOTE(review): the #else/#endif around the two prototypes, the braces,
 * and the Spin->func/Spin->line assignments are not visible in this chunk.
 */
372 #ifdef ACPI_DEBUG_LOCKS
373 _AcpiOsAcquireLock (ACPI_SPINLOCK Spin, const char *func, int line)
375 AcpiOsAcquireLock (ACPI_SPINLOCK Spin)
378 spin_lock_wr(&Spin->lock);
380 #ifdef ACPI_DEBUG_LOCKS
/* An already-recorded owner indicates a recursion bug; report it. */
382 kprintf("%p(%s:%d): acpi_spinlock %p already held by %p(%s:%d)\n",
383 curthread, func, line, Spin, Spin->owner, Spin->func,
387 Spin->owner = curthread;
/*
 * Release an ACPI spinlock. Under ACPI_DEBUG_LOCKS the recorded owner is
 * checked first so a mismatched release is reported with the holder's
 * location.
 *
 * FIX(review): corrected the misspelled log message "unexectedly" ->
 * "unexpectedly".
 *
 * NOTE(review): the condition guarding this kprintf, the owner-clearing
 * code, braces and #endif are not visible in this chunk -- confirm against
 * the full source.
 */
396 AcpiOsReleaseLock (ACPI_SPINLOCK Spin, ACPI_CPU_FLAGS Flags)
398 #ifdef ACPI_DEBUG_LOCKS
400 if (Spin->owner != NULL) {
401 kprintf("%p: acpi_spinlock %p is unexpectedly held by %p(%s:%d)\n",
402 curthread, Spin, Spin->owner, Spin->func, Spin->line);
411 spin_unlock_wr(&Spin->lock);
414 /* Section 5.2.9.1: global lock acquire/release functions */
/* Return value meaning "lock acquired" for acpi_acquire_global_lock(). */
415 #define GL_ACQUIRED (-1)
/* NOTE(review): the GL_BUSY definition used below is not visible in this
 * chunk. */
/* Bit layout of the FACS global lock word: pending waiter / current owner. */
417 #define GL_BIT_PENDING 0x1
418 #define GL_BIT_OWNED 0x2
419 #define GL_BIT_MASK (GL_BIT_PENDING | GL_BIT_OWNED)
422 * Acquire the global lock. If busy, set the pending bit. The caller
423 * will wait for notification from the BIOS that the lock is available
424 * and then attempt to acquire it again.
427 acpi_acquire_global_lock(uint32_t *lock)
/* Set OWNED; if it was already owned, shift the old OWNED bit down so it
 * becomes PENDING, telling the current owner to signal us on release.
 * NOTE(review): the loop head and the read of *lock into 'old' are not
 * visible in this chunk. */
433 new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
434 ((old >> 1) & GL_BIT_PENDING);
/* Retry the compare-and-swap until the update lands atomically. */
435 } while (atomic_cmpset_int(lock, old, new) == 0);
/* new < GL_BIT_MASK means PENDING is clear, i.e. we now own the lock. */
437 return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
441 * Release the global lock, returning whether there is a waiter pending.
442 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
446 acpi_release_global_lock(uint32_t *lock)
/* Clear both OWNED and PENDING atomically; retry the CAS until it lands.
 * NOTE(review): the loop head and the read of *lock into 'old' are not
 * visible in this chunk. */
452 new = old & ~GL_BIT_MASK;
453 } while (atomic_cmpset_int(lock, old, new) == 0);
/* Nonzero iff a waiter had set PENDING; caller must then signal the BIOS. */
455 return (old & GL_BIT_PENDING);