AMD64 - Enable module building, sync i386 headers etc as needed.
[dragonfly.git] / sys / dev / acpica5 / Osd / OsdSynch.c
/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/acpica/Osd/OsdSynch.c,v 1.21 2004/05/05 20:07:52 njl Exp $
 * $DragonFly: src/sys/dev/acpica5/Osd/OsdSynch.c,v 1.11 2007/01/25 15:12:06 y0netan1 Exp $
 */

/*
 * 6.1 : Mutual Exclusion and Synchronisation
 */

#include "acpi.h"

#include "opt_acpi.h"
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

#define _COMPONENT	ACPI_OS_SERVICES
ACPI_MODULE_NAME("SYNCH")

MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");

#define AS_LOCK(as)	spin_lock_wr(&(as)->as_mtx)
#define AS_UNLOCK(as)	spin_unlock_wr(&(as)->as_mtx)
#define AS_LOCK_DECL

/*
 * Simple counting semaphore implemented using a mutex.  (Subsequently used
 * in the OSI code to implement a mutex.  Go figure.)
 */
struct acpi_semaphore {
    struct spinlock as_mtx;	/* interlock protecting the fields below */
    UINT32	as_units;	/* units currently available */
    UINT32	as_maxunits;	/* unit limit, or ACPI_NO_UNIT_LIMIT */
    UINT32	as_pendings;	/* threads sleeping on the semaphore */
    UINT32	as_resetting;	/* only used by the #if 0 reset code below */
    UINT32	as_timeouts;	/* consecutive timed-out waits */
};

#ifndef ACPI_NO_SEMAPHORES
#ifndef ACPI_SEMAPHORES_MAX_PENDING
#define ACPI_SEMAPHORES_MAX_PENDING	4
#endif
static int	acpi_semaphore_debug = 0;
TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
SYSCTL_DECL(_debug_acpi);
SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
    &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
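
/*
 * Note: the declarations above expose the debug knob as the sysctl
 * debug.acpi.semaphore_debug and as the loader tunable
 * "debug.acpi_semaphore_debug"; setting either to a non-zero value
 * enables the kprintf() diagnostics in the wait/signal paths below.
 */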
#endif /* !ACPI_NO_SEMAPHORES */

ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
    ACPI_HANDLE *OutHandle)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore *as;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (OutHandle == NULL)
        return_ACPI_STATUS (AE_BAD_PARAMETER);
    if (InitialUnits > MaxUnits)
        return_ACPI_STATUS (AE_BAD_PARAMETER);

    as = kmalloc(sizeof(*as), M_ACPISEM, M_INTWAIT | M_ZERO);

    spin_init(&as->as_mtx);
    as->as_units = InitialUnits;
    as->as_maxunits = MaxUnits;
    as->as_pendings = as->as_resetting = as->as_timeouts = 0;

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
        "created semaphore %p max %d, initial %d\n",
        as, MaxUnits, InitialUnits));

    *OutHandle = (ACPI_HANDLE)as;
#else
    *OutHandle = (ACPI_HANDLE)OutHandle;
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}
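
/*
 * Usage sketch (illustrative only): ACPICA typically creates a binary
 * semaphore and brackets a critical region with wait/signal, e.g.
 *
 *	ACPI_HANDLE sem;
 *
 *	if (ACPI_SUCCESS(AcpiOsCreateSemaphore(1, 1, &sem))) {
 *		if (ACPI_SUCCESS(AcpiOsWaitSemaphore(sem, 1, ACPI_WAIT_FOREVER))) {
 *			... critical section ...
 *			AcpiOsSignalSemaphore(sem, 1);
 *		}
 *		AcpiOsDeleteSemaphore(sem);
 *	}
 */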

ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
    spin_uninit(&as->as_mtx);
    kfree(as, M_ACPISEM);
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
{
#ifndef ACPI_NO_SEMAPHORES
    ACPI_STATUS		result;
    struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
    int			rv, tmo;
    struct timeval	timeouttv, currenttv, timelefttv;
    AS_LOCK_DECL;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
        return_ACPI_STATUS (AE_BAD_PARAMETER);

    if (cold)
        return_ACPI_STATUS (AE_OK);

#if 0
    if (as->as_units < Units && as->as_timeouts > 10) {
        kprintf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
        AS_LOCK(as);
        as->as_units = as->as_maxunits;
        if (as->as_pendings)
            as->as_resetting = 1;
        as->as_timeouts = 0;
        wakeup(as);
        AS_UNLOCK(as);
        return_ACPI_STATUS (AE_TIME);
    }

    if (as->as_resetting)
        return_ACPI_STATUS (AE_TIME);
#endif

    /* a timeout of ACPI_WAIT_FOREVER means "forever" */
    if (Timeout == ACPI_WAIT_FOREVER) {
        tmo = 0;
        timeouttv.tv_sec = ((0xffff/1000) + 1);	/* cf. ACPI spec */
        timeouttv.tv_usec = 0;
    } else {
        /* compute timeout using microseconds per tick */
        tmo = (Timeout * 1000) / (1000000 / hz);
        if (tmo <= 0)
            tmo = 1;
        timeouttv.tv_sec = Timeout / 1000;
        timeouttv.tv_usec = (Timeout % 1000) * 1000;
    }
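
    /*
     * The tick conversion above, worked through: with e.g. hz = 100 there
     * are 1000000/hz = 10000 microseconds per tick, so a 50 ms Timeout
     * yields tmo = (50 * 1000) / 10000 = 5 ticks; sub-tick timeouts are
     * rounded up to a single tick.
     */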

    /* calculate timeout value in timeval */
    getmicrouptime(&currenttv);
    timevaladd(&timeouttv, &currenttv);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
        "get %d units from semaphore %p (has %d), timeout %d\n",
        Units, as, as->as_units, Timeout));
    for (;;) {
        if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
            result = AE_OK;
            break;
        }
        if (as->as_units >= Units) {
            as->as_units -= Units;
            result = AE_OK;
            break;
        }

        /* limit the number of pending threads */
        if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
            result = AE_TIME;
            break;
        }

        /* if a timeout of zero is specified, return immediately */
        if (Timeout == 0) {
            result = AE_TIME;
            break;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
            "semaphore blocked, calling msleep(%p, %p, %d, \"acsem\", %d)\n",
            as, &as->as_mtx, PCATCH, tmo));

        as->as_pendings++;

        if (acpi_semaphore_debug) {
            kprintf("%s: Sleep %d, pending %d, semaphore %p, thread %d\n",
                __func__, Timeout, as->as_pendings, as, AcpiOsGetThreadId());
        }

        rv = msleep(as, &as->as_mtx, PCATCH, "acsem", tmo);

        as->as_pendings--;

#if 0
        if (as->as_resetting) {
            /* semaphore reset, return immediately */
            if (as->as_pendings == 0) {
                as->as_resetting = 0;
            }
            result = AE_TIME;
            break;
        }
#endif

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "msleep(%d) returned %d\n", tmo, rv));
        if (rv == EWOULDBLOCK) {
            result = AE_TIME;
            break;
        }

        /* check whether we have already waited long enough */
        timelefttv = timeouttv;
        getmicrouptime(&currenttv);
        timevalsub(&timelefttv, &currenttv);
        if (timelefttv.tv_sec < 0) {
            ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
                as));
            result = AE_TIME;
            break;
        }

        /* adjust timeout for the next sleep */
        tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
            (1000000 / hz);
        if (tmo <= 0)
            tmo = 1;

        if (acpi_semaphore_debug) {
            kprintf("%s: Wakeup timeleft(%lu, %lu), tmo %u, sem %p, thread %d\n",
                __func__, timelefttv.tv_sec, timelefttv.tv_usec, tmo, as,
                AcpiOsGetThreadId());
        }
    }

    if (acpi_semaphore_debug) {
        if (result == AE_TIME && Timeout > 0) {
            kprintf("%s: Timeout %d, pending %d, semaphore %p\n",
                __func__, Timeout, as->as_pendings, as);
        }
        if (result == AE_OK && (as->as_timeouts > 0 || as->as_pendings > 0)) {
            kprintf("%s: Acquire %d, units %d, pending %d, sem %p, thread %d\n",
                __func__, Units, as->as_units, as->as_pendings, as,
                AcpiOsGetThreadId());
        }
    }

    if (result == AE_TIME)
        as->as_timeouts++;
    else
        as->as_timeouts = 0;

    AS_UNLOCK(as);
    return_ACPI_STATUS (result);
#else
    return_ACPI_STATUS (AE_OK);
#endif /* !ACPI_NO_SEMAPHORES */
}

ACPI_STATUS
AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
{
#ifndef ACPI_NO_SEMAPHORES
    struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
    AS_LOCK_DECL;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (as == NULL)
        return_ACPI_STATUS(AE_BAD_PARAMETER);

    AS_LOCK(as);
    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
        "return %d units to semaphore %p (has %d)\n",
        Units, as, as->as_units));
    if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
        as->as_units += Units;
        if (as->as_units > as->as_maxunits)
            as->as_units = as->as_maxunits;
    }

    if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
        kprintf("%s: Release %d, units %d, pending %d, semaphore %p, thread %d\n",
            __func__, Units, as->as_units, as->as_pendings, as, AcpiOsGetThreadId());
    }

    wakeup(as);
    AS_UNLOCK(as);
#endif /* !ACPI_NO_SEMAPHORES */

    return_ACPI_STATUS (AE_OK);
}

struct acpi_spinlock {
    struct spinlock lock;
#ifdef ACPI_DEBUG_LOCKS
    thread_t	owner;
    const char	*func;
    int		line;
#endif
};

ACPI_STATUS
AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
{
    ACPI_SPINLOCK spin;

    if (OutHandle == NULL)
        return (AE_BAD_PARAMETER);
    spin = kmalloc(sizeof(*spin), M_ACPISEM, M_INTWAIT|M_ZERO);
    spin_init(&spin->lock);
#ifdef ACPI_DEBUG_LOCKS
    spin->owner = NULL;
    spin->func = "";
    spin->line = 0;
#endif
    *OutHandle = spin;
    return (AE_OK);
}

void
AcpiOsDeleteLock (ACPI_SPINLOCK Spin)
{
    if (Spin == NULL)
        return;
    spin_uninit(&Spin->lock);
    kfree(Spin, M_ACPISEM);
}

/*
 * OS-dependent locking primitives.  These routines should be able to be
 * called from an interrupt handler or the cpu_idle thread.
 *
 * NB: some ACPI-CA functions that take locking flags, e.g. AcpiSetRegister(),
 * have been changed to call AcpiOsAcquireLock/AcpiOsReleaseLock
 * unconditionally.
 */
ACPI_CPU_FLAGS
#ifdef ACPI_DEBUG_LOCKS
_AcpiOsAcquireLock (ACPI_SPINLOCK Spin, const char *func, int line)
#else
AcpiOsAcquireLock (ACPI_SPINLOCK Spin)
#endif
{
    spin_lock_wr(&Spin->lock);

#ifdef ACPI_DEBUG_LOCKS
    if (Spin->owner) {
        kprintf("%p(%s:%d): acpi_spinlock %p already held by %p(%s:%d)\n",
            curthread, func, line, Spin, Spin->owner, Spin->func,
            Spin->line);
        print_backtrace();
    } else {
        Spin->owner = curthread;
        Spin->func = func;
        Spin->line = line;
    }
#endif
    return(0);
}

void
AcpiOsReleaseLock (ACPI_SPINLOCK Spin, ACPI_CPU_FLAGS Flags)
{
#ifdef ACPI_DEBUG_LOCKS
    if (Flags) {
        if (Spin->owner != NULL) {
            kprintf("%p: acpi_spinlock %p is unexpectedly held by %p(%s:%d)\n",
                curthread, Spin, Spin->owner, Spin->func, Spin->line);
            print_backtrace();
        } else
            return;
    }
    Spin->owner = NULL;
    Spin->func = "";
    Spin->line = 0;
#endif
    spin_unlock_wr(&Spin->lock);
}

/* Section 5.2.9.1: global lock acquire/release functions */
#define GL_ACQUIRED	(-1)
#define GL_BUSY		0
#define GL_BIT_PENDING	0x1
#define GL_BIT_OWNED	0x2
#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)

/*
 * Acquire the global lock.  If busy, set the pending bit.  The caller
 * will wait for notification from the BIOS that the lock is available
 * and then attempt to acquire it again.
 */
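/*
 * Lock-word layout, per the bit definitions above (ACPI spec 5.2.9.1):
 * bit 0 is the pending flag, bit 1 the owned flag.  If the lock is free,
 * the exchange below sets only the owned bit, new stays below GL_BIT_MASK,
 * and GL_ACQUIRED is returned.  If it is already owned, (old >> 1) shifts
 * the owned bit into the pending position, new reaches GL_BIT_MASK, and
 * GL_BUSY is returned with the pending bit left set.
 */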
int
acpi_acquire_global_lock(uint32_t *lock)
{
    uint32_t new, old;

    do {
        old = *lock;
        new = ((old & ~GL_BIT_MASK) | GL_BIT_OWNED) |
            ((old >> 1) & GL_BIT_PENDING);
    } while (atomic_cmpset_int(lock, old, new) == 0);

    return ((new < GL_BIT_MASK) ? GL_ACQUIRED : GL_BUSY);
}

/*
 * Release the global lock, returning whether there is a waiter pending.
 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
 * releases the lock.
 */
int
acpi_release_global_lock(uint32_t *lock)
{
    uint32_t new, old;

    do {
        old = *lock;
        new = old & ~GL_BIT_MASK;
    } while (atomic_cmpset_int(lock, old, new) == 0);

    return (old & GL_BIT_PENDING);
}
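
/*
 * Usage note (sketch): a GL_BUSY return from acpi_acquire_global_lock()
 * means the caller should block until the firmware signals a global-lock
 * release event and then retry; a non-zero return from
 * acpi_release_global_lock() means the pending bit was set, so OSPM is
 * expected to notify the firmware of the release (typically by setting
 * GBL_RLS in the PM1 control register, per the ACPI spec).
 */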