path: root/sys/dev/acpica/Osd/OsdSynch.c
author     jkim <jkim@FreeBSD.org>  2009-06-08 20:07:16 +0000
committer  jkim <jkim@FreeBSD.org>  2009-06-08 20:07:16 +0000
commit     8ede8714ca613975ea1e8456a3168868425b93ab (patch)
tree       837dbb33fb84567301cf5e3048c33e511a446804 /sys/dev/acpica/Osd/OsdSynch.c
parent     3513417bbf22285417b257df7c710671bb40761e (diff)
Rewrite OsdSynch.c to reflect the latest ACPICA more closely:
- Implement ACPI semaphore (ACPI_SEMAPHORE) with condvar(9) and mutex(9).
- Implement ACPI mutex (ACPI_MUTEX) with mutex(9).
- Implement ACPI lock (ACPI_SPINLOCK) with spin mutex(9).
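
For context, a minimal caller-side sketch of the semaphore interface this revision implements (illustrative only; example_sema_roundtrip is a hypothetical helper, not part of the commit):

/*
 * Sketch: exercise the OSD semaphore primitives implemented in the diff
 * below. Assumes the usual ACPICA environment from
 * <contrib/dev/acpica/include/acpi.h>, as in OsdSynch.c itself.
 */
static ACPI_STATUS
example_sema_roundtrip(void)
{
	ACPI_SEMAPHORE sem;
	ACPI_STATUS status;

	/* Binary semaphore: at most one unit, one unit initially available. */
	status = AcpiOsCreateSemaphore(1, 1, &sem);
	if (ACPI_FAILURE(status))
		return (status);

	/* Wait up to 100 ms for one unit; AE_TIME is returned on timeout. */
	status = AcpiOsWaitSemaphore(sem, 1, 100);
	if (ACPI_SUCCESS(status))
		status = AcpiOsSignalSemaphore(sem, 1);

	AcpiOsDeleteSemaphore(sem);
	return (status);
}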
Diffstat (limited to 'sys/dev/acpica/Osd/OsdSynch.c')
-rw-r--r--  sys/dev/acpica/Osd/OsdSynch.c  712
1 file changed, 445 insertions, 267 deletions
diff --git a/sys/dev/acpica/Osd/OsdSynch.c b/sys/dev/acpica/Osd/OsdSynch.c
index 68fdcaa..d1b0454 100644
--- a/sys/dev/acpica/Osd/OsdSynch.c
+++ b/sys/dev/acpica/Osd/OsdSynch.c
@@ -1,6 +1,7 @@
/*-
* Copyright (c) 2000 Michael Smith
* Copyright (c) 2000 BSDi
+ * Copyright (c) 2007-2009 Jung-uk Kim <jkim@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,366 +36,543 @@ __FBSDID("$FreeBSD$");
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
-#include "opt_acpi.h"
+#include <sys/condvar.h>
#include <sys/kernel.h>
-#include <sys/malloc.h>
-#include <sys/sysctl.h>
#include <sys/lock.h>
+#include <sys/malloc.h>
#include <sys/mutex.h>
-#define _COMPONENT ACPI_OS_SERVICES
+#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("SYNCH")
MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");
-#define AS_LOCK(as) mtx_lock(&(as)->as_mtx)
-#define AS_UNLOCK(as) mtx_unlock(&(as)->as_mtx)
-
/*
- * Simple counting semaphore implemented using a mutex. (Subsequently used
- * in the OSI code to implement a mutex. Go figure.)
+ * Convert milliseconds to ticks.
*/
-struct acpi_semaphore {
- struct mtx as_mtx;
- UINT32 as_units;
- UINT32 as_maxunits;
- UINT32 as_pendings;
- UINT32 as_resetting;
- UINT32 as_timeouts;
-};
+static int
+timeout2hz(UINT16 Timeout)
+{
+ struct timeval tv;
-/* Default number of maximum pending threads. */
-#ifndef ACPI_NO_SEMAPHORES
-#ifndef ACPI_SEMAPHORES_MAX_PENDING
-#define ACPI_SEMAPHORES_MAX_PENDING 4
-#endif
+ tv.tv_sec = (time_t)(Timeout / 1000);
+ tv.tv_usec = (suseconds_t)(Timeout % 1000) * 1000;
-static int acpi_semaphore_debug = 0;
-TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
-SYSCTL_DECL(_debug_acpi);
-SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
- &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
-#endif /* !ACPI_NO_SEMAPHORES */
+ return (tvtohz(&tv));
+}
+
+/*
+ * ACPI_SEMAPHORE
+ */
+struct acpi_sema {
+ struct mtx as_lock;
+ char as_name[32];
+ struct cv as_cv;
+ UINT32 as_maxunits;
+ UINT32 as_units;
+ int as_waiters;
+ int as_reset;
+};
ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
ACPI_SEMAPHORE *OutHandle)
{
-#ifndef ACPI_NO_SEMAPHORES
- struct acpi_semaphore *as;
+ struct acpi_sema *as;
- ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
- if (OutHandle == NULL)
- return_ACPI_STATUS (AE_BAD_PARAMETER);
- if (InitialUnits > MaxUnits)
- return_ACPI_STATUS (AE_BAD_PARAMETER);
+ if (OutHandle == NULL || MaxUnits == 0 || InitialUnits > MaxUnits)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
- if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
- return_ACPI_STATUS (AE_NO_MEMORY);
+ if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
+ return_ACPI_STATUS (AE_NO_MEMORY);
- mtx_init(&as->as_mtx, "ACPI semaphore", NULL, MTX_DEF);
- as->as_units = InitialUnits;
- as->as_maxunits = MaxUnits;
- as->as_pendings = as->as_resetting = as->as_timeouts = 0;
+ snprintf(as->as_name, sizeof(as->as_name), "ACPI sema (%p)", as);
+ mtx_init(&as->as_lock, as->as_name, NULL, MTX_DEF);
+ cv_init(&as->as_cv, as->as_name);
+ as->as_maxunits = MaxUnits;
+ as->as_units = InitialUnits;
- ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
- "created semaphore %p max %d, initial %d\n",
- as, InitialUnits, MaxUnits));
+ *OutHandle = (ACPI_SEMAPHORE)as;
- *OutHandle = (ACPI_HANDLE)as;
-#else
- *OutHandle = (ACPI_HANDLE)OutHandle;
-#endif /* !ACPI_NO_SEMAPHORES */
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s, max %u, initial %u\n",
+ as->as_name, MaxUnits, InitialUnits));
- return_ACPI_STATUS (AE_OK);
+ return_ACPI_STATUS (AE_OK);
}
ACPI_STATUS
AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle)
{
-#ifndef ACPI_NO_SEMAPHORES
- struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
+ struct acpi_sema *as = (struct acpi_sema *)Handle;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (as == NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ mtx_lock(&as->as_lock);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", as->as_name));
+
+ if (as->as_waiters > 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "reset %s, units %u, waiters %d\n",
+ as->as_name, as->as_units, as->as_waiters));
+ as->as_reset = 1;
+ cv_broadcast(&as->as_cv);
+ while (as->as_waiters > 0) {
+ if (mtx_sleep(&as->as_reset, &as->as_lock,
+ PCATCH, "acsrst", hz) == EINTR) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "failed to reset %s, waiters %d\n",
+ as->as_name, as->as_waiters));
+ mtx_unlock(&as->as_lock);
+ return_ACPI_STATUS (AE_ERROR);
+ }
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "wait %s, units %u, waiters %d\n",
+ as->as_name, as->as_units, as->as_waiters));
+ }
+ }
- ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ mtx_unlock(&as->as_lock);
- ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
- mtx_destroy(&as->as_mtx);
- free(Handle, M_ACPISEM);
-#endif /* !ACPI_NO_SEMAPHORES */
+ mtx_destroy(&as->as_lock);
+ cv_destroy(&as->as_cv);
+ free(as, M_ACPISEM);
- return_ACPI_STATUS (AE_OK);
+ return_ACPI_STATUS (AE_OK);
}
-/*
- * This implementation has a bug, in that it has to stall for the entire
- * timeout before it will return AE_TIME. A better implementation would
- * use getmicrotime() to correctly adjust the timeout after being woken up.
- */
+#define ACPISEM_AVAIL(s, u) ((s)->as_units >= (u))
+
ACPI_STATUS
AcpiOsWaitSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units, UINT16 Timeout)
{
-#ifndef ACPI_NO_SEMAPHORES
- ACPI_STATUS result;
- struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
- int rv, tmo;
- struct timeval timeouttv, currenttv, timelefttv;
+ struct acpi_sema *as = (struct acpi_sema *)Handle;
+ int error, prevtick, slptick, tmo;
+ ACPI_STATUS status = AE_OK;
- ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
- if (as == NULL)
- return_ACPI_STATUS (AE_BAD_PARAMETER);
+ if (as == NULL || Units == 0)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
- if (cold)
- return_ACPI_STATUS (AE_OK);
+ mtx_lock(&as->as_lock);
-#if 0
- if (as->as_units < Units && as->as_timeouts > 10) {
- printf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
- AS_LOCK(as);
- as->as_units = as->as_maxunits;
- if (as->as_pendings)
- as->as_resetting = 1;
- as->as_timeouts = 0;
- wakeup(as);
- AS_UNLOCK(as);
- return_ACPI_STATUS (AE_TIME);
- }
-
- if (as->as_resetting)
- return_ACPI_STATUS (AE_TIME);
-#endif
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "get %u unit(s) from %s, units %u, waiters %d, timeout %u\n",
+ Units, as->as_name, as->as_units, as->as_waiters, Timeout));
- /* a timeout of ACPI_WAIT_FOREVER means "forever" */
- if (Timeout == ACPI_WAIT_FOREVER) {
- tmo = 0;
- timeouttv.tv_sec = ((0xffff/1000) + 1); /* cf. ACPI spec */
- timeouttv.tv_usec = 0;
- } else {
- /* compute timeout using microseconds per tick */
- tmo = (Timeout * 1000) / (1000000 / hz);
- if (tmo <= 0)
- tmo = 1;
- timeouttv.tv_sec = Timeout / 1000;
- timeouttv.tv_usec = (Timeout % 1000) * 1000;
- }
-
- /* calculate timeout value in timeval */
- getmicrotime(&currenttv);
- timevaladd(&timeouttv, &currenttv);
-
- AS_LOCK(as);
- ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
- "get %d units from semaphore %p (has %d), timeout %d\n",
- Units, as, as->as_units, Timeout));
- for (;;) {
- if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
- result = AE_OK;
- break;
- }
- if (as->as_units >= Units) {
- as->as_units -= Units;
- result = AE_OK;
- break;
+ if (as->as_maxunits != ACPI_NO_UNIT_LIMIT && as->as_maxunits < Units) {
+ mtx_unlock(&as->as_lock);
+ return_ACPI_STATUS (AE_LIMIT);
}
- /* limit number of pending threads */
- if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
- result = AE_TIME;
- break;
+ switch (Timeout) {
+ case ACPI_DO_NOT_WAIT:
+ if (!ACPISEM_AVAIL(as, Units))
+ status = AE_TIME;
+ break;
+ case ACPI_WAIT_FOREVER:
+ while (!ACPISEM_AVAIL(as, Units)) {
+ as->as_waiters++;
+ error = cv_wait_sig(&as->as_cv, &as->as_lock);
+ as->as_waiters--;
+ if (error == EINTR || as->as_reset) {
+ status = AE_ERROR;
+ break;
+ }
+ if (ACPISEM_AVAIL(as, Units))
+ break;
+ }
+ break;
+ default:
+ tmo = timeout2hz(Timeout);
+ while (!ACPISEM_AVAIL(as, Units)) {
+ prevtick = ticks;
+ as->as_waiters++;
+ error = cv_timedwait_sig(&as->as_cv, &as->as_lock, tmo);
+ as->as_waiters--;
+ if (error == EINTR || as->as_reset) {
+ status = AE_ERROR;
+ break;
+ }
+ if (ACPISEM_AVAIL(as, Units))
+ break;
+ slptick = ticks - prevtick;
+ if (slptick >= tmo || slptick < 0) {
+ status = AE_TIME;
+ break;
+ }
+ tmo -= slptick;
+ }
}
+ if (status == AE_OK)
+ as->as_units -= Units;
- /* if timeout values of zero is specified, return immediately */
- if (Timeout == 0) {
- result = AE_TIME;
- break;
- }
+ mtx_unlock(&as->as_lock);
- ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
- "semaphore blocked, calling msleep(%p, %p, %d, \"acsem\", %d)\n",
- as, &as->as_mtx, PCATCH, tmo));
+ return_ACPI_STATUS (status);
+}
- as->as_pendings++;
+ACPI_STATUS
+AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units)
+{
+ struct acpi_sema *as = (struct acpi_sema *)Handle;
+ UINT32 i;
- if (acpi_semaphore_debug) {
- printf("%s: Sleep %d, pending %d, semaphore %p, thread %d\n",
- __func__, Timeout, as->as_pendings, as, AcpiOsGetThreadId());
- }
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
- rv = msleep(as, &as->as_mtx, PCATCH, "acsem", tmo);
+ if (as == NULL || Units == 0)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
- as->as_pendings--;
+ mtx_lock(&as->as_lock);
-#if 0
- if (as->as_resetting) {
- /* semaphore reset, return immediately */
- if (as->as_pendings == 0) {
- as->as_resetting = 0;
- }
- result = AE_TIME;
- break;
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "return %u units to %s, units %u, waiters %d\n",
+ Units, as->as_name, as->as_units, as->as_waiters));
+
+ if (as->as_maxunits != ACPI_NO_UNIT_LIMIT &&
+ (as->as_maxunits < Units ||
+ as->as_maxunits - Units < as->as_units)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "exceeded max units %u\n", as->as_maxunits));
+ mtx_unlock(&as->as_lock);
+ return_ACPI_STATUS (AE_LIMIT);
}
-#endif
- ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "msleep(%d) returned %d\n", tmo, rv));
- if (rv == EWOULDBLOCK) {
- result = AE_TIME;
- break;
- }
+ as->as_units += Units;
+ if (as->as_waiters > 0 && ACPISEM_AVAIL(as, Units))
+ for (i = 0; i < Units; i++)
+ cv_signal(&as->as_cv);
- /* check if we already awaited enough */
- timelefttv = timeouttv;
- getmicrotime(&currenttv);
- timevalsub(&timelefttv, &currenttv);
- if (timelefttv.tv_sec < 0) {
- ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
- as));
- result = AE_TIME;
- break;
- }
+ mtx_unlock(&as->as_lock);
+
+ return_ACPI_STATUS (AE_OK);
+}
- /* adjust timeout for the next sleep */
- tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
- (1000000 / hz);
- if (tmo <= 0)
- tmo = 1;
+#undef ACPISEM_AVAIL
- if (acpi_semaphore_debug) {
- printf("%s: Wakeup timeleft(%jd, %lu), tmo %u, sem %p, thread %d\n",
- __func__, (intmax_t)timelefttv.tv_sec, timelefttv.tv_usec, tmo, as,
- AcpiOsGetThreadId());
- }
- }
+/*
+ * ACPI_MUTEX
+ */
+struct acpi_mutex {
+ struct mtx am_lock;
+ char am_name[32];
+ struct thread *am_owner;
+ int am_nested;
+ int am_waiters;
+ int am_reset;
+};
+
+ACPI_STATUS
+AcpiOsCreateMutex(ACPI_MUTEX *OutHandle)
+{
+ struct acpi_mutex *am;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (OutHandle == NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ if ((am = malloc(sizeof(*am), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
+ return_ACPI_STATUS (AE_NO_MEMORY);
+
+ snprintf(am->am_name, sizeof(am->am_name), "ACPI mutex (%p)", am);
+ mtx_init(&am->am_lock, am->am_name, NULL, MTX_DEF);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", am->am_name));
+
+ *OutHandle = (ACPI_MUTEX)am;
- if (acpi_semaphore_debug) {
- if (result == AE_TIME && Timeout > 0) {
- printf("%s: Timeout %d, pending %d, semaphore %p\n",
- __func__, Timeout, as->as_pendings, as);
+ return_ACPI_STATUS (AE_OK);
+}
+
+#define ACPIMTX_AVAIL(m) ((m)->am_owner == NULL)
+#define ACPIMTX_OWNED(m) ((m)->am_owner == curthread)
+
+void
+AcpiOsDeleteMutex(ACPI_MUTEX Handle)
+{
+ struct acpi_mutex *am = (struct acpi_mutex *)Handle;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (am == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "cannot delete null mutex\n"));
+ return_VOID;
}
- if (result == AE_OK && (as->as_timeouts > 0 || as->as_pendings > 0)) {
- printf("%s: Acquire %d, units %d, pending %d, sem %p, thread %d\n",
- __func__, Units, as->as_units, as->as_pendings, as,
- AcpiOsGetThreadId());
+
+ mtx_lock(&am->am_lock);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", am->am_name));
+
+ if (am->am_waiters > 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "reset %s, owner %p\n", am->am_name, am->am_owner));
+ am->am_reset = 1;
+ wakeup(am);
+ while (am->am_waiters > 0) {
+ if (mtx_sleep(&am->am_reset, &am->am_lock,
+ PCATCH, "acmrst", hz) == EINTR) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "failed to reset %s, waiters %d\n",
+ am->am_name, am->am_waiters));
+ mtx_unlock(&am->am_lock);
+ return_VOID;
+ }
+ if (ACPIMTX_AVAIL(am))
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "wait %s, waiters %d\n",
+ am->am_name, am->am_waiters));
+ else
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "wait %s, owner %p, waiters %d\n",
+ am->am_name, am->am_owner, am->am_waiters));
+ }
}
- }
-
- if (result == AE_TIME)
- as->as_timeouts++;
- else
- as->as_timeouts = 0;
-
- AS_UNLOCK(as);
- return_ACPI_STATUS (result);
-#else
- return_ACPI_STATUS (AE_OK);
-#endif /* !ACPI_NO_SEMAPHORES */
+
+ mtx_unlock(&am->am_lock);
+
+ mtx_destroy(&am->am_lock);
+ free(am, M_ACPISEM);
}
ACPI_STATUS
-AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units)
+AcpiOsAcquireMutex(ACPI_MUTEX Handle, UINT16 Timeout)
{
-#ifndef ACPI_NO_SEMAPHORES
- struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
+ struct acpi_mutex *am = (struct acpi_mutex *)Handle;
+ int error, prevtick, slptick, tmo;
+ ACPI_STATUS status = AE_OK;
- ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
- if (as == NULL)
- return_ACPI_STATUS(AE_BAD_PARAMETER);
+ if (am == NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
- AS_LOCK(as);
- ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
- "return %d units to semaphore %p (has %d)\n",
- Units, as, as->as_units));
- if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
- as->as_units += Units;
- if (as->as_units > as->as_maxunits)
- as->as_units = as->as_maxunits;
- }
+ mtx_lock(&am->am_lock);
- if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
- printf("%s: Release %d, units %d, pending %d, semaphore %p, thread %d\n",
- __func__, Units, as->as_units, as->as_pendings, as, AcpiOsGetThreadId());
- }
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", am->am_name));
- wakeup(as);
- AS_UNLOCK(as);
-#endif /* !ACPI_NO_SEMAPHORES */
+ if (ACPIMTX_OWNED(am)) {
+ am->am_nested++;
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "acquire nested %s, depth %d\n",
+ am->am_name, am->am_nested));
+ mtx_unlock(&am->am_lock);
+ return_ACPI_STATUS (AE_OK);
+ }
+
+ switch (Timeout) {
+ case ACPI_DO_NOT_WAIT:
+ if (!ACPIMTX_AVAIL(am))
+ status = AE_TIME;
+ break;
+ case ACPI_WAIT_FOREVER:
+ while (!ACPIMTX_AVAIL(am)) {
+ am->am_waiters++;
+ error = mtx_sleep(am, &am->am_lock, PCATCH, "acmtx", 0);
+ am->am_waiters--;
+ if (error == EINTR || am->am_reset) {
+ status = AE_ERROR;
+ break;
+ }
+ if (ACPIMTX_AVAIL(am))
+ break;
+ }
+ break;
+ default:
+ tmo = timeout2hz(Timeout);
+ while (!ACPIMTX_AVAIL(am)) {
+ prevtick = ticks;
+ am->am_waiters++;
+ error = mtx_sleep(am, &am->am_lock, PCATCH,
+ "acmtx", tmo);
+ am->am_waiters--;
+ if (error == EINTR || am->am_reset) {
+ status = AE_ERROR;
+ break;
+ }
+ if (ACPIMTX_AVAIL(am))
+ break;
+ slptick = ticks - prevtick;
+ if (slptick >= tmo || slptick < 0) {
+ status = AE_TIME;
+ break;
+ }
+ tmo -= slptick;
+ }
+ }
+ if (status == AE_OK)
+ am->am_owner = curthread;
+
+ mtx_unlock(&am->am_lock);
+
+ return_ACPI_STATUS (status);
+}
- return_ACPI_STATUS (AE_OK);
+void
+AcpiOsReleaseMutex(ACPI_MUTEX Handle)
+{
+ struct acpi_mutex *am = (struct acpi_mutex *)Handle;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (am == NULL)
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "cannot release null mutex\n"));
+
+ mtx_lock(&am->am_lock);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", am->am_name));
+
+ if (ACPIMTX_OWNED(am)) {
+ if (am->am_nested > 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "release nested %s, depth %d\n",
+ am->am_name, am->am_nested));
+ am->am_nested--;
+ } else
+ am->am_owner = NULL;
+ } else {
+ if (ACPIMTX_AVAIL(am))
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "release already available %s\n", am->am_name));
+ else
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "release unowned %s from %p, depth %d\n",
+ am->am_name, am->am_owner, am->am_nested));
+ }
+ if (am->am_waiters > 0 && ACPIMTX_AVAIL(am))
+ wakeup_one(am);
+
+ mtx_unlock(&am->am_lock);
}
-/* Combined mutex + mutex name storage since the latter must persist. */
+#undef ACPIMTX_AVAIL
+#undef ACPIMTX_OWNED
+
+/*
+ * ACPI_SPINLOCK
+ */
struct acpi_spinlock {
- struct mtx lock;
- char name[32];
+ struct mtx al_lock;
+ char al_name[32];
+ int al_nested;
};
ACPI_STATUS
-AcpiOsCreateLock (ACPI_SPINLOCK *OutHandle)
+AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
{
- struct acpi_spinlock *h;
-
- if (OutHandle == NULL)
- return (AE_BAD_PARAMETER);
- h = malloc(sizeof(*h), M_ACPISEM, M_NOWAIT | M_ZERO);
- if (h == NULL)
- return (AE_NO_MEMORY);
-
- /* Build a unique name based on the address of the handle. */
- if (OutHandle == &AcpiGbl_GpeLock)
- snprintf(h->name, sizeof(h->name), "acpi subsystem GPE lock");
- else if (OutHandle == &AcpiGbl_HardwareLock)
- snprintf(h->name, sizeof(h->name), "acpi subsystem HW lock");
- else
- snprintf(h->name, sizeof(h->name), "acpi subsys %p", OutHandle);
- mtx_init(&h->lock, h->name, NULL, MTX_DEF|MTX_RECURSE);
- *OutHandle = (ACPI_SPINLOCK)h;
- return (AE_OK);
+ struct acpi_spinlock *al;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (OutHandle == NULL)
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+ if ((al = malloc(sizeof(*al), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
+ return_ACPI_STATUS (AE_NO_MEMORY);
+
+#ifdef ACPI_DEBUG
+ if (OutHandle == &AcpiGbl_GpeLock)
+ snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (GPE)");
+ else if (OutHandle == &AcpiGbl_HardwareLock)
+ snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (HW)");
+ else
+#endif
+ snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (%p)", al);
+ mtx_init(&al->al_lock, al->al_name, NULL, MTX_SPIN);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s\n", al->al_name));
+
+ *OutHandle = (ACPI_SPINLOCK)al;
+
+ return_ACPI_STATUS (AE_OK);
}
void
-AcpiOsDeleteLock (ACPI_SPINLOCK Handle)
+AcpiOsDeleteLock(ACPI_SPINLOCK Handle)
{
- struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
+ struct acpi_spinlock *al = (struct acpi_spinlock *)Handle;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (al == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "cannot delete null spinlock\n"));
+ return_VOID;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete %s\n", al->al_name));
- if (Handle == NULL)
- return;
- mtx_destroy(&h->lock);
- free(h, M_ACPISEM);
+ mtx_destroy(&al->al_lock);
+ free(al, M_ACPISEM);
}
-/*
- * The Flags parameter seems to state whether or not caller is an ISR
- * (and thus can't block) but since we have ithreads, we don't worry
- * about potentially blocking.
- */
ACPI_CPU_FLAGS
-AcpiOsAcquireLock (ACPI_SPINLOCK Handle)
+AcpiOsAcquireLock(ACPI_SPINLOCK Handle)
{
- struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
+ struct acpi_spinlock *al = (struct acpi_spinlock *)Handle;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (al == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "cannot acquire null spinlock\n"));
+ return (0);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", al->al_name));
+
+ if (mtx_owned(&al->al_lock)) {
+ al->al_nested++;
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "acquire nested %s, depth %d\n",
+ al->al_name, al->al_nested));
+ } else
+ mtx_lock_spin(&al->al_lock);
- if (Handle == NULL)
return (0);
- mtx_lock(&h->lock);
- return (0);
}
void
-AcpiOsReleaseLock (ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
+AcpiOsReleaseLock(ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
{
- struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
+ struct acpi_spinlock *al = (struct acpi_spinlock *)Handle;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+ if (al == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "cannot release null spinlock\n"));
+ return_VOID;
+ }
- if (Handle == NULL)
- return;
- mtx_unlock(&h->lock);
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", al->al_name));
+
+ if (mtx_owned(&al->al_lock)) {
+ if (al->al_nested > 0) {
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "release nested %s, depth %d\n",
+ al->al_name, al->al_nested));
+ al->al_nested--;
+ } else
+ mtx_unlock_spin(&al->al_lock);
+ } else
+ ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+ "cannot release unowned %s\n", al->al_name));
}
-/* Section 5.2.9.1: global lock acquire/release functions */
-#define GL_ACQUIRED (-1)
-#define GL_BUSY 0
-#define GL_BIT_PENDING 0x1
-#define GL_BIT_OWNED 0x2
-#define GL_BIT_MASK (GL_BIT_PENDING | GL_BIT_OWNED)
+/* Section 5.2.10.1: global lock acquire/release functions */
+#define GL_ACQUIRED (-1)
+#define GL_BUSY 0
+#define GL_BIT_PENDING 0x01
+#define GL_BIT_OWNED 0x02
+#define GL_BIT_MASK (GL_BIT_PENDING | GL_BIT_OWNED)
/*
* Acquire the global lock. If busy, set the pending bit. The caller
@@ -404,7 +582,7 @@ AcpiOsReleaseLock (ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
int
acpi_acquire_global_lock(uint32_t *lock)
{
- uint32_t new, old;
+ uint32_t new, old;
do {
old = *lock;
@@ -423,7 +601,7 @@ acpi_acquire_global_lock(uint32_t *lock)
int
acpi_release_global_lock(uint32_t *lock)
{
- uint32_t new, old;
+ uint32_t new, old;
do {
old = *lock;
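
The hunks above truncate the bodies of acpi_acquire_global_lock() and acpi_release_global_lock(); only their whitespace changes in this commit. As a reminder of the protocol the GL_* bits encode (a hedged sketch, not the committed code), acquisition sets the owned bit and, if the lock was already owned, additionally sets the pending bit and reports the lock as busy:

#include <machine/atomic.h>

/*
 * Sketch of the ACPI global-lock acquire protocol (spec section 5.2.10.1).
 * example_acquire_global_lock() is illustrative, not the committed function.
 */
static int
example_acquire_global_lock(volatile uint32_t *lock)
{
	uint32_t new, old;

	do {
		old = *lock;
		new = (old & ~GL_BIT_MASK) | GL_BIT_OWNED;
		if ((old & GL_BIT_OWNED) != 0)
			new |= GL_BIT_PENDING;	/* owner must wake us on release */
	} while (atomic_cmpset_acq_int(lock, old, new) == 0);

	/* GL_ACQUIRED if we own it outright, GL_BUSY if we must wait. */
	return ((new & GL_BIT_PENDING) == 0 ? GL_ACQUIRED : GL_BUSY);
}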