Diffstat (limited to 'drivers/acpi/events')
-rw-r--r--  drivers/acpi/events/Makefile       9
-rw-r--r--  drivers/acpi/events/evevent.c    312
-rw-r--r--  drivers/acpi/events/evgpe.c      725
-rw-r--r--  drivers/acpi/events/evgpeblk.c  1215
-rw-r--r--  drivers/acpi/events/evmisc.c     631
-rw-r--r--  drivers/acpi/events/evregion.c  1074
-rw-r--r--  drivers/acpi/events/evrgnini.c   688
-rw-r--r--  drivers/acpi/events/evsci.c      184
-rw-r--r--  drivers/acpi/events/evxface.c    820
-rw-r--r--  drivers/acpi/events/evxfevnt.c   719
-rw-r--r--  drivers/acpi/events/evxfregn.c   253
11 files changed, 6630 insertions(+), 0 deletions(-)
diff --git a/drivers/acpi/events/Makefile b/drivers/acpi/events/Makefile
new file mode 100644
index 0000000..d29f2ee
--- /dev/null
+++ b/drivers/acpi/events/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for all Linux ACPI interpreter subdirectories
+#
+
+obj-y := evevent.o evregion.o evsci.o evxfevnt.o \
+ evmisc.o evrgnini.o evxface.o evxfregn.o \
+ evgpe.o evgpeblk.o
+
+EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
new file mode 100644
index 0000000..c56c5c6
--- /dev/null
+++ b/drivers/acpi/events/evevent.c
@@ -0,0 +1,312 @@
+/******************************************************************************
+ *
+ * Module Name: evevent - Fixed Event handling and dispatch
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evevent")
+
+/* Local prototypes */
+static acpi_status acpi_ev_fixed_event_initialize(void);
+
+static u32 acpi_ev_fixed_event_dispatch(u32 event);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_initialize_events
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize global data structures for ACPI events (Fixed, GPE)
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_initialize_events(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_initialize_events);
+
+ /*
+ * Initialize the Fixed and General Purpose Events. This is done prior to
+ * enabling SCIs to prevent interrupts from occurring before the handlers are
+ * installed.
+ */
+ status = acpi_ev_fixed_event_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to initialize fixed events"));
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_ev_gpe_initialize();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to initialize general purpose events"));
+ return_ACPI_STATUS(status);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_fadt_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Completes initialization of the FADT-defined GPE blocks
+ * (0 and 1). This causes the _PRW methods to be run, so the HW
+ * must be fully initialized at this point, including global lock
+ * support.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_install_fadt_gpes(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_install_fadt_gpes);
+
+ /* Namespace must be locked */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* FADT GPE Block 0 */
+
+ (void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
+ acpi_gbl_gpe_fadt_blocks[0]);
+
+ /* FADT GPE Block 1 */
+
+ (void)acpi_ev_initialize_gpe_block(acpi_gbl_fadt_gpe_device,
+ acpi_gbl_gpe_fadt_blocks[1]);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_xrupt_handlers
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install interrupt handlers for the SCI and Global Lock
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_install_xrupt_handlers(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
+
+ /* Install the SCI handler */
+
+ status = acpi_ev_install_sci_handler();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to install System Control Interrupt handler"));
+ return_ACPI_STATUS(status);
+ }
+
+ /* Install the handler for the Global Lock */
+
+ status = acpi_ev_init_global_lock_handler();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to initialize Global Lock handler"));
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_gbl_events_initialized = TRUE;
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_fixed_event_initialize
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install the fixed event handlers and enable the fixed events.
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ev_fixed_event_initialize(void)
+{
+ u32 i;
+ acpi_status status;
+
+ /*
+ * Initialize the structure that keeps track of fixed event handlers
+ * and enable the fixed events.
+ */
+ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+ acpi_gbl_fixed_event_handlers[i].handler = NULL;
+ acpi_gbl_fixed_event_handlers[i].context = NULL;
+
+ /* Enable the fixed event */
+
+ if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) {
+ status =
+ acpi_set_register(acpi_gbl_fixed_event_info[i].
+ enable_register_id, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_fixed_event_detect
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
+ *
+ * DESCRIPTION: Checks the PM status register for active fixed events
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_fixed_event_detect(void)
+{
+ u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
+ u32 fixed_status;
+ u32 fixed_enable;
+ u32 i;
+
+ ACPI_FUNCTION_NAME(ev_fixed_event_detect);
+
+ /*
+ * Read the fixed feature status and enable registers, as all the cases
+ * depend on their values. Ignore errors here.
+ */
+ (void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
+ (void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
+ "Fixed Event Block: Enable %08X Status %08X\n",
+ fixed_enable, fixed_status));
+
+ /*
+ * Check for all possible Fixed Events and dispatch those that are active
+ */
+ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+
+ /* Both the status and enable bits must be on for this event */
+
+ if ((fixed_status & acpi_gbl_fixed_event_info[i].
+ status_bit_mask)
+ && (fixed_enable & acpi_gbl_fixed_event_info[i].
+ enable_bit_mask)) {
+
+ /* Found an active (signalled) event */
+ acpi_os_fixed_event_count(i);
+ int_status |= acpi_ev_fixed_event_dispatch(i);
+ }
+ }
+
+ return (int_status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_fixed_event_dispatch
+ *
+ * PARAMETERS: Event - Event type
+ *
+ * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
+ *
+ * DESCRIPTION: Clears the status bit for the requested event, calls the
+ * handler that previously registered for the event.
+ *
+ ******************************************************************************/
+
+static u32 acpi_ev_fixed_event_dispatch(u32 event)
+{
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* Clear the status bit */
+
+ (void)acpi_set_register(acpi_gbl_fixed_event_info[event].
+ status_register_id, 1);
+
+ /*
+ * Make sure we've got a handler. If not, report an error.
+ * The event is disabled to prevent further interrupts.
+ */
+ if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
+ (void)acpi_set_register(acpi_gbl_fixed_event_info[event].
+ enable_register_id, 0);
+
+ ACPI_ERROR((AE_INFO,
+ "No installed handler for fixed event [%08X]",
+ event));
+
+ return (ACPI_INTERRUPT_NOT_HANDLED);
+ }
+
+ /* Invoke the Fixed Event handler */
+
+ return ((acpi_gbl_fixed_event_handlers[event].
+ handler) (acpi_gbl_fixed_event_handlers[event].context));
+}
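
Note: the dispatch path above ends by invoking whatever handler was registered through the evxface.c/evxfevnt.c interfaces that are also part of this diffstat. A minimal, hypothetical consumer of the fixed-event path would look roughly like the sketch below (the handler and init function names are invented for illustration, and error handling is trimmed):

static u32 my_power_button_handler(void *context)
{
	/* Called from acpi_ev_fixed_event_dispatch() at interrupt level */
	return ACPI_INTERRUPT_HANDLED;
}

static acpi_status my_fixed_event_init(void)
{
	acpi_status status;

	/*
	 * Fill the acpi_gbl_fixed_event_handlers[] slot that
	 * acpi_ev_fixed_event_dispatch() consults above.
	 */
	status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
						  my_power_button_handler,
						  NULL);
	if (ACPI_FAILURE(status))
		return status;

	/* Set the enable bit tested by acpi_ev_fixed_event_detect() */
	return acpi_enable_event(ACPI_EVENT_POWER_BUTTON, 0);
}
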
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
new file mode 100644
index 0000000..f45c74f
--- /dev/null
+++ b/drivers/acpi/events/evgpe.c
@@ -0,0 +1,725 @@
+/******************************************************************************
+ *
+ * Module Name: evgpe - General Purpose Event handling and dispatch
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evgpe")
+
+/* Local prototypes */
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_set_gpe_type
+ *
+ * PARAMETERS: gpe_event_info - GPE to set
+ * Type - New type
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run)
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_set_gpe_type);
+
+ /* Validate type and update register enable masks */
+
+ switch (type) {
+ case ACPI_GPE_TYPE_WAKE:
+ case ACPI_GPE_TYPE_RUNTIME:
+ case ACPI_GPE_TYPE_WAKE_RUN:
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Disable the GPE if currently enabled */
+
+ status = acpi_ev_disable_gpe(gpe_event_info);
+
+ /* Type was validated above */
+
+ gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK; /* Clear type bits */
+ gpe_event_info->flags |= type; /* Insert type */
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_update_gpe_enable_masks
+ *
+ * PARAMETERS: gpe_event_info - GPE to update
+ * Type - What to do: ACPI_GPE_DISABLE or
+ * ACPI_GPE_ENABLE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Updates GPE register enable masks based on the GPE type
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
+ u8 type)
+{
+ struct acpi_gpe_register_info *gpe_register_info;
+ u8 register_bit;
+
+ ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);
+
+ gpe_register_info = gpe_event_info->register_info;
+ if (!gpe_register_info) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+ register_bit = (u8)
+ (1 <<
+ (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
+
+ /* 1) Disable case. Simply clear all enable bits */
+
+ if (type == ACPI_GPE_DISABLE) {
+ ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
+ register_bit);
+ ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* 2) Enable case. Set/Clear the appropriate enable bits */
+
+ switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
+ case ACPI_GPE_TYPE_WAKE:
+ ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
+ ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
+ break;
+
+ case ACPI_GPE_TYPE_RUNTIME:
+ ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
+ register_bit);
+ ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
+ break;
+
+ case ACPI_GPE_TYPE_WAKE_RUN:
+ ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
+ ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_enable_gpe
+ *
+ * PARAMETERS: gpe_event_info - GPE to enable
+ * write_to_hardware - Enable now, or just mark data structs
+ * (WAKE GPEs should be deferred)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable a GPE based on the GPE type
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
+ u8 write_to_hardware)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_enable_gpe);
+
+ /* Make sure HW enable masks are updated */
+
+ status =
+ acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Mark wake-enabled or HW enable, or both */
+
+ switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
+ case ACPI_GPE_TYPE_WAKE:
+
+ ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
+ break;
+
+ case ACPI_GPE_TYPE_WAKE_RUN:
+
+ ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
+
+ /*lint -fallthrough */
+
+ case ACPI_GPE_TYPE_RUNTIME:
+
+ ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
+
+ if (write_to_hardware) {
+
+ /* Clear the GPE (of stale events), then enable it */
+
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Enable the requested runtime GPE */
+
+ status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
+ }
+ break;
+
+ default:
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_disable_gpe
+ *
+ * PARAMETERS: gpe_event_info - GPE to disable
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Disable a GPE based on the GPE type
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_disable_gpe);
+
+ /* Make sure HW enable masks are updated */
+
+ status =
+ acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_DISABLE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Clear the appropriate enabled flags for this GPE */
+
+ switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
+ case ACPI_GPE_TYPE_WAKE:
+ ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
+ break;
+
+ case ACPI_GPE_TYPE_WAKE_RUN:
+ ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
+
+ /* fallthrough */
+
+ case ACPI_GPE_TYPE_RUNTIME:
+
+ /* Disable the requested runtime GPE */
+
+ ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * Even if we don't know the GPE type, make sure that we always
+ * disable it. low_disable_gpe will just clear the enable bit for this
+ * GPE and write it. It will not write out the current GPE enable mask,
+ * since this may inadvertently enable GPEs too early, if a rogue GPE has
+ * come in during ACPICA initialization - possibly as a result of AML or
+ * other code that has enabled the GPE.
+ */
+ status = acpi_hw_low_disable_gpe(gpe_event_info);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_get_gpe_event_info
+ *
+ * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
+ * gpe_number - Raw GPE number
+ *
+ * RETURN: A GPE event_info struct. NULL if not a valid GPE
+ *
+ * DESCRIPTION: Returns the event_info struct associated with this GPE.
+ * Validates the gpe_block and the gpe_number
+ *
+ * Should be called only when the GPE lists are semaphore locked
+ * and not subject to change.
+ *
+ ******************************************************************************/
+
+struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
+ u32 gpe_number)
+{
+ union acpi_operand_object *obj_desc;
+ struct acpi_gpe_block_info *gpe_block;
+ u32 i;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* A NULL gpe_block means use the FADT-defined GPE block(s) */
+
+ if (!gpe_device) {
+
+ /* Examine GPE Block 0 and 1 (These blocks are permanent) */
+
+ for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
+ gpe_block = acpi_gbl_gpe_fadt_blocks[i];
+ if (gpe_block) {
+ if ((gpe_number >= gpe_block->block_base_number)
+ && (gpe_number <
+ gpe_block->block_base_number +
+ (gpe_block->register_count * 8))) {
+ return (&gpe_block->
+ event_info[gpe_number -
+ gpe_block->
+ block_base_number]);
+ }
+ }
+ }
+
+ /* The gpe_number was not in the range of either FADT GPE block */
+
+ return (NULL);
+ }
+
+ /* A Non-NULL gpe_device means this is a GPE Block Device */
+
+ obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
+ gpe_device);
+ if (!obj_desc || !obj_desc->device.gpe_block) {
+ return (NULL);
+ }
+
+ gpe_block = obj_desc->device.gpe_block;
+
+ if ((gpe_number >= gpe_block->block_base_number) &&
+ (gpe_number <
+ gpe_block->block_base_number + (gpe_block->register_count * 8))) {
+ return (&gpe_block->
+ event_info[gpe_number - gpe_block->block_base_number]);
+ }
+
+ return (NULL);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_gpe_detect
+ *
+ * PARAMETERS: gpe_xrupt_list - Interrupt block for this interrupt.
+ * Can have multiple GPE blocks attached.
+ *
+ * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
+ *
+ * DESCRIPTION: Detect if any GP events have occurred. This function is
+ * executed at interrupt level.
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
+{
+ acpi_status status;
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_register_info *gpe_register_info;
+ u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
+ u8 enabled_status_byte;
+ u32 status_reg;
+ u32 enable_reg;
+ acpi_cpu_flags flags;
+ u32 i;
+ u32 j;
+
+ ACPI_FUNCTION_NAME(ev_gpe_detect);
+
+ /* Check for the case where there are no GPEs */
+
+ if (!gpe_xrupt_list) {
+ return (int_status);
+ }
+
+ /*
+ * We need to obtain the GPE lock for both the data structs and registers
+ * Note: Not necessary to obtain the hardware lock, since the GPE registers
+ * are owned by the gpe_lock.
+ */
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Examine all GPE blocks attached to this interrupt level */
+
+ gpe_block = gpe_xrupt_list->gpe_block_list_head;
+ while (gpe_block) {
+ /*
+ * Read all of the 8-bit GPE status and enable registers
+ * in this GPE block, saving all of them.
+ * Find all currently active GP events.
+ */
+ for (i = 0; i < gpe_block->register_count; i++) {
+
+ /* Get the next status/enable pair */
+
+ gpe_register_info = &gpe_block->register_info[i];
+
+ /* Read the Status Register */
+
+ status =
+ acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH,
+ &status_reg,
+ &gpe_register_info->
+ status_address);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Read the Enable Register */
+
+ status =
+ acpi_hw_low_level_read(ACPI_GPE_REGISTER_WIDTH,
+ &enable_reg,
+ &gpe_register_info->
+ enable_address);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
+ "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
+ gpe_register_info->base_gpe_number,
+ status_reg, enable_reg));
+
+ /* Check if there is anything active at all in this register */
+
+ enabled_status_byte = (u8) (status_reg & enable_reg);
+ if (!enabled_status_byte) {
+
+ /* No active GPEs in this register, move on */
+
+ continue;
+ }
+
+ /* Now look at the individual GPEs in this byte register */
+
+ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+
+ /* Examine one GPE bit */
+
+ if (enabled_status_byte & (1 << j)) {
+ /*
+ * Found an active GPE. Dispatch the event to a handler
+ * or method.
+ */
+ int_status |=
+ acpi_ev_gpe_dispatch(&gpe_block->
+ event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
+ }
+ }
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ unlock_and_exit:
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return (int_status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_asynch_execute_gpe_method
+ *
+ * PARAMETERS: Context (gpe_event_info) - Info for this GPE
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Perform the actual execution of a GPE control method. This
+ * function is called from an invocation of acpi_os_execute and
+ * therefore does NOT execute at interrupt level - so that
+ * the control method itself is not executed in the context of
+ * an interrupt handler.
+ *
+ ******************************************************************************/
+static void acpi_ev_asynch_enable_gpe(void *context);
+
+static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
+{
+ struct acpi_gpe_event_info *gpe_event_info = (void *)context;
+ acpi_status status;
+ struct acpi_gpe_event_info local_gpe_event_info;
+ struct acpi_evaluate_info *info;
+
+ ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
+ /* Must revalidate the gpe_number/gpe_block */
+
+ if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
+ status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_VOID;
+ }
+
+ /* Set the GPE flags for return to enabled state */
+
+ (void)acpi_ev_enable_gpe(gpe_event_info, FALSE);
+
+ /*
+ * Take a snapshot of the GPE info for this level - we copy the
+ * info to prevent a race condition with remove_handler/remove_block.
+ */
+ ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
+ sizeof(struct acpi_gpe_event_info));
+
+ status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+
+ /*
+ * Must check for control method type dispatch one more
+ * time to avoid race with ev_gpe_install_handler
+ */
+ if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_METHOD) {
+
+ /* Allocate the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ status = AE_NO_MEMORY;
+ } else {
+ /*
+ * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
+ * control method that corresponds to this GPE
+ */
+ info->prefix_node =
+ local_gpe_event_info.dispatch.method_node;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ status = acpi_ns_evaluate(info);
+ ACPI_FREE(info);
+ }
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "while evaluating GPE method [%4.4s]",
+ acpi_ut_get_node_name
+ (local_gpe_event_info.dispatch.
+ method_node)));
+ }
+ }
+ /* Defer enabling of GPE until all notify handlers are done */
+ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
+ gpe_event_info);
+ return_VOID;
+}
+
+static void acpi_ev_asynch_enable_gpe(void *context)
+{
+ struct acpi_gpe_event_info *gpe_event_info = context;
+ acpi_status status;
+ if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+ ACPI_GPE_LEVEL_TRIGGERED) {
+ /*
+ * GPE is level-triggered, we clear the GPE status bit after
+ * handling the event.
+ */
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+ }
+
+ /* Enable this GPE */
+ (void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_gpe_dispatch
+ *
+ * PARAMETERS: gpe_event_info - Info for this GPE
+ * gpe_number - Number relative to the parent GPE block
+ *
+ * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
+ *
+ * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
+ * or method (e.g. _Lxx/_Exx) handler.
+ *
+ * This function executes at interrupt level.
+ *
+ ******************************************************************************/
+
+u32
+acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
+
+ acpi_os_gpe_count(gpe_number);
+
+ /*
+ * If edge-triggered, clear the GPE status bit now. Note that
+ * level-triggered events are cleared after the GPE is serviced.
+ */
+ if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+ ACPI_GPE_EDGE_TRIGGERED) {
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to clear GPE[%2X]",
+ gpe_number));
+ return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ }
+ }
+
+ /*
+ * Dispatch the GPE to either an installed handler, or the control method
+ * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
+ * it and do not attempt to run the method. If there is neither a handler
+ * nor a method, we disable this GPE to prevent further such pointless
+ * events from firing.
+ */
+ switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
+ case ACPI_GPE_DISPATCH_HANDLER:
+
+ /*
+ * Invoke the installed handler (at interrupt level)
+ * Ignore return status for now. TBD: leave GPE disabled on error?
+ */
+ (void)gpe_event_info->dispatch.handler->address(gpe_event_info->
+ dispatch.
+ handler->
+ context);
+
+ /* It is now safe to clear level-triggered events. */
+
+ if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+ ACPI_GPE_LEVEL_TRIGGERED) {
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to clear GPE[%2X]",
+ gpe_number));
+ return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ }
+ }
+ break;
+
+ case ACPI_GPE_DISPATCH_METHOD:
+
+ /*
+ * Disable the GPE, so it doesn't keep firing before the method has a
+ * chance to run (it runs asynchronously with interrupts enabled).
+ */
+ status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to disable GPE[%2X]",
+ gpe_number));
+ return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ }
+
+ /*
+ * Execute the method associated with the GPE
+ * NOTE: Level-triggered GPEs are cleared after the method completes.
+ */
+ status = acpi_os_execute(OSL_GPE_HANDLER,
+ acpi_ev_asynch_execute_gpe_method,
+ gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to queue handler for GPE[%2X] - event disabled",
+ gpe_number));
+ }
+ break;
+
+ default:
+
+ /* No handler or method to run! */
+
+ ACPI_ERROR((AE_INFO,
+ "No handler or method for GPE[%2X], disabling event",
+ gpe_number));
+
+ /*
+ * Disable the GPE. The GPE will remain disabled until the ACPI
+ * Core Subsystem is restarted, or a handler is installed.
+ */
+ status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to disable GPE[%2X]",
+ gpe_number));
+ return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ }
+ break;
+ }
+
+ return_UINT32(ACPI_INTERRUPT_HANDLED);
+}
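
Note: acpi_ev_gpe_dispatch() gives an installed handler (ACPI_GPE_DISPATCH_HANDLER) priority over the _Lxx/_Exx control method. A driver that owns a GPE (the EC driver is the classic case) claims it through the evxface.c/evxfevnt.c interfaces; a rough, hypothetical sketch, assuming the ACPICA external interfaces of this era:

static u32 my_gpe_handler(void *context)
{
	/* Invoked from acpi_ev_gpe_dispatch() at interrupt level */
	return ACPI_INTERRUPT_HANDLED;
}

static acpi_status my_gpe_init(u32 gpe_number)
{
	acpi_status status;

	/*
	 * Switch the GPE to ACPI_GPE_DISPATCH_HANDLER so the dispatcher
	 * calls my_gpe_handler() instead of queueing the _Lxx/_Exx method.
	 */
	status = acpi_install_gpe_handler(NULL, gpe_number,
					  ACPI_GPE_LEVEL_TRIGGERED,
					  my_gpe_handler, NULL);
	if (ACPI_FAILURE(status))
		return status;

	/*
	 * NULL gpe_device selects the FADT GPE block(s), as in
	 * acpi_ev_get_gpe_event_info() above.
	 */
	return acpi_enable_gpe(NULL, gpe_number, ACPI_NOT_ISR);
}
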
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
new file mode 100644
index 0000000..73c058e
--- /dev/null
+++ b/drivers/acpi/events/evgpeblk.c
@@ -0,0 +1,1215 @@
+/******************************************************************************
+ *
+ * Module Name: evgpeblk - GPE block creation and initialization.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evgpeblk")
+
+/* Local prototypes */
+static acpi_status
+acpi_ev_save_method_info(acpi_handle obj_handle,
+ u32 level, void *obj_desc, void **return_value);
+
+static acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+ u32 level, void *info, void **return_value);
+
+static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
+ interrupt_number);
+
+static acpi_status
+acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
+
+static acpi_status
+acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
+ u32 interrupt_number);
+
+static acpi_status
+acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_valid_gpe_event
+ *
+ * PARAMETERS: gpe_event_info - Info for this GPE
+ *
+ * RETURN: TRUE if the gpe_event is valid
+ *
+ * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
+ * Should be called only when the GPE lists are semaphore locked
+ * and not subject to change.
+ *
+ ******************************************************************************/
+
+u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_block;
+ struct acpi_gpe_block_info *gpe_block;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /* No need for spin lock since we are not changing any list elements */
+
+ /* Walk the GPE interrupt levels */
+
+ gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_block) {
+ gpe_block = gpe_xrupt_block->gpe_block_list_head;
+
+ /* Walk the GPE blocks on this interrupt level */
+
+ while (gpe_block) {
+ if ((&gpe_block->event_info[0] <= gpe_event_info) &&
+ (&gpe_block->
+ event_info[((acpi_size) gpe_block->
+ register_count) * 8] >
+ gpe_event_info)) {
+ return (TRUE);
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_block = gpe_xrupt_block->next;
+ }
+
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_walk_gpe_list
+ *
+ * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Walk the GPE lists.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback)
+{
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ acpi_status status = AE_OK;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Walk the interrupt level descriptor list */
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+
+ /* Walk all Gpe Blocks attached to this interrupt level */
+
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+
+ /* One callback per GPE block */
+
+ status = gpe_walk_callback(gpe_xrupt_info, gpe_block);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ gpe_block = gpe_block->next;
+ }
+
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+
+ unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_handlers
+ *
+ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
+ * Used only prior to termination.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ u32 i;
+ u32 j;
+
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
+
+ /* Examine each GPE Register within the block */
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+
+ /* Now look at the individual GPEs in this byte register */
+
+ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+ gpe_event_info =
+ &gpe_block->
+ event_info[((acpi_size) i *
+ ACPI_GPE_REGISTER_WIDTH) + j];
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_HANDLER) {
+ ACPI_FREE(gpe_event_info->dispatch.handler);
+ gpe_event_info->dispatch.handler = NULL;
+ gpe_event_info->flags &=
+ ~ACPI_GPE_DISPATCH_MASK;
+ }
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_save_method_info
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ * control method under the _GPE portion of the namespace.
+ * Extract the name and GPE type from the object, saving this
+ * information for quick lookup during GPE dispatch
+ *
+ * The name of each GPE control method is of the form:
+ * "_Lxx" or "_Exx"
+ * Where:
+ * L - means that the GPE is level triggered
+ * E - means that the GPE is edge triggered
+ * xx - is the GPE number [in HEX]
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_save_method_info(acpi_handle obj_handle,
+ u32 level, void *obj_desc, void **return_value)
+{
+ struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
+ struct acpi_gpe_event_info *gpe_event_info;
+ u32 gpe_number;
+ char name[ACPI_NAME_SIZE + 1];
+ u8 type;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_save_method_info);
+
+ /*
+ * _Lxx and _Exx GPE method support
+ *
+ * 1) Extract the name from the object and convert to a string
+ */
+ ACPI_MOVE_32_TO_32(name,
+ &((struct acpi_namespace_node *)obj_handle)->name.
+ integer);
+ name[ACPI_NAME_SIZE] = 0;
+
+ /*
+ * 2) Edge/Level determination is based on the 2nd character
+ * of the method name
+ *
+ * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
+ * if a _PRW object is found that points to this GPE.
+ */
+ switch (name[1]) {
+ case 'L':
+ type = ACPI_GPE_LEVEL_TRIGGERED;
+ break;
+
+ case 'E':
+ type = ACPI_GPE_EDGE_TRIGGERED;
+ break;
+
+ default:
+ /* Unknown method type, just ignore it! */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Ignoring unknown GPE method type: %s (name not of form _Lxx or _Exx)",
+ name));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Convert the last two characters of the name to the GPE Number */
+
+ gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
+ if (gpe_number == ACPI_UINT32_MAX) {
+
+ /* Conversion failed; invalid method, just ignore it */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Could not extract GPE number from name: %s (name is not of form _Lxx or _Exx)",
+ name));
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Ensure that we have a valid GPE number for this GPE block */
+
+ if ((gpe_number < gpe_block->block_base_number) ||
+ (gpe_number >=
+ (gpe_block->block_base_number +
+ (gpe_block->register_count * 8)))) {
+ /*
+ * Not valid for this GPE block, just ignore it
+ * However, it may be valid for a different GPE block, since GPE0 and GPE1
+ * methods both appear under \_GPE.
+ */
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Now we can add this information to the gpe_event_info block
+ * for use during dispatch of this GPE. Default type is RUNTIME, although
+ * this may change when the _PRW methods are executed later.
+ */
+ gpe_event_info =
+ &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
+
+ gpe_event_info->flags = (u8)
+ (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME);
+
+ gpe_event_info->dispatch.method_node =
+ (struct acpi_namespace_node *)obj_handle;
+
+ /* Update enable mask, but don't enable the HW GPE as of yet */
+
+ status = acpi_ev_enable_gpe(gpe_event_info, FALSE);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
+ "Registered GPE method %s as GPE number 0x%.2X\n",
+ name, gpe_number));
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_match_prw_and_gpe
+ *
+ * PARAMETERS: Callback from walk_namespace
+ *
+ * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
+ * not aborted on a single _PRW failure.
+ *
+ * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
+ * Device. Run the _PRW method. If present, extract the GPE
+ * number and mark the GPE as a WAKE GPE.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
+ u32 level, void *info, void **return_value)
+{
+ struct acpi_gpe_walk_info *gpe_info = (void *)info;
+ struct acpi_namespace_node *gpe_device;
+ struct acpi_gpe_block_info *gpe_block;
+ struct acpi_namespace_node *target_gpe_device;
+ struct acpi_gpe_event_info *gpe_event_info;
+ union acpi_operand_object *pkg_desc;
+ union acpi_operand_object *obj_desc;
+ u32 gpe_number;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
+
+ /* Check for a _PRW method under this device */
+
+ status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
+ ACPI_BTYPE_PACKAGE, &pkg_desc);
+ if (ACPI_FAILURE(status)) {
+
+ /* Ignore all errors from _PRW, we don't want to abort the subsystem */
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* The returned _PRW package must have at least two elements */
+
+ if (pkg_desc->package.count < 2) {
+ goto cleanup;
+ }
+
+ /* Extract pointers from the input context */
+
+ gpe_device = gpe_info->gpe_device;
+ gpe_block = gpe_info->gpe_block;
+
+ /*
+ * The _PRW object must return a package, we are only interested
+ * in the first element
+ */
+ obj_desc = pkg_desc->package.elements[0];
+
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_INTEGER) {
+
+ /* Use FADT-defined GPE device (from definition of _PRW) */
+
+ target_gpe_device = acpi_gbl_fadt_gpe_device;
+
+ /* Integer is the GPE number in the FADT described GPE blocks */
+
+ gpe_number = (u32) obj_desc->integer.value;
+ } else if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_PACKAGE) {
+
+ /* Package contains a GPE reference and GPE number within a GPE block */
+
+ if ((obj_desc->package.count < 2) ||
+ (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[0]) !=
+ ACPI_TYPE_LOCAL_REFERENCE)
+ || (ACPI_GET_OBJECT_TYPE(obj_desc->package.elements[1]) !=
+ ACPI_TYPE_INTEGER)) {
+ goto cleanup;
+ }
+
+ /* Get GPE block reference and decode */
+
+ target_gpe_device =
+ obj_desc->package.elements[0]->reference.node;
+ gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
+ } else {
+ /* Unknown type, just ignore it */
+
+ goto cleanup;
+ }
+
+ /*
+ * Is this GPE within this block?
+ *
+ * TRUE iff these conditions are true:
+ * 1) The GPE devices match.
+ * 2) The GPE index(number) is within the range of the Gpe Block
+ * associated with the GPE device.
+ */
+ if ((gpe_device == target_gpe_device) &&
+ (gpe_number >= gpe_block->block_base_number) &&
+ (gpe_number <
+ gpe_block->block_base_number + (gpe_block->register_count * 8))) {
+ gpe_event_info =
+ &gpe_block->event_info[gpe_number -
+ gpe_block->block_base_number];
+
+ /* Mark GPE for WAKE-ONLY but WAKE_DISABLED */
+
+ gpe_event_info->flags &=
+ ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED);
+
+ status =
+ acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+ status =
+ acpi_ev_update_gpe_enable_masks(gpe_event_info,
+ ACPI_GPE_DISABLE);
+ }
+
+ cleanup:
+ acpi_ut_remove_reference(pkg_desc);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_get_gpe_xrupt_block
+ *
+ * PARAMETERS: interrupt_number - Interrupt for a GPE block
+ *
+ * RETURN: A GPE interrupt block
+ *
+ * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
+ * block per unique interrupt level used for GPEs.
+ * Should be called only when the GPE lists are semaphore locked
+ * and not subject to change.
+ *
+ ******************************************************************************/
+
+static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
+ interrupt_number)
+{
+ struct acpi_gpe_xrupt_info *next_gpe_xrupt;
+ struct acpi_gpe_xrupt_info *gpe_xrupt;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
+
+ /* No need for lock since we are not changing any list elements here */
+
+ next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+ while (next_gpe_xrupt) {
+ if (next_gpe_xrupt->interrupt_number == interrupt_number) {
+ return_PTR(next_gpe_xrupt);
+ }
+
+ next_gpe_xrupt = next_gpe_xrupt->next;
+ }
+
+ /* Not found, must allocate a new xrupt descriptor */
+
+ gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
+ if (!gpe_xrupt) {
+ return_PTR(NULL);
+ }
+
+ gpe_xrupt->interrupt_number = interrupt_number;
+
+ /* Install new interrupt descriptor with spin lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (acpi_gbl_gpe_xrupt_list_head) {
+ next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
+ while (next_gpe_xrupt->next) {
+ next_gpe_xrupt = next_gpe_xrupt->next;
+ }
+
+ next_gpe_xrupt->next = gpe_xrupt;
+ gpe_xrupt->previous = next_gpe_xrupt;
+ } else {
+ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Install new interrupt handler if not SCI_INT */
+
+ if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
+ status = acpi_os_install_interrupt_handler(interrupt_number,
+ acpi_ev_gpe_xrupt_handler,
+ gpe_xrupt);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not install GPE interrupt handler at level 0x%X",
+ interrupt_number));
+ return_PTR(NULL);
+ }
+ }
+
+ return_PTR(gpe_xrupt);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_xrupt
+ *
+ * PARAMETERS: gpe_xrupt - A GPE interrupt info block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
+ * interrupt handler if not the SCI interrupt.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
+{
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
+
+ /* We never want to remove the SCI interrupt handler */
+
+ if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
+ gpe_xrupt->gpe_block_list_head = NULL;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Disable this interrupt */
+
+ status =
+ acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
+ acpi_ev_gpe_xrupt_handler);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Unlink the interrupt block with lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (gpe_xrupt->previous) {
+ gpe_xrupt->previous->next = gpe_xrupt->next;
+ } else {
+ /* No previous, update list head */
+
+ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
+ }
+
+ if (gpe_xrupt->next) {
+ gpe_xrupt->next->previous = gpe_xrupt->previous;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Free the block */
+
+ ACPI_FREE(gpe_xrupt);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_gpe_block
+ *
+ * PARAMETERS: gpe_block - New GPE block
+ * interrupt_number - Xrupt to be associated with this GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install new GPE block with mutex support
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
+ u32 interrupt_number)
+{
+ struct acpi_gpe_block_info *next_gpe_block;
+ struct acpi_gpe_xrupt_info *gpe_xrupt_block;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_install_gpe_block);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
+ if (!gpe_xrupt_block) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ /* Install the new block at the end of the list with lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (gpe_xrupt_block->gpe_block_list_head) {
+ next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
+ while (next_gpe_block->next) {
+ next_gpe_block = next_gpe_block->next;
+ }
+
+ next_gpe_block->next = gpe_block;
+ gpe_block->previous = next_gpe_block;
+ } else {
+ gpe_xrupt_block->gpe_block_list_head = gpe_block;
+ }
+
+ gpe_block->xrupt_block = gpe_xrupt_block;
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ unlock_and_exit:
+ status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_delete_gpe_block
+ *
+ * PARAMETERS: gpe_block - Existing GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a GPE block
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
+{
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(ev_delete_gpe_block);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Disable all GPEs in this block */
+
+ status = acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block);
+
+ if (!gpe_block->previous && !gpe_block->next) {
+
+ /* This is the last gpe_block on this interrupt */
+
+ status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+ } else {
+ /* Remove the block on this interrupt with lock */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ if (gpe_block->previous) {
+ gpe_block->previous->next = gpe_block->next;
+ } else {
+ gpe_block->xrupt_block->gpe_block_list_head =
+ gpe_block->next;
+ }
+
+ if (gpe_block->next) {
+ gpe_block->next->previous = gpe_block->previous;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ }
+
+ /* Free the gpe_block */
+
+ ACPI_FREE(gpe_block->register_info);
+ ACPI_FREE(gpe_block->event_info);
+ ACPI_FREE(gpe_block);
+
+ unlock_and_exit:
+ status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_create_gpe_info_blocks
+ *
+ * PARAMETERS: gpe_block - New GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
+{
+ struct acpi_gpe_register_info *gpe_register_info = NULL;
+ struct acpi_gpe_event_info *gpe_event_info = NULL;
+ struct acpi_gpe_event_info *this_event;
+ struct acpi_gpe_register_info *this_register;
+ u32 i;
+ u32 j;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
+
+ /* Allocate the GPE register information block */
+
+ gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
+ register_count *
+ sizeof(struct
+ acpi_gpe_register_info));
+ if (!gpe_register_info) {
+ ACPI_ERROR((AE_INFO,
+ "Could not allocate the GpeRegisterInfo table"));
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /*
+ * Allocate the GPE event_info block. There are eight distinct GPEs
+ * per register. Initialization to zeros is sufficient.
+ */
+ gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
+ register_count *
+ ACPI_GPE_REGISTER_WIDTH) *
+ sizeof(struct
+ acpi_gpe_event_info));
+ if (!gpe_event_info) {
+ ACPI_ERROR((AE_INFO,
+ "Could not allocate the GpeEventInfo table"));
+ status = AE_NO_MEMORY;
+ goto error_exit;
+ }
+
+ /* Save the new Info arrays in the GPE block */
+
+ gpe_block->register_info = gpe_register_info;
+ gpe_block->event_info = gpe_event_info;
+
+ /*
+ * Initialize the GPE Register and Event structures. A goal of these
+ * tables is to hide the fact that there are two separate GPE register sets
+ * in a given GPE hardware block, the status registers occupy the first half,
+ * and the enable registers occupy the second half.
+ */
+ this_register = gpe_register_info;
+ this_event = gpe_event_info;
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+
+ /* Init the register_info for this GPE register (8 GPEs) */
+
+ this_register->base_gpe_number =
+ (u8) (gpe_block->block_base_number +
+ (i * ACPI_GPE_REGISTER_WIDTH));
+
+ this_register->status_address.address =
+ gpe_block->block_address.address + i;
+
+ this_register->enable_address.address =
+ gpe_block->block_address.address + i +
+ gpe_block->register_count;
+
+ this_register->status_address.space_id =
+ gpe_block->block_address.space_id;
+ this_register->enable_address.space_id =
+ gpe_block->block_address.space_id;
+ this_register->status_address.bit_width =
+ ACPI_GPE_REGISTER_WIDTH;
+ this_register->enable_address.bit_width =
+ ACPI_GPE_REGISTER_WIDTH;
+ this_register->status_address.bit_offset =
+ ACPI_GPE_REGISTER_WIDTH;
+ this_register->enable_address.bit_offset =
+ ACPI_GPE_REGISTER_WIDTH;
+
+ /* Init the event_info for each GPE within this register */
+
+ for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+ this_event->gpe_number =
+ (u8) (this_register->base_gpe_number + j);
+ this_event->register_info = this_register;
+ this_event++;
+ }
+
+ /* Disable all GPEs within this register */
+
+ status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0x00,
+ &this_register->
+ enable_address);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+
+ /* Clear any pending GPE events within this register */
+
+ status = acpi_hw_low_level_write(ACPI_GPE_REGISTER_WIDTH, 0xFF,
+ &this_register->
+ status_address);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+
+ this_register++;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+
+ error_exit:
+ if (gpe_register_info) {
+ ACPI_FREE(gpe_register_info);
+ }
+ if (gpe_event_info) {
+ ACPI_FREE(gpe_event_info);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_create_gpe_block
+ *
+ * PARAMETERS: gpe_device - Handle to the parent GPE block
+ * gpe_block_address - Address and space_iD
+ * register_count - Number of GPE register pairs in the block
+ * gpe_block_base_number - Starting GPE number for the block
+ * interrupt_number - H/W interrupt for the block
+ * return_gpe_block - Where the new block descriptor is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
+ * the block are disabled at exit.
+ * Note: Assumes namespace is locked.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
+ struct acpi_generic_address *gpe_block_address,
+ u32 register_count,
+ u8 gpe_block_base_number,
+ u32 interrupt_number,
+ struct acpi_gpe_block_info **return_gpe_block)
+{
+ acpi_status status;
+ struct acpi_gpe_block_info *gpe_block;
+
+ ACPI_FUNCTION_TRACE(ev_create_gpe_block);
+
+ if (!register_count) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Allocate a new GPE block */
+
+ gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
+ if (!gpe_block) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Initialize the new GPE block */
+
+ gpe_block->node = gpe_device;
+ gpe_block->register_count = register_count;
+ gpe_block->block_base_number = gpe_block_base_number;
+
+ ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
+ sizeof(struct acpi_generic_address));
+
+ /*
+ * Create the register_info and event_info sub-structures
+ * Note: disables and clears all GPEs in the block
+ */
+ status = acpi_ev_create_gpe_info_blocks(gpe_block);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(gpe_block);
+ return_ACPI_STATUS(status);
+ }
+
+ /* Install the new block in the global lists */
+
+ status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(gpe_block);
+ return_ACPI_STATUS(status);
+ }
+
+ /* Find all GPE methods (_Lxx, _Exx) for this block */
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
+ ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
+ acpi_ev_save_method_info, gpe_block,
+ NULL);
+
+ /* Return the new block */
+
+ if (return_gpe_block) {
+ (*return_gpe_block) = gpe_block;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
+ (u32) gpe_block->block_base_number,
+ (u32) (gpe_block->block_base_number +
+ ((gpe_block->register_count *
+ ACPI_GPE_REGISTER_WIDTH) - 1)),
+ gpe_device->name.ascii, gpe_block->register_count,
+ interrupt_number));
+
+ return_ACPI_STATUS(AE_OK);
+}
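+
+/*
+ * Standalone sketch (hypothetical helper, not ACPICA code) of how a GPE
+ * number maps onto the register_info/event_info tables created above:
+ * register index = (gpe_number - block_base) / 8 and the bit within that
+ * register = (gpe_number - block_base) % 8. The block as a whole covers
+ * GPE numbers block_base through
+ * block_base + (register_count * ACPI_GPE_REGISTER_WIDTH) - 1, which is the
+ * range printed by the debug message above.
+ */
+static void
+example_decode_gpe_number(u8 gpe_number, u8 block_base,
+			  u32 *register_index, u8 *bit_mask)
+{
+	u32 relative_gpe = (u32)(gpe_number - block_base);
+
+	*register_index = relative_gpe / ACPI_GPE_REGISTER_WIDTH;
+	*bit_mask = (u8)(1 << (relative_gpe % ACPI_GPE_REGISTER_WIDTH));
+}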
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_initialize_gpe_block
+ *
+ * PARAMETERS: gpe_device - Handle to the parent GPE block
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize and enable a GPE block. First find and run any
+ *              _PRW methods associated with the block, then enable the
+ * appropriate GPEs.
+ * Note: Assumes namespace is locked.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
+ struct acpi_gpe_block_info *gpe_block)
+{
+ acpi_status status;
+ struct acpi_gpe_event_info *gpe_event_info;
+ struct acpi_gpe_walk_info gpe_info;
+ u32 wake_gpe_count;
+ u32 gpe_enabled_count;
+ u32 i;
+ u32 j;
+
+ ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
+
+ /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
+
+ if (!gpe_block) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Runtime option: Should wake GPEs be enabled at runtime? The default
+ * is no, they should only be enabled just as the machine goes to sleep.
+ */
+ if (acpi_gbl_leave_wake_gpes_disabled) {
+ /*
+ * Differentiate runtime vs wake GPEs, via the _PRW control methods.
+ * Each GPE that has one or more _PRWs that reference it is by
+ * definition a wake GPE and will not be enabled while the machine
+ * is running.
+ */
+ gpe_info.gpe_block = gpe_block;
+ gpe_info.gpe_device = gpe_device;
+
+ status =
+ acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
+ acpi_ev_match_prw_and_gpe, &gpe_info,
+ NULL);
+ }
+
+ /*
+ * Enable all GPEs in this block that have these attributes:
+ * 1) are "runtime" or "run/wake" GPEs, and
+ * 2) have a corresponding _Lxx or _Exx method
+ *
+ * Any other GPEs within this block must be enabled via the acpi_enable_gpe()
+ * external interface.
+ */
+ wake_gpe_count = 0;
+ gpe_enabled_count = 0;
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
+
+ /* Get the info block for this particular GPE */
+
+ gpe_event_info =
+ &gpe_block->
+ event_info[((acpi_size) i *
+ ACPI_GPE_REGISTER_WIDTH) + j];
+
+ if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_METHOD)
+ && (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
+ gpe_enabled_count++;
+ }
+
+ if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) {
+ wake_gpe_count++;
+ }
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
+ wake_gpe_count, gpe_enabled_count));
+
+ /* Enable all valid runtime GPEs found above */
+
+ status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
+ gpe_block));
+ }
+
+ return_ACPI_STATUS(status);
+}
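+
+/*
+ * Standalone sketch (hypothetical helper, not part of ACPICA) of the
+ * classification applied in the loop above: a GPE is auto-enabled at
+ * initialization only when it is dispatched to a _Lxx/_Exx method AND is
+ * typed as a runtime GPE. A GPE referenced by any _PRW is typed as a wake
+ * GPE and stays disabled until the machine prepares to sleep.
+ */
+static u8 example_gpe_is_auto_enabled(u8 flags)
+{
+	return (u8)(((flags & ACPI_GPE_DISPATCH_MASK) ==
+		     ACPI_GPE_DISPATCH_METHOD) &&
+		    (flags & ACPI_GPE_TYPE_RUNTIME));
+}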
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_gpe_initialize
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize the GPE data structures
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_gpe_initialize(void)
+{
+ u32 register_count0 = 0;
+ u32 register_count1 = 0;
+ u32 gpe_number_max = 0;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_gpe_initialize);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Initialize the GPE Block(s) defined in the FADT
+ *
+ * Why the GPE register block lengths are divided by 2: From the ACPI Spec,
+ * section "General-Purpose Event Registers", we have:
+ *
+ * "Each register block contains two registers of equal length
+ * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
+	 * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN.
+ * The length of the GPE1_STS and GPE1_EN registers is equal to
+ * half the GPE1_LEN. If a generic register block is not supported
+ * then its respective block pointer and block length values in the
+ * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
+ * to be the same size."
+ */
+
+ /*
+ * Determine the maximum GPE number for this machine.
+ *
+ * Note: both GPE0 and GPE1 are optional, and either can exist without
+ * the other.
+ *
+ * If EITHER the register length OR the block address are zero, then that
+ * particular block is not supported.
+ */
+ if (acpi_gbl_FADT.gpe0_block_length &&
+ acpi_gbl_FADT.xgpe0_block.address) {
+
+ /* GPE block 0 exists (has both length and address > 0) */
+
+ register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
+
+ gpe_number_max =
+ (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
+
+ /* Install GPE Block 0 */
+
+ status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+ &acpi_gbl_FADT.xgpe0_block,
+ register_count0, 0,
+ acpi_gbl_FADT.sci_interrupt,
+ &acpi_gbl_gpe_fadt_blocks[0]);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not create GPE Block 0"));
+ }
+ }
+
+ if (acpi_gbl_FADT.gpe1_block_length &&
+ acpi_gbl_FADT.xgpe1_block.address) {
+
+ /* GPE block 1 exists (has both length and address > 0) */
+
+ register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
+
+ /* Check for GPE0/GPE1 overlap (if both banks exist) */
+
+ if ((register_count0) &&
+ (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
+ ACPI_ERROR((AE_INFO,
+ "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
+ gpe_number_max, acpi_gbl_FADT.gpe1_base,
+ acpi_gbl_FADT.gpe1_base +
+ ((register_count1 *
+ ACPI_GPE_REGISTER_WIDTH) - 1)));
+
+ /* Ignore GPE1 block by setting the register count to zero */
+
+ register_count1 = 0;
+ } else {
+ /* Install GPE Block 1 */
+
+ status =
+ acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
+ &acpi_gbl_FADT.xgpe1_block,
+ register_count1,
+ acpi_gbl_FADT.gpe1_base,
+ acpi_gbl_FADT.
+ sci_interrupt,
+ &acpi_gbl_gpe_fadt_blocks
+ [1]);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not create GPE Block 1"));
+ }
+
+ /*
+ * GPE0 and GPE1 do not have to be contiguous in the GPE number
+ * space. However, GPE0 always starts at GPE number zero.
+ */
+ gpe_number_max = acpi_gbl_FADT.gpe1_base +
+ ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
+ }
+ }
+
+ /* Exit if there are no GPE registers */
+
+ if ((register_count0 + register_count1) == 0) {
+
+ /* GPEs are not required by ACPI, this is OK */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "There are no GPE blocks defined in the FADT\n"));
+ status = AE_OK;
+ goto cleanup;
+ }
+
+ /* Check for Max GPE number out-of-range */
+
+ if (gpe_number_max > ACPI_GPE_MAX) {
+ ACPI_ERROR((AE_INFO,
+ "Maximum GPE number from FADT is too large: 0x%X",
+ gpe_number_max));
+ status = AE_BAD_VALUE;
+ goto cleanup;
+ }
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(AE_OK);
+}
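+
+/*
+ * Standalone sketch (hypothetical helper, not ACPICA code) of the FADT
+ * arithmetic used above. A GPEx block length covers both the GPEx_STS and
+ * GPEx_EN banks, so the number of register pairs is the length divided by
+ * two, and each pair provides 8 GPEs. For example, GPE0_LEN = 8 yields 4
+ * register pairs and GPE numbers 0x00 through 0x1F.
+ */
+static u32
+example_gpe_count_from_fadt(u32 gpe_block_length, u8 gpe_base_number,
+			    u32 *highest_gpe_number)
+{
+	u32 register_count = gpe_block_length / 2;
+
+	*highest_gpe_number = gpe_base_number +
+	    (register_count * ACPI_GPE_REGISTER_WIDTH) - 1;
+	return (register_count);
+}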
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
new file mode 100644
index 0000000..1d5670b
--- /dev/null
+++ b/drivers/acpi/events/evmisc.c
@@ -0,0 +1,631 @@
+/******************************************************************************
+ *
+ * Module Name: evmisc - Miscellaneous event manager support functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evmisc")
+
+/* Pointer to FACS needed for the Global Lock */
+static struct acpi_table_facs *facs = NULL;
+
+/* Local prototypes */
+
+static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
+
+static u32 acpi_ev_global_lock_handler(void *context);
+
+static acpi_status acpi_ev_remove_global_lock_handler(void);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_is_notify_object
+ *
+ * PARAMETERS: Node - Node to check
+ *
+ * RETURN: TRUE if notifies allowed on this object
+ *
+ * DESCRIPTION: Check type of node for an object that supports notifies.
+ *
+ * TBD: This could be replaced by a flag bit in the node.
+ *
+ ******************************************************************************/
+
+u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
+{
+ switch (node->type) {
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_THERMAL:
+ /*
+ * These are the ONLY objects that can receive ACPI notifications
+ */
+ return (TRUE);
+
+ default:
+ return (FALSE);
+ }
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_queue_notify_request
+ *
+ * PARAMETERS: Node - NS node for the notified object
+ * notify_value - Value from the Notify() request
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Dispatch a device notification event to a previously
+ * installed handler.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
+ u32 notify_value)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *handler_obj = NULL;
+ union acpi_generic_state *notify_info;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_NAME(ev_queue_notify_request);
+
+ /*
+ * For value 3 (Ejection Request), some device method may need to be run.
+ * For value 2 (Device Wake) if _PRW exists, the _PS0 method may need
+ * to be run.
+ * For value 0x80 (Status Change) on the power button or sleep button,
+ * initiate soft-off or sleep operation?
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Dispatching Notify on [%4.4s] Node %p Value 0x%2.2X (%s)\n",
+ acpi_ut_get_node_name(node), node, notify_value,
+ acpi_ut_get_notify_name(notify_value)));
+
+ /* Get the notify object attached to the NS Node */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
+
+		/* We have the notify object; get the right handler */
+
+ switch (node->type) {
+
+ /* Notify allowed only on these types */
+
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_THERMAL:
+ case ACPI_TYPE_PROCESSOR:
+
+ if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
+ handler_obj =
+ obj_desc->common_notify.system_notify;
+ } else {
+ handler_obj =
+ obj_desc->common_notify.device_notify;
+ }
+ break;
+
+ default:
+ /* All other types are not supported */
+ return (AE_TYPE);
+ }
+ }
+
+ /*
+ * If there is any handler to run, schedule the dispatcher.
+ * Check for:
+ * 1) Global system notify handler
+ * 2) Global device notify handler
+ * 3) Per-device notify handler
+ */
+ if ((acpi_gbl_system_notify.handler
+ && (notify_value <= ACPI_MAX_SYS_NOTIFY))
+ || (acpi_gbl_device_notify.handler
+ && (notify_value > ACPI_MAX_SYS_NOTIFY)) || handler_obj) {
+ notify_info = acpi_ut_create_generic_state();
+ if (!notify_info) {
+ return (AE_NO_MEMORY);
+ }
+
+ if (!handler_obj) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Executing system notify handler for Notify (%4.4s, %X) node %p\n",
+ acpi_ut_get_node_name(node),
+ notify_value, node));
+ }
+
+ notify_info->common.descriptor_type =
+ ACPI_DESC_TYPE_STATE_NOTIFY;
+ notify_info->notify.node = node;
+ notify_info->notify.value = (u16) notify_value;
+ notify_info->notify.handler_obj = handler_obj;
+
+ status =
+ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
+ notify_info);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_delete_generic_state(notify_info);
+ }
+ } else {
+ /*
+ * There is no notify handler (per-device or system) for this device.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "No notify handler for Notify (%4.4s, %X) node %p\n",
+ acpi_ut_get_node_name(node), notify_value,
+ node));
+ }
+
+ return (status);
+}
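+
+/*
+ * Standalone sketch (hypothetical helper, not part of ACPICA) of the handler
+ * selection rule used above: notify values up to ACPI_MAX_SYS_NOTIFY (0x7F)
+ * are standard "system" notifications and are routed to the system notify
+ * handler, while larger values (0x80-0xFF) are device/OEM-specific and are
+ * routed to the device notify handler attached to the object.
+ */
+static union acpi_operand_object
+    *example_select_notify_handler(union acpi_operand_object *obj_desc,
+				   u32 notify_value)
+{
+	if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
+		return (obj_desc->common_notify.system_notify);
+	}
+
+	return (obj_desc->common_notify.device_notify);
+}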
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_notify_dispatch
+ *
+ * PARAMETERS: Context - To be passed to the notify handler
+ *
+ * RETURN: None.
+ *
+ * DESCRIPTION: Dispatch a device notification event to a previously
+ * installed handler.
+ *
+ ******************************************************************************/
+
+static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
+{
+ union acpi_generic_state *notify_info =
+ (union acpi_generic_state *)context;
+ acpi_notify_handler global_handler = NULL;
+ void *global_context = NULL;
+ union acpi_operand_object *handler_obj;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * We will invoke a global notify handler if installed.
+ * This is done _before_ we invoke the per-device handler attached
+ * to the device.
+ */
+ if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) {
+
+ /* Global system notification handler */
+
+ if (acpi_gbl_system_notify.handler) {
+ global_handler = acpi_gbl_system_notify.handler;
+ global_context = acpi_gbl_system_notify.context;
+ }
+ } else {
+ /* Global driver notification handler */
+
+ if (acpi_gbl_device_notify.handler) {
+ global_handler = acpi_gbl_device_notify.handler;
+ global_context = acpi_gbl_device_notify.context;
+ }
+ }
+
+ /* Invoke the system handler first, if present */
+
+ if (global_handler) {
+ global_handler(notify_info->notify.node,
+ notify_info->notify.value, global_context);
+ }
+
+ /* Now invoke the per-device handler, if present */
+
+ handler_obj = notify_info->notify.handler_obj;
+ if (handler_obj) {
+ handler_obj->notify.handler(notify_info->notify.node,
+ notify_info->notify.value,
+ handler_obj->notify.context);
+ }
+
+ /* All done with the info object */
+
+ acpi_ut_delete_generic_state(notify_info);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_global_lock_handler
+ *
+ * PARAMETERS: Context - From thread interface, not used
+ *
+ * RETURN: ACPI_INTERRUPT_HANDLED
+ *
+ * DESCRIPTION: Invoked directly from the SCI handler when a global lock
+ * release interrupt occurs. Attempt to acquire the global lock,
+ * if successful, signal the thread waiting for the lock.
+ *
+ * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
+ * this is not possible for some reason, a separate thread will have to be
+ * scheduled to do this.
+ *
+ ******************************************************************************/
+
+static u32 acpi_ev_global_lock_handler(void *context)
+{
+ u8 acquired = FALSE;
+
+ /*
+ * Attempt to get the lock.
+ *
+ * If we don't get it now, it will be marked pending and we will
+ * take another interrupt when it becomes free.
+ */
+ ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
+ if (acquired) {
+
+ /* Got the lock, now wake all threads waiting for it */
+
+ acpi_gbl_global_lock_acquired = TRUE;
+ /* Send a unit to the semaphore */
+
+ if (ACPI_FAILURE
+ (acpi_os_signal_semaphore
+ (acpi_gbl_global_lock_semaphore, 1))) {
+ ACPI_ERROR((AE_INFO,
+ "Could not signal Global Lock semaphore"));
+ }
+ }
+
+ return (ACPI_INTERRUPT_HANDLED);
+}
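+
+/*
+ * Standalone sketch of the FACS global lock bit protocol that the
+ * ACPI_ACQUIRE_GLOBAL_LOCK macro used above hides (the macro wraps this in
+ * an atomic compare-and-swap loop that retries if the lock word changes
+ * underneath us). Per the ACPI specification, bit 0 of the lock field is
+ * Pending and bit 1 is Owned. The EXAMPLE_* names below are illustrative,
+ * not ACPICA definitions.
+ */
+#define EXAMPLE_GL_PENDING	0x01	/* FACS global lock bit 0: Pending */
+#define EXAMPLE_GL_OWNED	0x02	/* FACS global lock bit 1: Owned */
+
+static u32 example_gl_acquire_value(u32 lock_value, u8 *acquired)
+{
+	u32 new_value;
+
+	/* Optimistically take ownership and clear our view of Pending */
+
+	new_value = (lock_value & ~(u32)EXAMPLE_GL_PENDING) | EXAMPLE_GL_OWNED;
+
+	/* Already owned (e.g., by the firmware): mark ourselves Pending */
+
+	if (lock_value & EXAMPLE_GL_OWNED) {
+		new_value |= EXAMPLE_GL_PENDING;
+	}
+
+	/* We acquired the lock only if we did not have to set Pending */
+
+	*acquired = (u8)(!(new_value & EXAMPLE_GL_PENDING));
+	return (new_value);
+}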
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_init_global_lock_handler
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for the global lock release event
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_init_global_lock_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
+
+ status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
+ ACPI_CAST_INDIRECT_PTR(struct
+ acpi_table_header,
+ &facs));
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_gbl_global_lock_present = TRUE;
+ status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
+ acpi_ev_global_lock_handler,
+ NULL);
+
+ /*
+ * If the global lock does not exist on this platform, the attempt
+ * to enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick)
+ * Map to AE_OK, but mark global lock as not present.
+ * Any attempt to actually use the global lock will be flagged
+ * with an error.
+ */
+ if (status == AE_NO_HARDWARE_RESPONSE) {
+ ACPI_ERROR((AE_INFO,
+ "No response from Global Lock hardware, disabling lock"));
+
+ acpi_gbl_global_lock_present = FALSE;
+ status = AE_OK;
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_remove_global_lock_handler
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove the handler for the Global Lock
+ *
+ ******************************************************************************/
+
+static acpi_status acpi_ev_remove_global_lock_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
+
+ acpi_gbl_global_lock_present = FALSE;
+ status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
+ acpi_ev_global_lock_handler);
+
+ return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ev_acquire_global_lock
+ *
+ * PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Attempt to gain ownership of the Global Lock.
+ *
+ * MUTEX: Interpreter must be locked
+ *
+ * Note: The original implementation allowed multiple threads to "acquire" the
+ * Global Lock, and the OS would hold the lock until the last thread had
+ * released it. However, this could potentially starve the BIOS out of the
+ * lock, especially in the case where there is a tight handshake between the
+ * Embedded Controller driver and the BIOS. Therefore, this implementation
+ * allows only one thread to acquire the HW Global Lock at a time, and makes
+ * the global lock appear as a standard mutex on the OS side.
+ *
+ *****************************************************************************/
+static acpi_thread_id acpi_ev_global_lock_thread_id;
+static int acpi_ev_global_lock_acquired;
+
+acpi_status acpi_ev_acquire_global_lock(u16 timeout)
+{
+ acpi_status status = AE_OK;
+ u8 acquired = FALSE;
+
+ ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
+
+ /*
+ * Only one thread can acquire the GL at a time, the global_lock_mutex
+ * enforces this. This interface releases the interpreter if we must wait.
+ */
+ status = acpi_ex_system_wait_mutex(
+ acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
+ if (status == AE_TIME) {
+ if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
+ acpi_ev_global_lock_acquired++;
+ return AE_OK;
+ }
+ }
+
+ if (ACPI_FAILURE(status)) {
+ status = acpi_ex_system_wait_mutex(
+ acpi_gbl_global_lock_mutex->mutex.os_mutex,
+ timeout);
+ }
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
+ acpi_ev_global_lock_acquired++;
+
+ /*
+ * Update the global lock handle and check for wraparound. The handle is
+ * only used for the external global lock interfaces, but it is updated
+ * here to properly handle the case where a single thread may acquire the
+ * lock via both the AML and the acpi_acquire_global_lock interfaces. The
+ * handle is therefore updated on the first acquire from a given thread
+ * regardless of where the acquisition request originated.
+ */
+ acpi_gbl_global_lock_handle++;
+ if (acpi_gbl_global_lock_handle == 0) {
+ acpi_gbl_global_lock_handle = 1;
+ }
+
+ /*
+ * Make sure that a global lock actually exists. If not, just treat
+ * the lock as a standard mutex.
+ */
+ if (!acpi_gbl_global_lock_present) {
+ acpi_gbl_global_lock_acquired = TRUE;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Attempt to acquire the actual hardware lock */
+
+ ACPI_ACQUIRE_GLOBAL_LOCK(facs, acquired);
+ if (acquired) {
+
+ /* We got the lock */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Acquired hardware Global Lock\n"));
+
+ acpi_gbl_global_lock_acquired = TRUE;
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Did not get the lock. The pending bit was set above, and we must now
+ * wait until we get the global lock released interrupt.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n"));
+
+ /*
+ * Wait for handshake with the global lock interrupt handler.
+ * This interface releases the interpreter if we must wait.
+ */
+ status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore,
+ ACPI_WAIT_FOREVER);
+
+ return_ACPI_STATUS(status);
+}
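+
+/*
+ * Standalone sketch (hypothetical type and helper, not ACPICA code) of the
+ * recursive-acquire bookkeeping used above: the zero-timeout mutex try
+ * distinguishes "already owned by this thread" (just bump a depth counter)
+ * from "owned by another thread" (fall back to waiting with the caller's
+ * timeout).
+ */
+struct example_recursive_lock {
+	acpi_thread_id owner;
+	u32 depth;
+};
+
+static u8 example_try_recursive_enter(struct example_recursive_lock *lock,
+				      acpi_thread_id this_thread)
+{
+	if (lock->depth && (lock->owner == this_thread)) {
+		lock->depth++;
+		return (TRUE);	/* Re-entered without touching the OS mutex */
+	}
+
+	return (FALSE);		/* Caller must block on the OS mutex */
+}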
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_release_global_lock
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Releases ownership of the Global Lock.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_release_global_lock(void)
+{
+ u8 pending = FALSE;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ev_release_global_lock);
+
+ /* Lock must be already acquired */
+
+ if (!acpi_gbl_global_lock_acquired) {
+ ACPI_WARNING((AE_INFO,
+ "Cannot release the ACPI Global Lock, it has not been acquired"));
+ return_ACPI_STATUS(AE_NOT_ACQUIRED);
+ }
+
+ acpi_ev_global_lock_acquired--;
+ if (acpi_ev_global_lock_acquired > 0) {
+ return AE_OK;
+ }
+
+ if (acpi_gbl_global_lock_present) {
+
+ /* Allow any thread to release the lock */
+
+ ACPI_RELEASE_GLOBAL_LOCK(facs, pending);
+
+ /*
+ * If the pending bit was set, we must write GBL_RLS to the control
+ * register
+ */
+ if (pending) {
+ status =
+ acpi_set_register(ACPI_BITREG_GLOBAL_LOCK_RELEASE,
+ 1);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Released hardware Global Lock\n"));
+ }
+
+ acpi_gbl_global_lock_acquired = FALSE;
+
+ /* Release the local GL mutex */
+ acpi_ev_global_lock_thread_id = NULL;
+ acpi_ev_global_lock_acquired = 0;
+ acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
+ return_ACPI_STATUS(status);
+}
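+
+/*
+ * Standalone sketch (hypothetical helper, not ACPICA code) of the release
+ * side of the FACS protocol hidden by ACPI_RELEASE_GLOBAL_LOCK: clear both
+ * the Owned bit (bit 1) and the Pending bit (bit 0). If Pending was set by
+ * another requester while we held the lock, the caller must then write
+ * GBL_RLS, as done above, so the firmware is signaled.
+ */
+static u32 example_gl_release_value(u32 lock_value, u8 *pending)
+{
+	/* Did another requester set Pending (bit 0) while we owned the lock? */
+
+	*pending = (u8)(lock_value & 0x01);
+
+	/* Clear both Owned (bit 1) and Pending (bit 0) */
+
+	return (lock_value & ~((u32)0x03));
+}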
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ev_terminate
+ *
+ * PARAMETERS: none
+ *
+ * RETURN: none
+ *
+ * DESCRIPTION: Disable events and free memory allocated for table storage.
+ *
+ ******************************************************************************/
+
+void acpi_ev_terminate(void)
+{
+ u32 i;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_terminate);
+
+ if (acpi_gbl_events_initialized) {
+ /*
+ * Disable all event-related functionality.
+		 * In all cases, on error, print a message but do not abort.
+ */
+
+ /* Disable all fixed events */
+
+ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+ status = acpi_disable_event(i, 0);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not disable fixed event %d",
+ (u32) i));
+ }
+ }
+
+ /* Disable all GPEs in all GPE blocks */
+
+ status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block);
+
+ /* Remove SCI handler */
+
+ status = acpi_ev_remove_sci_handler();
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
+ }
+
+ status = acpi_ev_remove_global_lock_handler();
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not remove Global Lock handler"));
+ }
+ }
+
+ /* Deallocate all handler objects installed within GPE info structs */
+
+ status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers);
+
+ /* Return to original mode if necessary */
+
+ if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) {
+ status = acpi_disable();
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "AcpiDisable failed"));
+ }
+ }
+ return_VOID;
+}
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
new file mode 100644
index 0000000..236fbd1
--- /dev/null
+++ b/drivers/acpi/events/evregion.c
@@ -0,0 +1,1074 @@
+/******************************************************************************
+ *
+ * Module Name: evregion - ACPI address_space (op_region) handler dispatch
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evregion")
+#define ACPI_NUM_DEFAULT_SPACES 4
+static u8 acpi_gbl_default_address_spaces[ACPI_NUM_DEFAULT_SPACES] = {
+ ACPI_ADR_SPACE_SYSTEM_MEMORY,
+ ACPI_ADR_SPACE_SYSTEM_IO,
+ ACPI_ADR_SPACE_PCI_CONFIG,
+ ACPI_ADR_SPACE_DATA_TABLE
+};
+
+/* Local prototypes */
+
+static acpi_status
+acpi_ev_reg_run(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+static acpi_status
+acpi_ev_install_handler(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_region_handlers
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Installs the core subsystem default address space handlers.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_install_region_handlers(void)
+{
+ acpi_status status;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ev_install_region_handlers);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * All address spaces (PCI Config, EC, SMBus) are scope dependent
+ * and registration must occur for a specific device.
+ *
+ * In the case of the system memory and IO address spaces there is currently
+ * no device associated with the address space. For these we use the root.
+ *
+ * We install the default PCI config space handler at the root so
+	 * that this space is immediately available even though we have
+	 * not enumerated all the PCI Root Buses yet. This is to conform
+	 * to the ACPI specification, which states that the PCI config
+	 * space must always be available -- even though we are nowhere
+ * near ready to find the PCI root buses at this point.
+ *
+ * NOTE: We ignore AE_ALREADY_EXISTS because this means that a handler
+ * has already been installed (via acpi_install_address_space_handler).
+ * Similar for AE_SAME_HANDLER.
+ */
+ for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
+ status = acpi_ev_install_space_handler(acpi_gbl_root_node,
+ acpi_gbl_default_address_spaces
+ [i],
+ ACPI_DEFAULT_HANDLER,
+ NULL, NULL);
+ switch (status) {
+ case AE_OK:
+ case AE_SAME_HANDLER:
+ case AE_ALREADY_EXISTS:
+
+ /* These exceptions are all OK */
+
+ status = AE_OK;
+ break;
+
+ default:
+
+ goto unlock_and_exit;
+ }
+ }
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_initialize_op_regions
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute _REG methods for all Operation Regions that have
+ * an installed default region handler.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_initialize_op_regions(void)
+{
+ acpi_status status;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ev_initialize_op_regions);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Run the _REG methods for op_regions in each default address space
+ */
+ for (i = 0; i < ACPI_NUM_DEFAULT_SPACES; i++) {
+
+ /* TBD: Make sure handler is the DEFAULT handler, otherwise
+ * _REG will have already been run.
+ */
+ status = acpi_ev_execute_reg_methods(acpi_gbl_root_node,
+ acpi_gbl_default_address_spaces
+ [i]);
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_execute_reg_method
+ *
+ * PARAMETERS: region_obj - Region object
+ * Function - Passed to _REG: On (1) or Off (0)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute _REG method for a region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
+{
+ struct acpi_evaluate_info *info;
+ union acpi_operand_object *args[3];
+ union acpi_operand_object *region_obj2;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_execute_reg_method);
+
+ region_obj2 = acpi_ns_get_secondary_object(region_obj);
+ if (!region_obj2) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ if (region_obj2->extra.method_REG == NULL) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Allocate and initialize the evaluation information block */
+
+ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ info->prefix_node = region_obj2->extra.method_REG;
+ info->pathname = NULL;
+ info->parameters = args;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ /*
+ * The _REG method has two arguments:
+ *
+	 * Arg0 - Integer:
+	 *	Operation region space ID. Same value as region_obj->Region.space_id
+	 *
+	 * Arg1 - Integer:
+	 *	Connection status: 1 for connecting the handler, 0 for disconnecting
+	 *	the handler (passed as a parameter)
+ */
+ args[0] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!args[0]) {
+ status = AE_NO_MEMORY;
+ goto cleanup1;
+ }
+
+ args[1] = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
+ if (!args[1]) {
+ status = AE_NO_MEMORY;
+ goto cleanup2;
+ }
+
+ /* Setup the parameter objects */
+
+ args[0]->integer.value = region_obj->region.space_id;
+ args[1]->integer.value = function;
+ args[2] = NULL;
+
+ /* Execute the method, no return value */
+
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_METHOD, info->prefix_node, NULL));
+
+ status = acpi_ns_evaluate(info);
+ acpi_ut_remove_reference(args[1]);
+
+ cleanup2:
+ acpi_ut_remove_reference(args[0]);
+
+ cleanup1:
+ ACPI_FREE(info);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_address_space_dispatch
+ *
+ * PARAMETERS: region_obj - Internal region object
+ * Function - Read or Write operation
+ * Address - Where in the space to read or write
+ * bit_width - Field width in bits (8, 16, 32, or 64)
+ * Value - Pointer to in or out value, must be
+ * full 64-bit acpi_integer
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Dispatch an address space or operation region access to
+ * a previously installed handler.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ u32 function,
+ acpi_physical_address address,
+ u32 bit_width, acpi_integer * value)
+{
+ acpi_status status;
+ acpi_adr_space_handler handler;
+ acpi_adr_space_setup region_setup;
+ union acpi_operand_object *handler_desc;
+ union acpi_operand_object *region_obj2;
+ void *region_context = NULL;
+
+ ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
+
+ region_obj2 = acpi_ns_get_secondary_object(region_obj);
+ if (!region_obj2) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ /* Ensure that there is a handler associated with this region */
+
+ handler_desc = region_obj->region.handler;
+ if (!handler_desc) {
+ ACPI_ERROR((AE_INFO,
+ "No handler for Region [%4.4s] (%p) [%s]",
+ acpi_ut_get_node_name(region_obj->region.node),
+ region_obj,
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
+
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ /*
+	 * It may be the case that the region has never been initialized.
+	 * Some types of regions require special init code.
+ */
+ if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
+ /*
+ * This region has not been initialized yet, do it
+ */
+ region_setup = handler_desc->address_space.setup;
+ if (!region_setup) {
+
+ /* No initialization routine, exit with error */
+
+ ACPI_ERROR((AE_INFO,
+ "No init routine for region(%p) [%s]",
+ region_obj,
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ /*
+ * We must exit the interpreter because the region
+ * setup will potentially execute control methods
+ * (e.g., _REG method for this region)
+ */
+ acpi_ex_exit_interpreter();
+
+ status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
+ handler_desc->address_space.context,
+ &region_context);
+
+ /* Re-enter the interpreter */
+
+ acpi_ex_enter_interpreter();
+
+ /* Check for failure of the Region Setup */
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During region initialization: [%s]",
+ acpi_ut_get_region_name(region_obj->
+ region.
+ space_id)));
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Region initialization may have been completed by region_setup
+ */
+ if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
+ region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
+
+ if (region_obj2->extra.region_context) {
+
+ /* The handler for this region was already installed */
+
+ ACPI_FREE(region_context);
+ } else {
+ /*
+ * Save the returned context for use in all accesses to
+ * this particular region
+ */
+ region_obj2->extra.region_context =
+ region_context;
+ }
+ }
+ }
+
+ /* We have everything we need, we can invoke the address space handler */
+
+ handler = handler_desc->address_space.handler;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
+ &region_obj->region.handler->address_space, handler,
+ ACPI_FORMAT_NATIVE_UINT(address),
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
+
+ if (!(handler_desc->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+ /*
+ * For handlers other than the default (supplied) handlers, we must
+ * exit the interpreter because the handler *might* block -- we don't
+		 * know what it will do, so we can't hold the lock on the interpreter.
+ */
+ acpi_ex_exit_interpreter();
+ }
+
+ /* Call the handler */
+
+ status = handler(function, address, bit_width, value,
+ handler_desc->address_space.context,
+ region_obj2->extra.region_context);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
+ }
+
+ if (!(handler_desc->address_space.handler_flags &
+ ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
+ /*
+ * We just returned from a non-default handler, we must re-enter the
+ * interpreter
+ */
+ acpi_ex_enter_interpreter();
+ }
+
+ return_ACPI_STATUS(status);
+}
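+
+/*
+ * Standalone sketch of the contract an address space handler invoked by the
+ * dispatch above must satisfy: it receives the access type, the address
+ * within the space, the access width in bits, a pointer to a 64-bit value
+ * used for both reads and writes, and the two context pointers supplied at
+ * registration/region-setup time. This example simply backs the region with
+ * a byte array (little-endian); it is illustrative and is not one of the
+ * ACPICA default handlers.
+ */
+static acpi_status
+example_byte_array_space_handler(u32 function,
+				 acpi_physical_address address,
+				 u32 bit_width,
+				 acpi_integer * value,
+				 void *handler_context, void *region_context)
+{
+	u8 *storage = (u8 *) region_context;	/* Provided by the setup routine */
+	u32 byte_count = bit_width / 8;
+	u32 i;
+
+	if (function == ACPI_READ) {
+		*value = 0;
+		for (i = 0; i < byte_count; i++) {
+			*value |= (acpi_integer) storage[address + i] << (8 * i);
+		}
+	} else {		/* ACPI_WRITE */
+
+		for (i = 0; i < byte_count; i++) {
+			storage[address + i] = (u8) (*value >> (8 * i));
+		}
+	}
+
+	return (AE_OK);
+}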
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_detach_region
+ *
+ * PARAMETERS: region_obj - Region Object
+ * acpi_ns_is_locked - Namespace Region Already Locked?
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Break the association between the handler and the region;
+ *              this is a two-way association.
+ *
+ ******************************************************************************/
+
+void
+acpi_ev_detach_region(union acpi_operand_object *region_obj,
+ u8 acpi_ns_is_locked)
+{
+ union acpi_operand_object *handler_obj;
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object **last_obj_ptr;
+ acpi_adr_space_setup region_setup;
+ void **region_context;
+ union acpi_operand_object *region_obj2;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_detach_region);
+
+ region_obj2 = acpi_ns_get_secondary_object(region_obj);
+ if (!region_obj2) {
+ return_VOID;
+ }
+ region_context = &region_obj2->extra.region_context;
+
+ /* Get the address handler from the region object */
+
+ handler_obj = region_obj->region.handler;
+ if (!handler_obj) {
+
+ /* This region has no handler, all done */
+
+ return_VOID;
+ }
+
+ /* Find this region in the handler's list */
+
+ obj_desc = handler_obj->address_space.region_list;
+ last_obj_ptr = &handler_obj->address_space.region_list;
+
+ while (obj_desc) {
+
+ /* Is this the correct Region? */
+
+ if (obj_desc == region_obj) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Removing Region %p from address handler %p\n",
+ region_obj, handler_obj));
+
+ /* This is it, remove it from the handler's list */
+
+ *last_obj_ptr = obj_desc->region.next;
+ obj_desc->region.next = NULL; /* Must clear field */
+
+ if (acpi_ns_is_locked) {
+ status =
+ acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+ }
+
+ /* Now stop region accesses by executing the _REG method */
+
+ status = acpi_ev_execute_reg_method(region_obj, 0);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "from region _REG, [%s]",
+ acpi_ut_get_region_name
+ (region_obj->region.space_id)));
+ }
+
+ if (acpi_ns_is_locked) {
+ status =
+ acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+ }
+
+ /*
+ * If the region has been activated, call the setup handler
+ * with the deactivate notification
+ */
+ if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) {
+ region_setup = handler_obj->address_space.setup;
+ status =
+ region_setup(region_obj,
+ ACPI_REGION_DEACTIVATE,
+ handler_obj->address_space.
+ context, region_context);
+
+				/* Init routine may fail, just ignore errors */
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "from region handler - deactivate, [%s]",
+ acpi_ut_get_region_name
+ (region_obj->region.
+ space_id)));
+ }
+
+ region_obj->region.flags &=
+ ~(AOPOBJ_SETUP_COMPLETE);
+ }
+
+ /*
+ * Remove handler reference in the region
+ *
+ * NOTE: this doesn't mean that the region goes away, the region
+ * is just inaccessible as indicated to the _REG method
+ *
+ * If the region is on the handler's list, this must be the
+ * region's handler
+ */
+ region_obj->region.handler = NULL;
+ acpi_ut_remove_reference(handler_obj);
+
+ return_VOID;
+ }
+
+ /* Walk the linked list of handlers */
+
+ last_obj_ptr = &obj_desc->region.next;
+ obj_desc = obj_desc->region.next;
+ }
+
+ /* If we get here, the region was not in the handler's region list */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Cannot remove region %p from address handler %p\n",
+ region_obj, handler_obj));
+
+ return_VOID;
+}
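+
+/*
+ * Standalone sketch (hypothetical node type, not ACPICA code) of the unlink
+ * idiom used above: last_obj_ptr always points at the link that refers to
+ * the current node, so removing the list head and removing an interior node
+ * are the same single assignment.
+ */
+struct example_list_node {
+	struct example_list_node *next;
+	int payload;
+};
+
+static u8 example_list_remove(struct example_list_node **head,
+			      struct example_list_node *target)
+{
+	struct example_list_node **link = head;
+
+	while (*link) {
+		if (*link == target) {
+			*link = target->next;	/* Works for head or interior */
+			target->next = NULL;
+			return (TRUE);
+		}
+		link = &(*link)->next;
+	}
+
+	return (FALSE);		/* Not found in this list */
+}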
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_attach_region
+ *
+ * PARAMETERS: handler_obj - Handler Object
+ * region_obj - Region Object
+ * acpi_ns_is_locked - Namespace Region Already Locked?
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Create the association between the handler and the region;
+ *              this is a two-way association.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_attach_region(union acpi_operand_object *handler_obj,
+ union acpi_operand_object *region_obj,
+ u8 acpi_ns_is_locked)
+{
+
+ ACPI_FUNCTION_TRACE(ev_attach_region);
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Adding Region [%4.4s] %p to address handler %p [%s]\n",
+ acpi_ut_get_node_name(region_obj->region.node),
+ region_obj, handler_obj,
+ acpi_ut_get_region_name(region_obj->region.
+ space_id)));
+
+ /* Link this region to the front of the handler's list */
+
+ region_obj->region.next = handler_obj->address_space.region_list;
+ handler_obj->address_space.region_list = region_obj;
+
+ /* Install the region's handler */
+
+ if (region_obj->region.handler) {
+ return_ACPI_STATUS(AE_ALREADY_EXISTS);
+ }
+
+ region_obj->region.handler = handler_obj;
+ acpi_ut_add_reference(handler_obj);
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_handler
+ *
+ * PARAMETERS: walk_namespace callback
+ *
+ * DESCRIPTION: This routine installs an address handler into objects that are
+ * of type Region or Device.
+ *
+ * If the Object is a Device and the device already has a handler of
+ * the same type, then the search is terminated in that branch.
+ *
+ * This is because the existing handler is closer (in the namespace)
+ * to any regions in that branch than the one we are trying to install.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_install_handler(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ union acpi_operand_object *handler_obj;
+ union acpi_operand_object *next_handler_obj;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_NAME(ev_install_handler);
+
+ handler_obj = (union acpi_operand_object *)context;
+
+ /* Parameter validation */
+
+ if (!handler_obj) {
+ return (AE_OK);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_map_handle_to_node(obj_handle);
+ if (!node) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+	 * We only care about regions and objects
+ * that are allowed to have address space handlers
+ */
+ if ((node->type != ACPI_TYPE_DEVICE) &&
+ (node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
+ return (AE_OK);
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+
+ /* No object, just exit */
+
+ return (AE_OK);
+ }
+
+	/* Devices are handled differently than regions */
+
+ if (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_DEVICE) {
+
+ /* Check if this Device already has a handler for this address space */
+
+ next_handler_obj = obj_desc->device.handler;
+ while (next_handler_obj) {
+
+ /* Found a handler, is it for the same address space? */
+
+ if (next_handler_obj->address_space.space_id ==
+ handler_obj->address_space.space_id) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Found handler for region [%s] in device %p(%p) handler %p\n",
+ acpi_ut_get_region_name
+ (handler_obj->address_space.
+ space_id), obj_desc,
+ next_handler_obj,
+ handler_obj));
+
+ /*
+				 * Since the object we found it on was a device, someone
+				 * has already installed a handler for the branch of the
+				 * namespace from this device on. Just bail out, telling
+				 * the walk routine not to traverse this branch. This
+				 * preserves the scoping rule for handlers.
+ */
+ return (AE_CTRL_DEPTH);
+ }
+
+ /* Walk the linked list of handlers attached to this device */
+
+ next_handler_obj = next_handler_obj->address_space.next;
+ }
+
+ /*
+ * As long as the device didn't have a handler for this
+ * space we don't care about it. We just ignore it and
+ * proceed.
+ */
+ return (AE_OK);
+ }
+
+ /* Object is a Region */
+
+ if (obj_desc->region.space_id != handler_obj->address_space.space_id) {
+ /*
+ * This region is for a different address space
+ * -- just ignore it
+ */
+ return (AE_OK);
+ }
+
+ /*
+ * Now we have a region and it is for the handler's address
+ * space type.
+ *
+	 * First disconnect the region from any previous handler
+ */
+ acpi_ev_detach_region(obj_desc, FALSE);
+
+ /* Connect the region to the new handler */
+
+ status = acpi_ev_attach_region(handler_obj, obj_desc, FALSE);
+ return (status);
+}
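+
+/*
+ * Standalone sketch (hypothetical callback, not ACPICA code) of the subtree
+ * pruning used above: a walk_namespace callback that returns AE_CTRL_DEPTH
+ * tells acpi_ns_walk_namespace to skip the children of the current node but
+ * continue with its siblings. That is how the handler scoping rule is
+ * enforced: a device that already has a handler for this space shields
+ * everything below it.
+ */
+static acpi_status
+example_prune_walk_callback(acpi_handle obj_handle,
+			    u32 level, void *context, void **return_value)
+{
+	struct acpi_namespace_node *node =
+	    acpi_ns_map_handle_to_node(obj_handle);
+
+	if (!node) {
+		return (AE_BAD_PARAMETER);
+	}
+
+	/* In this sketch, pretend every Device shields its own subtree */
+
+	if (node->type == ACPI_TYPE_DEVICE) {
+		return (AE_CTRL_DEPTH);
+	}
+
+	return (AE_OK);
+}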
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_space_handler
+ *
+ * PARAMETERS: Node - Namespace node for the device
+ * space_id - The address space ID
+ * Handler - Address of the handler
+ * Setup - Address of the setup function
+ * Context - Value passed to the handler on each access
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for all op_regions of a given space_id.
+ * Assumes namespace is locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_install_space_handler(struct acpi_namespace_node * node,
+ acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler,
+ acpi_adr_space_setup setup, void *context)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *handler_obj;
+ acpi_status status;
+ acpi_object_type type;
+ u8 flags = 0;
+
+ ACPI_FUNCTION_TRACE(ev_install_space_handler);
+
+ /*
+ * This registration is valid for only the types below
+ * and the root. This is where the default handlers
+ * get placed.
+ */
+ if ((node->type != ACPI_TYPE_DEVICE) &&
+ (node->type != ACPI_TYPE_PROCESSOR) &&
+ (node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node)) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ if (handler == ACPI_DEFAULT_HANDLER) {
+ flags = ACPI_ADDR_HANDLER_DEFAULT_INSTALLED;
+
+ switch (space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ handler = acpi_ex_system_memory_space_handler;
+ setup = acpi_ev_system_memory_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ handler = acpi_ex_system_io_space_handler;
+ setup = acpi_ev_io_space_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_PCI_CONFIG:
+ handler = acpi_ex_pci_config_space_handler;
+ setup = acpi_ev_pci_config_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_CMOS:
+ handler = acpi_ex_cmos_space_handler;
+ setup = acpi_ev_cmos_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_PCI_BAR_TARGET:
+ handler = acpi_ex_pci_bar_space_handler;
+ setup = acpi_ev_pci_bar_region_setup;
+ break;
+
+ case ACPI_ADR_SPACE_DATA_TABLE:
+ handler = acpi_ex_data_table_space_handler;
+ setup = NULL;
+ break;
+
+ default:
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+ }
+
+ /* If the caller hasn't specified a setup routine, use the default */
+
+ if (!setup) {
+ setup = acpi_ev_default_region_setup;
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
+ /*
+ * The attached device object already exists.
+ * Make sure the handler is not already installed.
+ */
+ handler_obj = obj_desc->device.handler;
+
+ /* Walk the handler list for this device */
+
+ while (handler_obj) {
+
+ /* Same space_id indicates a handler already installed */
+
+ if (handler_obj->address_space.space_id == space_id) {
+ if (handler_obj->address_space.handler ==
+ handler) {
+ /*
+ * It is (relatively) OK to attempt to install the SAME
+ * handler twice. This can easily happen
+ * with PCI_Config space.
+ */
+ status = AE_SAME_HANDLER;
+ goto unlock_and_exit;
+ } else {
+ /* A handler is already installed */
+
+ status = AE_ALREADY_EXISTS;
+ }
+ goto unlock_and_exit;
+ }
+
+ /* Walk the linked list of handlers */
+
+ handler_obj = handler_obj->address_space.next;
+ }
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Creating object on Device %p while installing handler\n",
+ node));
+
+ /* obj_desc does not exist, create one */
+
+ if (node->type == ACPI_TYPE_ANY) {
+ type = ACPI_TYPE_DEVICE;
+ } else {
+ type = node->type;
+ }
+
+ obj_desc = acpi_ut_create_internal_object(type);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ /* Init new descriptor */
+
+ obj_desc->common.type = (u8) type;
+
+ /* Attach the new object to the Node */
+
+ status = acpi_ns_attach_object(node, obj_desc, type);
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Installing address handler for region %s(%X) on Device %4.4s %p(%p)\n",
+ acpi_ut_get_region_name(space_id), space_id,
+ acpi_ut_get_node_name(node), node, obj_desc));
+
+ /*
+ * Install the handler
+ *
+ * At this point there is no existing handler.
+ * Just allocate the object for the handler and link it
+ * into the list.
+ */
+ handler_obj =
+ acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_ADDRESS_HANDLER);
+ if (!handler_obj) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ /* Init handler obj */
+
+ handler_obj->address_space.space_id = (u8) space_id;
+ handler_obj->address_space.handler_flags = flags;
+ handler_obj->address_space.region_list = NULL;
+ handler_obj->address_space.node = node;
+ handler_obj->address_space.handler = handler;
+ handler_obj->address_space.context = context;
+ handler_obj->address_space.setup = setup;
+
+ /* Install at head of Device.address_space list */
+
+ handler_obj->address_space.next = obj_desc->device.handler;
+
+ /*
+ * The Device object is the first reference on the handler_obj.
+ * Each region that uses the handler adds a reference.
+ */
+ obj_desc->device.handler = handler_obj;
+
+ /*
+ * Walk the namespace finding all of the regions this
+ * handler will manage.
+ *
+ * Start at the device and search the branch toward
+ * the leaf nodes until either the leaf is encountered or
+ * a device is detected that has an address handler of the
+ * same type.
+ *
+ * In either case, back up and search down the remainder
+ * of the branch
+ */
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
+ ACPI_NS_WALK_UNLOCK,
+ acpi_ev_install_handler, handler_obj,
+ NULL);
+
+ unlock_and_exit:
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_execute_reg_methods
+ *
+ * PARAMETERS: Node - Namespace node for the device
+ * space_id - The address space ID
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Run all _REG methods for the input Space ID;
+ * Note: assumes namespace is locked, or system init time.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
+
+ /*
+ * Run all _REG methods for all Operation Regions for this
+ * space ID. This is a separate walk in order to handle any
+ * interdependencies between regions and _REG methods. (i.e. handlers
+ * must be installed for all regions of this Space ID before we
+ * can run any _REG methods)
+ */
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
+ ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
+ &space_id, NULL);
+
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_reg_run
+ *
+ * PARAMETERS: walk_namespace callback
+ *
+ * DESCRIPTION: Run _REG method for region objects of the requested space_id
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ev_reg_run(acpi_handle obj_handle,
+ u32 level, void *context, void **return_value)
+{
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ acpi_adr_space_type space_id;
+ acpi_status status;
+
+ space_id = *ACPI_CAST_PTR(acpi_adr_space_type, context);
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_map_handle_to_node(obj_handle);
+ if (!node) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /*
+	 * We only care about regions and objects
+ * that are allowed to have address space handlers
+ */
+ if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
+ return (AE_OK);
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+
+ /* No object, just exit */
+
+ return (AE_OK);
+ }
+
+ /* Object is a Region */
+
+ if (obj_desc->region.space_id != space_id) {
+ /*
+ * This region is for a different address space
+ * -- just ignore it
+ */
+ return (AE_OK);
+ }
+
+ status = acpi_ev_execute_reg_method(obj_desc, 1);
+ return (status);
+}
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
new file mode 100644
index 0000000..6b94b38
--- /dev/null
+++ b/drivers/acpi/events/evrgnini.c
@@ -0,0 +1,688 @@
+/******************************************************************************
+ *
+ * Module Name: evrgnini - ACPI address_space (op_region) init
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+#include <acpi/acnamesp.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evrgnini")
+
+/* Local prototypes */
+static u8 acpi_ev_match_pci_root_bridge(char *id);
+
+static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_system_memory_region_setup
+ *
+ * PARAMETERS: Handle - Region we are interested in
+ * Function - Start or stop
+ * handler_context - Address space handler context
+ * region_context - Region specific context
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Setup a system_memory operation region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_system_memory_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context)
+{
+ union acpi_operand_object *region_desc =
+ (union acpi_operand_object *)handle;
+ struct acpi_mem_space_context *local_region_context;
+
+ ACPI_FUNCTION_TRACE(ev_system_memory_region_setup);
+
+ if (function == ACPI_REGION_DEACTIVATE) {
+ if (*region_context) {
+ local_region_context =
+ (struct acpi_mem_space_context *)*region_context;
+
+ /* Delete a cached mapping if present */
+
+ if (local_region_context->mapped_length) {
+ acpi_os_unmap_memory(local_region_context->
+ mapped_logical_address,
+ local_region_context->
+ mapped_length);
+ }
+ ACPI_FREE(local_region_context);
+ *region_context = NULL;
+ }
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Create a new context */
+
+ local_region_context =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_mem_space_context));
+ if (!(local_region_context)) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Save the region length and address for use in the handler */
+
+ local_region_context->length = region_desc->region.length;
+ local_region_context->address = region_desc->region.address;
+
+ *region_context = local_region_context;
+ return_ACPI_STATUS(AE_OK);
+}
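The context created here is consumed later by the SystemMemory address space handler, which maps a window of physical memory on first access and caches the mapping in the same structure; the deactivate path above then unmaps it. A condensed, illustrative sketch of that consumer side, using only the fields referenced in this file (the real handler also tracks the mapped physical address and remaps when an access falls outside the cached window):

	static acpi_status
	my_ensure_mapped(struct acpi_mem_space_context *ctx)
	{
		if (ctx->mapped_length)
			return AE_OK;	/* cached mapping already present */

		/* Illustrative: map the whole region on first access */
		ctx->mapped_logical_address =
		    acpi_os_map_memory(ctx->address, ctx->length);
		if (!ctx->mapped_logical_address)
			return AE_NO_MEMORY;

		ctx->mapped_length = ctx->length;
		return AE_OK;
	}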
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_io_space_region_setup
+ *
+ * PARAMETERS: Handle - Region we are interested in
+ * Function - Start or stop
+ * handler_context - Address space handler context
+ * region_context - Region specific context
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Setup an IO operation region
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_io_space_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context)
+{
+ ACPI_FUNCTION_TRACE(ev_io_space_region_setup);
+
+ if (function == ACPI_REGION_DEACTIVATE) {
+ *region_context = NULL;
+ } else {
+ *region_context = handler_context;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_pci_config_region_setup
+ *
+ * PARAMETERS: Handle - Region we are interested in
+ * Function - Start or stop
+ * handler_context - Address space handler context
+ * region_context - Region specific context
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Setup a PCI_Config operation region
+ *
+ * MUTEX: Assumes namespace is not locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_pci_config_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context)
+{
+ acpi_status status = AE_OK;
+ acpi_integer pci_value;
+ struct acpi_pci_id *pci_id = *region_context;
+ union acpi_operand_object *handler_obj;
+ struct acpi_namespace_node *parent_node;
+ struct acpi_namespace_node *pci_root_node;
+ struct acpi_namespace_node *pci_device_node;
+ union acpi_operand_object *region_obj =
+ (union acpi_operand_object *)handle;
+
+ ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
+
+ handler_obj = region_obj->region.handler;
+ if (!handler_obj) {
+ /*
+ * No installed handler. This shouldn't happen because the dispatch
+ * routine checks before we get here, but we check again just in case.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Attempting to init a region %p, with no handler\n",
+ region_obj));
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ *region_context = NULL;
+ if (function == ACPI_REGION_DEACTIVATE) {
+ if (pci_id) {
+ ACPI_FREE(pci_id);
+ }
+ return_ACPI_STATUS(status);
+ }
+
+ parent_node = acpi_ns_get_parent_node(region_obj->region.node);
+
+ /*
+ * Get the _SEG and _BBN values from the device upon which the handler
+ * is installed.
+ *
+ * We need to get the _SEG and _BBN objects relative to the PCI BUS device.
+ * This is the device the handler has been registered to handle.
+ */
+
+ /*
+ * If the address_space.Node is still pointing to the root, we need
+ * to scan upward for a PCI Root bridge and re-associate the op_region
+ * handlers with that device.
+ */
+ if (handler_obj->address_space.node == acpi_gbl_root_node) {
+
+ /* Start search from the parent object */
+
+ pci_root_node = parent_node;
+ while (pci_root_node != acpi_gbl_root_node) {
+
+ /* Get the _HID/_CID in order to detect a root_bridge */
+
+ if (acpi_ev_is_pci_root_bridge(pci_root_node)) {
+
+ /* Install a handler for this PCI root bridge */
+
+ status =
+ acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_SAME_HANDLER) {
+ /*
+ * It is OK if the handler is already installed on the root
+ * bridge. Still need to return a context object for the
+ * new PCI_Config operation region, however.
+ */
+ status = AE_OK;
+ } else {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not install PciConfig handler for Root Bridge %4.4s",
+ acpi_ut_get_node_name
+ (pci_root_node)));
+ }
+ }
+ break;
+ }
+
+ pci_root_node = acpi_ns_get_parent_node(pci_root_node);
+ }
+
+ /* PCI root bridge not found, use namespace root node */
+ } else {
+ pci_root_node = handler_obj->address_space.node;
+ }
+
+ /*
+ * If this region is now initialized, we are done.
+ * (install_address_space_handler could have initialized it)
+ */
+ if (region_obj->region.flags & AOPOBJ_SETUP_COMPLETE) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Region is still not initialized. Create a new context */
+
+ pci_id = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pci_id));
+ if (!pci_id) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /*
+ * For PCI_Config space access, we need the segment, bus,
+ * device and function numbers. Acquire them here.
+ *
+ * Find the parent device object. (This allows the operation region to be
+ * within a subscope under the device, such as a control method.)
+ */
+ pci_device_node = region_obj->region.node;
+ while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) {
+ pci_device_node = acpi_ns_get_parent_node(pci_device_node);
+ }
+
+ if (!pci_device_node) {
+ ACPI_FREE(pci_id);
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ /*
+ * Get the PCI device and function numbers from the _ADR object
+ * contained in the parent's scope.
+ */
+ status =
+ acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node,
+ &pci_value);
+
+ /*
+ * The default is zero, and since the allocation above zeroed
+ * the data, just do nothing on failure.
+ */
+ if (ACPI_SUCCESS(status)) {
+ pci_id->device = ACPI_HIWORD(ACPI_LODWORD(pci_value));
+ pci_id->function = ACPI_LOWORD(ACPI_LODWORD(pci_value));
+ }
+
+ /* The PCI segment number comes from the _SEG method */
+
+ status =
+ acpi_ut_evaluate_numeric_object(METHOD_NAME__SEG, pci_root_node,
+ &pci_value);
+ if (ACPI_SUCCESS(status)) {
+ pci_id->segment = ACPI_LOWORD(pci_value);
+ }
+
+ /* The PCI bus number comes from the _BBN method */
+
+ status =
+ acpi_ut_evaluate_numeric_object(METHOD_NAME__BBN, pci_root_node,
+ &pci_value);
+ if (ACPI_SUCCESS(status)) {
+ pci_id->bus = ACPI_LOWORD(pci_value);
+ }
+
+ /* Complete this device's pci_id */
+
+ acpi_os_derive_pci_id(pci_root_node, region_obj->region.node, &pci_id);
+
+ *region_context = pci_id;
+ return_ACPI_STATUS(AE_OK);
+}
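For reference, a minimal worked example of the _ADR decoding used above (the value is hypothetical): for PCI, _ADR packs the device number into the upper 16 bits and the function number into the lower 16 bits of the low DWORD.

	acpi_integer adr = 0x001C0003;	/* hypothetical _ADR: device 0x1C, function 3 */
	u16 device   = ACPI_HIWORD(ACPI_LODWORD(adr));	/* 0x001C */
	u16 function = ACPI_LOWORD(ACPI_LODWORD(adr));	/* 0x0003 */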
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_match_pci_root_bridge
+ *
+ * PARAMETERS: Id - The HID/CID in string format
+ *
+ * RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
+ *
+ * DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
+ *
+ ******************************************************************************/
+
+static u8 acpi_ev_match_pci_root_bridge(char *id)
+{
+
+ /*
+ * Check if this is a PCI root.
+ * ACPI 3.0+: check for a PCI Express root also.
+ */
+ if (!(ACPI_STRNCMP(id,
+ PCI_ROOT_HID_STRING,
+ sizeof(PCI_ROOT_HID_STRING))) ||
+ !(ACPI_STRNCMP(id,
+ PCI_EXPRESS_ROOT_HID_STRING,
+ sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
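A few hypothetical inputs for illustration; the HID string values are those defined for PCI_ROOT_HID_STRING and PCI_EXPRESS_ROOT_HID_STRING in the ACPICA headers (quoted from memory, so verify against the headers):

	acpi_ev_match_pci_root_bridge("PNP0A03");	/* PCI root bridge: TRUE */
	acpi_ev_match_pci_root_bridge("PNP0A08");	/* PCI Express root bridge: TRUE */
	acpi_ev_match_pci_root_bridge("PNP0C0A");	/* battery device: FALSE */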
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_is_pci_root_bridge
+ *
+ * PARAMETERS: Node - Device node being examined
+ *
+ * RETURN: TRUE if device is a PCI/PCI-Express Root Bridge
+ *
+ * DESCRIPTION: Determine if the input device represents a PCI Root Bridge by
+ * examining the _HID and _CID for the device.
+ *
+ ******************************************************************************/
+
+static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
+{
+ acpi_status status;
+ struct acpica_device_id hid;
+ struct acpi_compatible_id_list *cid;
+ u32 i;
+
+ /*
+ * Get the _HID and check for a PCI Root Bridge
+ */
+ status = acpi_ut_execute_HID(node, &hid);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+
+ if (acpi_ev_match_pci_root_bridge(hid.value)) {
+ return (TRUE);
+ }
+
+ /*
+ * The _HID did not match.
+ * Get the _CID and check for a PCI Root Bridge
+ */
+ status = acpi_ut_execute_CID(node, &cid);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+
+ /* Check all _CIDs in the returned list */
+
+ for (i = 0; i < cid->count; i++) {
+ if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) {
+ ACPI_FREE(cid);
+ return (TRUE);
+ }
+ }
+
+ ACPI_FREE(cid);
+ return (FALSE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_pci_bar_region_setup
+ *
+ * PARAMETERS: Handle - Region we are interested in
+ * Function - Start or stop
+ * handler_context - Address space handler context
+ * region_context - Region specific context
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Setup a pci_BAR operation region
+ *
+ * MUTEX: Assumes namespace is not locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_pci_bar_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context)
+{
+ ACPI_FUNCTION_TRACE(ev_pci_bar_region_setup);
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_cmos_region_setup
+ *
+ * PARAMETERS: Handle - Region we are interested in
+ * Function - Start or stop
+ * handler_context - Address space handler context
+ * region_context - Region specific context
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Setup a CMOS operation region
+ *
+ * MUTEX: Assumes namespace is not locked
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_cmos_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context)
+{
+ ACPI_FUNCTION_TRACE(ev_cmos_region_setup);
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_default_region_setup
+ *
+ * PARAMETERS: Handle - Region we are interested in
+ * Function - Start or stop
+ * handler_context - Address space handler context
+ * region_context - Region specific context
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Default region initialization
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_default_region_setup(acpi_handle handle,
+ u32 function,
+ void *handler_context, void **region_context)
+{
+ ACPI_FUNCTION_TRACE(ev_default_region_setup);
+
+ if (function == ACPI_REGION_DEACTIVATE) {
+ *region_context = NULL;
+ } else {
+ *region_context = handler_context;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_initialize_region
+ *
+ * PARAMETERS: region_obj - Region we are initializing
+ * acpi_ns_locked - Is namespace locked?
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initializes the region, finds any _REG methods and saves them
+ * for execution at a later time
+ *
+ * Get the appropriate address space handler for a newly
+ * created region.
+ *
+ * This also performs address space specific initialization. For
+ * example, PCI regions must have an _ADR object that contains
+ * a PCI address in the scope of the definition. This address is
+ * required to perform an access to PCI config space.
+ *
+ * MUTEX: Interpreter should be unlocked, because we may run the _REG
+ * method for this region.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ev_initialize_region(union acpi_operand_object *region_obj,
+ u8 acpi_ns_locked)
+{
+ union acpi_operand_object *handler_obj;
+ union acpi_operand_object *obj_desc;
+ acpi_adr_space_type space_id;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+ struct acpi_namespace_node *method_node;
+ acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG;
+ union acpi_operand_object *region_obj2;
+
+ ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked);
+
+ if (!region_obj) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ if (region_obj->common.flags & AOPOBJ_OBJECT_INITIALIZED) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ region_obj2 = acpi_ns_get_secondary_object(region_obj);
+ if (!region_obj2) {
+ return_ACPI_STATUS(AE_NOT_EXIST);
+ }
+
+ node = acpi_ns_get_parent_node(region_obj->region.node);
+ space_id = region_obj->region.space_id;
+
+ /* Setup defaults */
+
+ region_obj->region.handler = NULL;
+ region_obj2->extra.method_REG = NULL;
+ region_obj->common.flags &= ~(AOPOBJ_SETUP_COMPLETE);
+ region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED;
+
+ /* Find any "_REG" method associated with this region definition */
+
+ status =
+ acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
+ &method_node);
+ if (ACPI_SUCCESS(status)) {
+ /*
+ * The _REG method is optional and there can be only one per region
+ * definition. This will be executed when the handler is attached
+ * or removed
+ */
+ region_obj2->extra.method_REG = method_node;
+ }
+
+ /*
+ * The following loop depends upon the root Node having no parent,
+ * i.e., acpi_gbl_root_node->parent_entry being set to NULL
+ */
+ while (node) {
+
+ /* Check to see if a handler exists */
+
+ handler_obj = NULL;
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
+
+ /* Can only be a handler if the object exists */
+
+ switch (node->type) {
+ case ACPI_TYPE_DEVICE:
+
+ handler_obj = obj_desc->device.handler;
+ break;
+
+ case ACPI_TYPE_PROCESSOR:
+
+ handler_obj = obj_desc->processor.handler;
+ break;
+
+ case ACPI_TYPE_THERMAL:
+
+ handler_obj = obj_desc->thermal_zone.handler;
+ break;
+
+ default:
+ /* Ignore other objects */
+ break;
+ }
+
+ while (handler_obj) {
+
+ /* Is this handler of the correct type? */
+
+ if (handler_obj->address_space.space_id ==
+ space_id) {
+
+ /* Found correct handler */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Found handler %p for region %p in obj %p\n",
+ handler_obj,
+ region_obj,
+ obj_desc));
+
+ status =
+ acpi_ev_attach_region(handler_obj,
+ region_obj,
+ acpi_ns_locked);
+
+ /*
+ * Tell all users that this region is usable by running the _REG
+ * method
+ */
+ if (acpi_ns_locked) {
+ status =
+ acpi_ut_release_mutex
+ (ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS
+ (status);
+ }
+ }
+
+ status =
+ acpi_ev_execute_reg_method
+ (region_obj, 1);
+
+ if (acpi_ns_locked) {
+ status =
+ acpi_ut_acquire_mutex
+ (ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS
+ (status);
+ }
+ }
+
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Try next handler in the list */
+
+ handler_obj = handler_obj->address_space.next;
+ }
+ }
+
+ /*
+ * This node does not have the handler we need;
+ * Pop up one level
+ */
+ node = acpi_ns_get_parent_node(node);
+ }
+
+ /* If we get here, there is no handler for this region */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "No handler for RegionType %s(%X) (RegionObj %p)\n",
+ acpi_ut_get_region_name(space_id), space_id,
+ region_obj));
+
+ return_ACPI_STATUS(AE_NOT_EXIST);
+}
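A condensed, illustrative rewrite of the upward handler search above, restricted to Device nodes and with locking and _REG execution stripped out (the real loop also checks Processor and ThermalZone objects):

	static union acpi_operand_object *
	my_find_enclosing_handler(struct acpi_namespace_node *region_node,
				  acpi_adr_space_type space_id)
	{
		struct acpi_namespace_node *node =
		    acpi_ns_get_parent_node(region_node);

		while (node) {
			union acpi_operand_object *obj =
			    acpi_ns_get_attached_object(node);
			union acpi_operand_object *handler =
			    (obj && node->type == ACPI_TYPE_DEVICE) ?
			    obj->device.handler : NULL;

			for (; handler; handler = handler->address_space.next) {
				if (handler->address_space.space_id == space_id)
					return handler;	/* nearest enclosing handler wins */
			}

			node = acpi_ns_get_parent_node(node);	/* pop up one level */
		}

		return NULL;	/* no handler for this space ID anywhere above */
	}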
diff --git a/drivers/acpi/events/evsci.c b/drivers/acpi/events/evsci.c
new file mode 100644
index 0000000..2a8b778
--- /dev/null
+++ b/drivers/acpi/events/evsci.c
@@ -0,0 +1,184 @@
+/*******************************************************************************
+ *
+ * Module Name: evsci - System Control Interrupt configuration and
+ * legacy to ACPI mode state transition functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evsci")
+
+/* Local prototypes */
+static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_sci_xrupt_handler
+ *
+ * PARAMETERS: Context - Calling Context
+ *
+ * RETURN: Status code that indicates whether the interrupt was handled.
+ *
+ * DESCRIPTION: Interrupt handler that will figure out what function or
+ * control method to call to deal with a SCI.
+ *
+ ******************************************************************************/
+
+static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
+ u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
+
+ ACPI_FUNCTION_TRACE(ev_sci_xrupt_handler);
+
+ /*
+ * We are guaranteed by the ACPI CA initialization/shutdown code that
+ * if this interrupt handler is installed, ACPI is enabled.
+ */
+
+ /*
+ * Fixed Events:
+ * Check for and dispatch any Fixed Events that have occurred
+ */
+ interrupt_handled |= acpi_ev_fixed_event_detect();
+
+ /*
+ * General Purpose Events:
+ * Check for and dispatch any GPEs that have occurred
+ */
+ interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
+
+ return_UINT32(interrupt_handled);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ev_gpe_xrupt_handler
+ *
+ * PARAMETERS: Context - Calling Context
+ *
+ * RETURN: Status code that indicates whether the interrupt was handled.
+ *
+ * DESCRIPTION: Handler for GPE Block Device interrupts
+ *
+ ******************************************************************************/
+
+u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
+{
+ struct acpi_gpe_xrupt_info *gpe_xrupt_list = context;
+ u32 interrupt_handled = ACPI_INTERRUPT_NOT_HANDLED;
+
+ ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
+
+ /*
+ * We are guaranteed by the ACPI CA initialization/shutdown code that
+ * if this interrupt handler is installed, ACPI is enabled.
+ */
+
+ /*
+ * GPEs:
+ * Check for and dispatch any GPEs that have occurred
+ */
+ interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
+
+ return_UINT32(interrupt_handled);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ev_install_sci_handler
+ *
+ * PARAMETERS: none
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Installs SCI handler.
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_install_sci_handler(void)
+{
+ u32 status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(ev_install_sci_handler);
+
+ status =
+ acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
+ acpi_ev_sci_xrupt_handler,
+ acpi_gbl_gpe_xrupt_list_head);
+ return_ACPI_STATUS(status);
+}
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_ev_remove_sci_handler
+ *
+ * PARAMETERS: none
+ *
+ * RETURN: AE_OK if the handler was uninstalled, AE_ERROR if the handler
+ * was not installed to begin with
+ *
+ * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be
+ * taken.
+ *
+ * Note: It doesn't seem important to disable all events or set the event
+ * enable registers to their original values. The OS should disable
+ * the SCI interrupt level when the handler is removed, so no more
+ * events will come in.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ev_remove_sci_handler(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
+
+ /* Just let the OS remove the handler and disable the level */
+
+ status =
+ acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
+ acpi_ev_sci_xrupt_handler);
+
+ return_ACPI_STATUS(status);
+}
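For context, a hedged sketch of how an OS services layer might bridge the handler installed above to a native interrupt: acpi_os_install_interrupt_handler() saves the ACPICA callback and invokes it from the platform IRQ handler, translating the ACPI_INTERRUPT_(NOT_)HANDLED return value. This is an illustration only, not the actual Linux OSL code.

	#include <linux/interrupt.h>
	#include <acpi/acpi.h>

	static acpi_osd_handler saved_handler;	/* saved at install time (illustrative) */
	static void *saved_context;

	static irqreturn_t my_acpi_irq(int irq, void *dev_id)
	{
		u32 handled = saved_handler(saved_context);

		return (handled == ACPI_INTERRUPT_HANDLED) ? IRQ_HANDLED : IRQ_NONE;
	}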
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
new file mode 100644
index 0000000..94a6efe
--- /dev/null
+++ b/drivers/acpi/events/evxface.c
@@ -0,0 +1,820 @@
+/******************************************************************************
+ *
+ * Module Name: evxface - External interfaces for ACPI events
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acevents.h>
+#include <acpi/acinterp.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evxface")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_exception_handler
+ *
+ * PARAMETERS: Handler - Pointer to the handler function for the
+ * event
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function
+ *
+ ******************************************************************************/
+#ifdef ACPI_FUTURE_USAGE
+acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_exception_handler);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Don't allow two handlers. */
+
+ if (acpi_gbl_exception_handler) {
+ status = AE_ALREADY_EXISTS;
+ goto cleanup;
+ }
+
+ /* Install the handler */
+
+ acpi_gbl_exception_handler = handler;
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
+#endif /* ACPI_FUTURE_USAGE */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_fixed_event_handler
+ *
+ * PARAMETERS: Event - Event type to enable.
+ * Handler - Pointer to the handler function for the
+ * event
+ * Context - Value passed to the handler on each event
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Saves the pointer to the handler function and then enables the
+ * event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_fixed_event_handler(u32 event,
+ acpi_event_handler handler, void *context)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);
+
+ /* Parameter validation */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Don't allow two handlers. */
+
+ if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
+ status = AE_ALREADY_EXISTS;
+ goto cleanup;
+ }
+
+ /* Install the handler before enabling the event */
+
+ acpi_gbl_fixed_event_handlers[event].handler = handler;
+ acpi_gbl_fixed_event_handlers[event].context = context;
+
+ status = acpi_clear_event(event);
+ if (ACPI_SUCCESS(status))
+ status = acpi_enable_event(event, 0);
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "Could not enable fixed event %X",
+ event));
+
+ /* Remove the handler */
+
+ acpi_gbl_fixed_event_handlers[event].handler = NULL;
+ acpi_gbl_fixed_event_handlers[event].context = NULL;
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Enabled fixed event %X, Handler=%p\n", event,
+ handler));
+ }
+
+ cleanup:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
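A hypothetical usage sketch (not from this patch): registering a handler for the fixed power button event. ACPI_EVENT_POWER_BUTTON, ACPI_INTERRUPT_HANDLED and the acpi_event_handler signature come from the ACPICA public headers.

	static u32 my_power_button_handler(void *context)
	{
		/* Illustrative: react to a power button press */
		return ACPI_INTERRUPT_HANDLED;
	}

	static acpi_status my_register_power_button(void)
	{
		/* Clears and enables the event internally, as shown above */
		return acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
							my_power_button_handler,
							NULL);
	}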
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_fixed_event_handler
+ *
+ * PARAMETERS: Event - Event type to disable.
+ * Handler - Address of the handler
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Disables the event and unregisters the event handler.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);
+
+ /* Parameter validation */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Disable the event before removing the handler */
+
+ status = acpi_disable_event(event, 0);
+
+ /* Always remove the handler */
+
+ acpi_gbl_fixed_event_handlers[event].handler = NULL;
+ acpi_gbl_fixed_event_handlers[event].context = NULL;
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO,
+ "Could not write to fixed event enable register %X",
+ event));
+ } else {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
+ event));
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_notify_handler
+ *
+ * PARAMETERS: Device - The device for which notifies will be handled
+ * handler_type - The type of handler:
+ * ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
+ * ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
+ * ACPI_ALL_NOTIFY: both system and device
+ * Handler - Address of the handler
+ * Context - Value passed to the handler on each notify
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for notifies on an ACPI device
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_notify_handler(acpi_handle device,
+ u32 handler_type,
+ acpi_notify_handler handler, void *context)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *notify_obj;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_notify_handler);
+
+ /* Parameter validation */
+
+ if ((!device) ||
+ (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_map_handle_to_node(device);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /*
+ * Root Object:
+ * Registering a notify handler on the root object indicates that the
+ * caller wishes to receive notifications for all objects. Note that
+ * only one <external> global handler can be registered (per notify type).
+ */
+ if (device == ACPI_ROOT_OBJECT) {
+
+ /* Make sure the handler is not already installed */
+
+ if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
+ acpi_gbl_system_notify.handler) ||
+ ((handler_type & ACPI_DEVICE_NOTIFY) &&
+ acpi_gbl_device_notify.handler)) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock_and_exit;
+ }
+
+ if (handler_type & ACPI_SYSTEM_NOTIFY) {
+ acpi_gbl_system_notify.node = node;
+ acpi_gbl_system_notify.handler = handler;
+ acpi_gbl_system_notify.context = context;
+ }
+
+ if (handler_type & ACPI_DEVICE_NOTIFY) {
+ acpi_gbl_device_notify.node = node;
+ acpi_gbl_device_notify.handler = handler;
+ acpi_gbl_device_notify.context = context;
+ }
+
+ /* Global notify handler installed */
+ }
+
+ /*
+ * All Other Objects:
+ * Caller will only receive notifications specific to the target object.
+ * Note that only certain object types can receive notifications.
+ */
+ else {
+ /* Notifies allowed on this object? */
+
+ if (!acpi_ev_is_notify_object(node)) {
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
+
+ /* Object exists - make sure there's no handler */
+
+ if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
+ obj_desc->common_notify.system_notify) ||
+ ((handler_type & ACPI_DEVICE_NOTIFY) &&
+ obj_desc->common_notify.device_notify)) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock_and_exit;
+ }
+ } else {
+ /* Create a new object */
+
+ obj_desc = acpi_ut_create_internal_object(node->type);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ /* Attach new object to the Node */
+
+ status =
+ acpi_ns_attach_object(device, obj_desc, node->type);
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+ }
+
+ /* Install the handler */
+
+ notify_obj =
+ acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_NOTIFY);
+ if (!notify_obj) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ notify_obj->notify.node = node;
+ notify_obj->notify.handler = handler;
+ notify_obj->notify.context = context;
+
+ if (handler_type & ACPI_SYSTEM_NOTIFY) {
+ obj_desc->common_notify.system_notify = notify_obj;
+ }
+
+ if (handler_type & ACPI_DEVICE_NOTIFY) {
+ obj_desc->common_notify.device_notify = notify_obj;
+ }
+
+ if (handler_type == ACPI_ALL_NOTIFY) {
+
+ /* Extra ref if installed in both */
+
+ acpi_ut_add_reference(notify_obj);
+ }
+ }
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
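A hypothetical caller-side sketch: installing a device notify handler on a handle obtained elsewhere. The handler signature is the public acpi_notify_handler type; the handle is a placeholder.

	static void my_notify_handler(acpi_handle device, u32 value, void *context)
	{
		/* Illustrative: 'value' is the code passed to the ASL Notify() operator */
	}

	static acpi_status my_register_notify(acpi_handle device_handle)
	{
		return acpi_install_notify_handler(device_handle,
						   ACPI_DEVICE_NOTIFY,
						   my_notify_handler, NULL);
	}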
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_notify_handler
+ *
+ * PARAMETERS: Device - The device for which notifies will be handled
+ * handler_type - The type of handler:
+ * ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
+ * ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
+ * ACPI_ALL_NOTIFY: both system and device
+ * Handler - Address of the handler
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a handler for notifies on an ACPI device
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_notify_handler(acpi_handle device,
+ u32 handler_type, acpi_notify_handler handler)
+{
+ union acpi_operand_object *notify_obj;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_notify_handler);
+
+ /* Parameter validation */
+
+ if ((!device) ||
+ (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
+ status = AE_BAD_PARAMETER;
+ goto exit;
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_map_handle_to_node(device);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Root Object */
+
+ if (device == ACPI_ROOT_OBJECT) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Removing notify handler for namespace root object\n"));
+
+ if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
+ !acpi_gbl_system_notify.handler) ||
+ ((handler_type & ACPI_DEVICE_NOTIFY) &&
+ !acpi_gbl_device_notify.handler)) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ /* Make sure all deferred tasks are completed */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ acpi_os_wait_events_complete(NULL);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ if (handler_type & ACPI_SYSTEM_NOTIFY) {
+ acpi_gbl_system_notify.node = NULL;
+ acpi_gbl_system_notify.handler = NULL;
+ acpi_gbl_system_notify.context = NULL;
+ }
+
+ if (handler_type & ACPI_DEVICE_NOTIFY) {
+ acpi_gbl_device_notify.node = NULL;
+ acpi_gbl_device_notify.handler = NULL;
+ acpi_gbl_device_notify.context = NULL;
+ }
+ }
+
+ /* All Other Objects */
+
+ else {
+ /* Notifies allowed on this object? */
+
+ if (!acpi_ev_is_notify_object(node)) {
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
+
+ /* Check for an existing internal object */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ /* Object exists - make sure there's an existing handler */
+
+ if (handler_type & ACPI_SYSTEM_NOTIFY) {
+ notify_obj = obj_desc->common_notify.system_notify;
+ if (!notify_obj) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ if (notify_obj->notify.handler != handler) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+ /* Make sure all deferred tasks are completed */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ acpi_os_wait_events_complete(NULL);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Remove the handler */
+ obj_desc->common_notify.system_notify = NULL;
+ acpi_ut_remove_reference(notify_obj);
+ }
+
+ if (handler_type & ACPI_DEVICE_NOTIFY) {
+ notify_obj = obj_desc->common_notify.device_notify;
+ if (!notify_obj) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ if (notify_obj->notify.handler != handler) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+ /* Make sure all deferred tasks are completed */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ acpi_os_wait_events_complete(NULL);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Remove the handler */
+ obj_desc->common_notify.device_notify = NULL;
+ acpi_ut_remove_reference(notify_obj);
+ }
+ }
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ exit:
+ if (ACPI_FAILURE(status))
+ ACPI_EXCEPTION((AE_INFO, status, "Removing notify handler"));
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_gpe_handler
+ *
+ * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
+ * defined GPEs)
+ * gpe_number - The GPE number within the GPE block
+ * Type - Whether this GPE should be treated as an
+ * edge- or level-triggered interrupt.
+ * Address - Address of the handler
+ * Context - Value passed to the handler on each GPE
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for a General Purpose Event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number,
+ u32 type, acpi_event_handler address, void *context)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ struct acpi_handler_info *handler;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
+
+ /* Parameter validation */
+
+ if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) {
+ status = AE_BAD_PARAMETER;
+ goto exit;
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Make sure that there isn't a handler there already */
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_HANDLER) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock_and_exit;
+ }
+
+ /* Allocate and init handler object */
+
+ handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info));
+ if (!handler) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ handler->address = address;
+ handler->context = context;
+ handler->method_node = gpe_event_info->dispatch.method_node;
+
+ /* Disable the GPE before installing the handler */
+
+ status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Install the handler */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ gpe_event_info->dispatch.handler = handler;
+
+ /* Set up the dispatch flags to indicate handler (vs. method) */
+
+ gpe_event_info->flags &= ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); /* Clear bits */
+ gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER);
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ exit:
+ if (ACPI_FAILURE(status))
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Installing notify handler failed"));
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
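A hypothetical usage sketch: claiming a GPE on the FADT GPE block (gpe_device == NULL) with a level-triggered handler. The GPE number is a placeholder; the constants are from the ACPICA public headers.

	static u32 my_gpe_handler(void *context)
	{
		/* Illustrative: service the event, then report it handled */
		return ACPI_INTERRUPT_HANDLED;
	}

	static acpi_status my_claim_gpe(void)
	{
		return acpi_install_gpe_handler(NULL, 0x16,
						ACPI_GPE_LEVEL_TRIGGERED,
						my_gpe_handler, NULL);
	}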
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_gpe_handler
+ *
+ * PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
+ * defined GPEs)
+ * gpe_number - The GPE number within the GPE block
+ * Address - Address of the handler
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a handler for a General Purpose acpi_event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number, acpi_event_handler address)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ struct acpi_handler_info *handler;
+ acpi_status status;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_gpe_handler);
+
+ /* Parameter validation */
+
+ if (!address) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Make sure that a handler is indeed installed */
+
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
+ ACPI_GPE_DISPATCH_HANDLER) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ /* Make sure that the installed handler is the same */
+
+ if (gpe_event_info->dispatch.handler->address != address) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Disable the GPE before removing the handler */
+
+ status = acpi_ev_disable_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Make sure all deferred tasks are completed */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ acpi_os_wait_events_complete(NULL);
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Remove the handler */
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ handler = gpe_event_info->dispatch.handler;
+
+ /* Restore Method node (if any), set dispatch flags */
+
+ gpe_event_info->dispatch.method_node = handler->method_node;
+ gpe_event_info->flags &= ~ACPI_GPE_DISPATCH_MASK; /* Clear bits */
+ if (handler->method_node) {
+ gpe_event_info->flags |= ACPI_GPE_DISPATCH_METHOD;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Now we can free the handler object */
+
+ ACPI_FREE(handler);
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_acquire_global_lock
+ *
+ * PARAMETERS: Timeout - How long the caller is willing to wait
+ * Handle - Where the handle to the lock is returned
+ * (if acquired)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Acquire the ACPI Global Lock
+ *
+ * Note: Allows callers with the same thread ID to acquire the global lock
+ * multiple times. In other words, externally, the behavior of the global lock
+ * is identical to an AML mutex. On the first acquire, a new handle is
+ * returned. On any subsequent calls to acquire by the same thread, the same
+ * handle is returned.
+ *
+ ******************************************************************************/
+acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
+{
+ acpi_status status;
+
+ if (!handle) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Must lock interpreter to prevent race conditions */
+
+ acpi_ex_enter_interpreter();
+
+ status = acpi_ex_acquire_mutex_object(timeout,
+ acpi_gbl_global_lock_mutex,
+ acpi_os_get_thread_id());
+
+ if (ACPI_SUCCESS(status)) {
+
+ /* Return the global lock handle (updated in acpi_ev_acquire_global_lock) */
+
+ *handle = acpi_gbl_global_lock_handle;
+ }
+
+ acpi_ex_exit_interpreter();
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_acquire_global_lock)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_release_global_lock
+ *
+ * PARAMETERS: Handle - Returned from acpi_acquire_global_lock
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Release the ACPI Global Lock. The handle must be valid.
+ *
+ ******************************************************************************/
+acpi_status acpi_release_global_lock(u32 handle)
+{
+ acpi_status status;
+
+ if (!handle || (handle != acpi_gbl_global_lock_handle)) {
+ return (AE_NOT_ACQUIRED);
+ }
+
+ status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex);
+ return (status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_release_global_lock)
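A hypothetical sketch of the acquire/release pairing described above (the timeout constant is ACPI_WAIT_FOREVER from the public headers; the critical section is a placeholder):

	u32 lock_handle;
	acpi_status status;

	status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &lock_handle);
	if (ACPI_SUCCESS(status)) {
		/* ... touch hardware shared with the firmware ... */
		(void)acpi_release_global_lock(lock_handle);
	}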
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
new file mode 100644
index 0000000..41554f7
--- /dev/null
+++ b/drivers/acpi/events/evxfevnt.c
@@ -0,0 +1,719 @@
+/******************************************************************************
+ *
+ * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acevents.h>
+#include <acpi/acnamesp.h>
+#include <acpi/actables.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evxfevnt")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enable
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Transfers the system into ACPI mode.
+ *
+ ******************************************************************************/
+acpi_status acpi_enable(void)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_enable);
+
+ /* ACPI tables must be present */
+
+ if (!acpi_tb_tables_loaded()) {
+ return_ACPI_STATUS(AE_NO_ACPI_TABLES);
+ }
+
+ /* Check current mode */
+
+ if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "System is already in ACPI mode\n"));
+ } else {
+ /* Transition to ACPI mode */
+
+ status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not transition to ACPI mode"));
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "Transition to ACPI mode successful\n"));
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_disable
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Transfers the system into LEGACY (non-ACPI) mode.
+ *
+ ******************************************************************************/
+acpi_status acpi_disable(void)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_disable);
+
+ if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+ "System is already in legacy (non-ACPI) mode\n"));
+ } else {
+ /* Transition to LEGACY mode */
+
+ status = acpi_hw_set_mode(ACPI_SYS_MODE_LEGACY);
+
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR((AE_INFO,
+ "Could not exit ACPI mode to legacy mode"));
+ return_ACPI_STATUS(status);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI mode disabled\n"));
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_disable)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enable_event
+ *
+ * PARAMETERS: Event - The fixed event to be enabled
+ * Flags - Reserved
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable an ACPI event (fixed)
+ *
+ ******************************************************************************/
+acpi_status acpi_enable_event(u32 event, u32 flags)
+{
+ acpi_status status = AE_OK;
+ u32 value;
+
+ ACPI_FUNCTION_TRACE(acpi_enable_event);
+
+ /* Decode the Fixed Event */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Enable the requested fixed event (by writing a one to the
+ * enable register bit)
+ */
+ status =
+ acpi_set_register(acpi_gbl_fixed_event_info[event].
+ enable_register_id, 1);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Make sure that the hardware responded */
+
+ status =
+ acpi_get_register(acpi_gbl_fixed_event_info[event].
+ enable_register_id, &value);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (value != 1) {
+ ACPI_ERROR((AE_INFO,
+ "Could not enable %s event",
+ acpi_ut_get_event_name(event)));
+ return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable_event)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_set_gpe_type
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device
+ * gpe_number - GPE level within the GPE block
+ * Type - New GPE type
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Set the type of an individual GPE
+ *
+ ******************************************************************************/
+acpi_status acpi_set_gpe_type(acpi_handle gpe_device, u32 gpe_number, u8 type)
+{
+ acpi_status status = AE_OK;
+ struct acpi_gpe_event_info *gpe_event_info;
+
+ ACPI_FUNCTION_TRACE(acpi_set_gpe_type);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ if ((gpe_event_info->flags & ACPI_GPE_TYPE_MASK) == type) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Set the new type (will disable GPE if currently enabled) */
+
+ status = acpi_ev_set_gpe_type(gpe_event_info, type);
+
+ unlock_and_exit:
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_gpe_type)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_enable_gpe
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device
+ * gpe_number - GPE level within the GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Enable an ACPI event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+ acpi_status status = AE_OK;
+ acpi_cpu_flags flags;
+ struct acpi_gpe_event_info *gpe_event_info;
+
+ ACPI_FUNCTION_TRACE(acpi_enable_gpe);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Perform the enable */
+
+ status = acpi_ev_enable_gpe(gpe_event_info, TRUE);
+
+ unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
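A hypothetical sketch of pairing this with acpi_set_gpe_type() (defined earlier in this file): a GPE is typically declared runtime or wake before it is enabled. The GPE number is a placeholder; ACPI_GPE_TYPE_RUNTIME is from the public headers.

	acpi_status status;

	/* Declare GPE 0x10 on the FADT block as a runtime GPE, then enable it */
	status = acpi_set_gpe_type(NULL, 0x10, ACPI_GPE_TYPE_RUNTIME);
	if (ACPI_SUCCESS(status))
		status = acpi_enable_gpe(NULL, 0x10);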
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_disable_gpe
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device
+ * gpe_number - GPE level within the GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Disable an ACPI event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+ acpi_status status = AE_OK;
+ acpi_cpu_flags flags;
+ struct acpi_gpe_event_info *gpe_event_info;
+
+ ACPI_FUNCTION_TRACE(acpi_disable_gpe);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ status = acpi_ev_disable_gpe(gpe_event_info);
+
+unlock_and_exit:
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
+
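As a reader's aid, here is a minimal usage sketch of the GPE interfaces above: a hypothetical caller marks GPE 0x10 of the FADT-defined GPE block (addressed with a NULL gpe_device) as a runtime GPE, enables it, and later disables it. The GPE number, the ACPI_GPE_TYPE_RUNTIME choice, and the wrapper function are illustrative assumptions, not code from this commit.

#include <acpi/acpi.h>

static acpi_status example_enable_runtime_gpe(void)
{
	acpi_status status;

	/* Illustrative: GPE 0x10 in the FADT-defined GPE block (NULL device) */
	status = acpi_set_gpe_type(NULL, 0x10, ACPI_GPE_TYPE_RUNTIME);
	if (ACPI_FAILURE(status))
		return status;

	/* Enable the GPE; the GPE lock is acquired inside acpi_enable_gpe() */
	status = acpi_enable_gpe(NULL, 0x10);
	if (ACPI_FAILURE(status))
		return status;

	/* ... later, when events are no longer wanted ... */
	return acpi_disable_gpe(NULL, 0x10);
}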
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_disable_event
+ *
+ * PARAMETERS: Event - The fixed event to be disabled
+ * Flags - Reserved
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Disable an ACPI event (fixed)
+ *
+ ******************************************************************************/
+acpi_status acpi_disable_event(u32 event, u32 flags)
+{
+ acpi_status status = AE_OK;
+ u32 value;
+
+ ACPI_FUNCTION_TRACE(acpi_disable_event);
+
+ /* Decode the Fixed Event */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Disable the requested fixed event (by writing a zero to the
+ * enable register bit)
+ */
+ status =
+ acpi_set_register(acpi_gbl_fixed_event_info[event].
+ enable_register_id, 0);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status =
+ acpi_get_register(acpi_gbl_fixed_event_info[event].
+ enable_register_id, &value);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ if (value != 0) {
+ ACPI_ERROR((AE_INFO,
+ "Could not disable %s events",
+ acpi_ut_get_event_name(event)));
+ return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_disable_event)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_clear_event
+ *
+ * PARAMETERS: Event - The fixed event to be cleared
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Clear an ACPI event (fixed)
+ *
+ ******************************************************************************/
+acpi_status acpi_clear_event(u32 event)
+{
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_TRACE(acpi_clear_event);
+
+ /* Decode the Fixed Event */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Clear the requested fixed event (by writing a one to the
+ * status register bit)
+ */
+ status =
+ acpi_set_register(acpi_gbl_fixed_event_info[event].
+ status_register_id, 1);
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_clear_event)
+
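A minimal sketch of the fixed-event interfaces follows, assuming the two-argument acpi_enable_event(event, flags) form that mirrors acpi_disable_event() above; the ACPI_EVENT_POWER_BUTTON choice and the wrapper function are illustrative.

#include <acpi/acpi.h>

static acpi_status example_fixed_event_cycle(void)
{
	acpi_status status;

	/* Enable the power button fixed event; the Flags argument is reserved */
	status = acpi_enable_event(ACPI_EVENT_POWER_BUTTON, 0);
	if (ACPI_FAILURE(status))
		return status;

	/* Clear any status bit that may already be latched in hardware */
	status = acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
	if (ACPI_FAILURE(status))
		return status;

	/* ... later, stop receiving the event ... */
	return acpi_disable_event(ACPI_EVENT_POWER_BUTTON, 0);
}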
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_clear_gpe
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device
+ * gpe_number - GPE level within the GPE block
+ * Flags - Called from an ISR or not
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Clear an ACPI event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
+{
+ acpi_status status = AE_OK;
+ struct acpi_gpe_event_info *gpe_event_info;
+
+ ACPI_FUNCTION_TRACE(acpi_clear_gpe);
+
+ /* Use semaphore lock if not executing at interrupt level */
+
+ if (flags & ACPI_NOT_ISR) {
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ status = acpi_hw_clear_gpe(gpe_event_info);
+
+ unlock_and_exit:
+ if (flags & ACPI_NOT_ISR) {
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ }
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_event_status
+ *
+ * PARAMETERS: Event - The fixed event
+ * event_status - Where the current status of the event will
+ * be returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Obtains and returns the current status of the event
+ *
+ ******************************************************************************/
+acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
+{
+ acpi_status status = AE_OK;
+ u32 value;
+
+ ACPI_FUNCTION_TRACE(acpi_get_event_status);
+
+ if (!event_status) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Decode the Fixed Event */
+
+ if (event > ACPI_EVENT_MAX) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Get the status of the requested fixed event */
+
+ status =
+ acpi_get_register(acpi_gbl_fixed_event_info[event].
+ enable_register_id, &value);
+ if (ACPI_FAILURE(status))
+ return_ACPI_STATUS(status);
+
+ *event_status = value;
+
+ status =
+ acpi_get_register(acpi_gbl_fixed_event_info[event].
+ status_register_id, &value);
+ if (ACPI_FAILURE(status))
+ return_ACPI_STATUS(status);
+
+ if (value)
+ *event_status |= ACPI_EVENT_FLAG_SET;
+
+ if (acpi_gbl_fixed_event_handlers[event].handler)
+ *event_status |= ACPI_EVENT_FLAG_HANDLE;
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_event_status)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_gpe_status
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device
+ * gpe_number - GPE level within the GPE block
+ * Flags - Called from an ISR or not
+ * event_status - Where the current status of the event will
+ * be returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get status of an event (general purpose)
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_gpe_status(acpi_handle gpe_device,
+ u32 gpe_number, u32 flags, acpi_event_status * event_status)
+{
+ acpi_status status = AE_OK;
+ struct acpi_gpe_event_info *gpe_event_info;
+
+ ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
+
+ /* Use semaphore lock if not executing at interrupt level */
+
+ if (flags & ACPI_NOT_ISR) {
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Obtain status on the requested GPE number */
+
+ status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
+
+ if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
+ *event_status |= ACPI_EVENT_FLAG_HANDLE;
+
+ unlock_and_exit:
+ if (flags & ACPI_NOT_ISR) {
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+ }
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
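A short polling sketch combining the two status interfaces above: it checks a fixed event, then a GPE, and clears the GPE if its status bit is set. The event and GPE numbers and the wrapper function are illustrative; ACPI_NOT_ISR indicates the calls are not made from interrupt context.

#include <acpi/acpi.h>

static acpi_status example_poll_event_status(void)
{
	acpi_event_status evt_status;
	acpi_status status;

	/* Fixed event: has the power button status bit been latched? */
	status = acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &evt_status);
	if (ACPI_FAILURE(status))
		return status;

	/* GPE 0x10 of the FADT GPE block: query outside interrupt context */
	status = acpi_get_gpe_status(NULL, 0x10, ACPI_NOT_ISR, &evt_status);
	if (ACPI_FAILURE(status))
		return status;

	/* If the GPE has fired, clear its status bit */
	if (evt_status & ACPI_EVENT_FLAG_SET)
		status = acpi_clear_gpe(NULL, 0x10, ACPI_NOT_ISR);

	return status;
}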
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_gpe_block
+ *
+ * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
+ * gpe_block_address - Address and space_id
+ * register_count - Number of GPE register pairs in the block
+ * interrupt_number - H/W interrupt for the block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Create and Install a block of GPE registers
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_gpe_block(acpi_handle gpe_device,
+ struct acpi_generic_address *gpe_block_address,
+ u32 register_count, u32 interrupt_number)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+ struct acpi_namespace_node *node;
+ struct acpi_gpe_block_info *gpe_block;
+
+ ACPI_FUNCTION_TRACE(acpi_install_gpe_block);
+
+ if ((!gpe_device) || (!gpe_block_address) || (!register_count)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ node = acpi_ns_map_handle_to_node(gpe_device);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /*
+ * For user-installed GPE Block Devices, the gpe_block_base_number
+ * is always zero
+ */
+ status =
+ acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0,
+ interrupt_number, &gpe_block);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Run the _PRW methods and enable the GPEs */
+
+ status = acpi_ev_initialize_gpe_block(node, gpe_block);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Get the device_object attached to the node */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+
+ /* No object, create a new one */
+
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ status =
+ acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE);
+
+ /* Remove local reference to the object */
+
+ acpi_ut_remove_reference(obj_desc);
+
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+ }
+
+ /* Install the GPE block in the device_object */
+
+ obj_desc->device.gpe_block = gpe_block;
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_block)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_gpe_block
+ *
+ * PARAMETERS: gpe_device - Handle to the parent GPE Block Device
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a previously installed block of GPE registers
+ *
+ ******************************************************************************/
+acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_gpe_block);
+
+ if (!gpe_device) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ node = acpi_ns_map_handle_to_node(gpe_device);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Get the device_object attached to the node */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc || !obj_desc->device.gpe_block) {
+ status = AE_NULL_OBJECT;
+ goto unlock_and_exit;
+ }
+
+ /* Delete the GPE block (but not the device_object) */
+
+ status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block);
+ if (ACPI_SUCCESS(status)) {
+ obj_desc->device.gpe_block = NULL;
+ }
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
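A sketch of installing and later removing a GPE block on a GPE block device, under stated assumptions: the register block address, register count, interrupt number, and the system I/O space ID are hypothetical values, not taken from this commit.

#include <acpi/acpi.h>

static acpi_status example_gpe_block(acpi_handle gpe_device)
{
	/* Hypothetical block: 2 GPE register pairs in system I/O space */
	struct acpi_generic_address block_addr = {
		.space_id = ACPI_ADR_SPACE_SYSTEM_IO,
		.address = 0x1000,	/* illustrative port address */
	};
	acpi_status status;

	/* Create the block and wire it to (hypothetical) interrupt 5 */
	status = acpi_install_gpe_block(gpe_device, &block_addr, 2, 5);
	if (ACPI_FAILURE(status))
		return status;

	/* ... GPEs within this block may now be enabled via gpe_device ... */

	return acpi_remove_gpe_block(gpe_device);
}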
diff --git a/drivers/acpi/events/evxfregn.c b/drivers/acpi/events/evxfregn.c
new file mode 100644
index 0000000..e875080
--- /dev/null
+++ b/drivers/acpi/events/evxfregn.c
@@ -0,0 +1,253 @@
+/******************************************************************************
+ *
+ * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and
+ * Address Spaces.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2008, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include <acpi/acnamesp.h>
+#include <acpi/acevents.h>
+
+#define _COMPONENT ACPI_EVENTS
+ACPI_MODULE_NAME("evxfregn")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_install_address_space_handler
+ *
+ * PARAMETERS: Device - Handle for the device
+ * space_id - The address space ID
+ * Handler - Address of the handler
+ * Setup - Address of the setup function
+ * Context - Value passed to the handler on each access
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Install a handler for all op_regions of a given space_id.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_address_space_handler(acpi_handle device,
+ acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler,
+ acpi_adr_space_setup setup, void *context)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_install_address_space_handler);
+
+ /* Parameter validation */
+
+ if (!device) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_map_handle_to_node(device);
+ if (!node) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Install the handler for all Regions for this Space ID */
+
+ status =
+ acpi_ev_install_space_handler(node, space_id, handler, setup,
+ context);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ /* Run all _REG methods for this address space */
+
+ status = acpi_ev_execute_reg_methods(node, space_id);
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_remove_address_space_handler
+ *
+ * PARAMETERS: Device - Handle for the device
+ * space_id - The address space ID
+ * Handler - Address of the handler
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Remove a previously installed handler.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_remove_address_space_handler(acpi_handle device,
+ acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler)
+{
+ union acpi_operand_object *obj_desc;
+ union acpi_operand_object *handler_obj;
+ union acpi_operand_object *region_obj;
+ union acpi_operand_object **last_obj_ptr;
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_remove_address_space_handler);
+
+ /* Parameter validation */
+
+ if (!device) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_map_handle_to_node(device);
+ if (!node ||
+ ((node->type != ACPI_TYPE_DEVICE) &&
+ (node->type != ACPI_TYPE_PROCESSOR) &&
+ (node->type != ACPI_TYPE_THERMAL) &&
+ (node != acpi_gbl_root_node))) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Make sure the internal object exists */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
+
+ /* Find the address handler the user requested */
+
+ handler_obj = obj_desc->device.handler;
+ last_obj_ptr = &obj_desc->device.handler;
+ while (handler_obj) {
+
+ /* We have a handler, see if user requested this one */
+
+ if (handler_obj->address_space.space_id == space_id) {
+
+ /* Handler must be the same as the installed handler */
+
+ if (handler_obj->address_space.handler != handler) {
+ status = AE_BAD_PARAMETER;
+ goto unlock_and_exit;
+ }
+
+ /* Matched space_id, first dereference this in the Regions */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Removing address handler %p(%p) for region %s on Device %p(%p)\n",
+ handler_obj, handler,
+ acpi_ut_get_region_name(space_id),
+ node, obj_desc));
+
+ region_obj = handler_obj->address_space.region_list;
+
+ /* Walk the handler's region list */
+
+ while (region_obj) {
+ /*
+ * First disassociate the handler from the region.
+ *
+ * NOTE: this doesn't mean that the region goes away.
+ * The region is just inaccessible, as indicated to
+ * the _REG method.
+ */
+ acpi_ev_detach_region(region_obj, TRUE);
+
+ /*
+ * Walk the list: Just grab the head because the
+ * detach_region removed the previous head.
+ */
+ region_obj =
+ handler_obj->address_space.region_list;
+
+ }
+
+ /* Remove this Handler object from the list */
+
+ *last_obj_ptr = handler_obj->address_space.next;
+
+ /* Now we can delete the handler object */
+
+ acpi_ut_remove_reference(handler_obj);
+ goto unlock_and_exit;
+ }
+
+ /* Walk the linked list of handlers */
+
+ last_obj_ptr = &handler_obj->address_space.next;
+ handler_obj = handler_obj->address_space.next;
+ }
+
+ /* The handler does not exist */
+
+ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
+ "Unable to remove address handler %p for %s(%X), DevNode %p, obj %p\n",
+ handler, acpi_ut_get_region_name(space_id), space_id,
+ node, obj_desc));
+
+ status = AE_NOT_EXIST;
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
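Finally, a sketch of a driver claiming the EmbeddedControl address space on its device and releasing it again. The callbacks are left as placeholders of the acpi_adr_space_handler/acpi_adr_space_setup pointer types (a real driver would point them at its own implementations), and the device handle, context, and wrapper function are assumptions.

#include <acpi/acpi.h>

/* Hypothetical callbacks; a real driver assigns its own functions here */
static acpi_adr_space_handler my_ec_space_handler;
static acpi_adr_space_setup my_ec_space_setup;

static acpi_status example_claim_ec_space(acpi_handle ec_device, void *driver_data)
{
	acpi_status status;

	/* Claim all EmbeddedControl op_regions under ec_device; _REG runs here */
	status = acpi_install_address_space_handler(ec_device,
						    ACPI_ADR_SPACE_EC,
						    my_ec_space_handler,
						    my_ec_space_setup,
						    driver_data);
	if (ACPI_FAILURE(status))
		return status;

	/* ... on driver removal, detach the handler from its regions ... */
	return acpi_remove_address_space_handler(ec_device, ACPI_ADR_SPACE_EC,
						 my_ec_space_handler);
}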