author     Bob Moore <robert.moore@intel.com>    2005-12-02 18:27:00 -0500
committer  Len Brown <len.brown@intel.com>       2005-12-10 00:29:11 -0500
commit     28f55ebce5bd2fceec8adc7c8860953d3e4532a8 (patch)
tree       2c5c10c18e51f9a717514dfccdc287fc517730c6 /drivers/acpi
parent     c51a4de85de720670f2fbc592a6f8040af72ad87 (diff)
[ACPI] ACPICA 20051202
Modified the parsing of control methods to no longer create namespace objects during the first pass of the parse. Objects are now created only during the execute phase, at the moment the namespace creation operator is encountered in the AML (Name, OperationRegion, CreateByteField, etc.) This should eliminate ALREADY_EXISTS exceptions seen on some machines where reentrant control methods are protected by an AML mutex. The mutex will now correctly block multiple threads from attempting to create the same object more than once.

Increased the number of available Owner Ids for namespace object tracking from 32 to 255. This should eliminate the OWNER_ID_LIMIT exceptions seen on some machines with a large number of ACPI tables (either static or dynamic).

Enhanced the namespace dump routine to output the owner ID for each namespace object.

Signed-off-by: Bob Moore <robert.moore@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
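As a quick illustration of the owner-ID bookkeeping this patch adds in utmisc.c and utglobal.c, below is a minimal standalone C sketch. It is not the ACPICA code: the names are illustrative, and it omits the ACPI_MTX_CACHES locking and the last-index/next-offset cycling that the real acpi_ut_allocate_owner_id() performs. It only shows the bitmap layout and the (bit index + 1) encoding that expands the ID space from 32 to 255.

/*
 * Illustrative sketch of the new owner-ID scheme (not ACPICA code).
 * IDs 1-255 are tracked in eight 32-bit mask words; ID 0 is invalid and
 * the very last bit is permanently reserved so the +1 encoding cannot
 * overflow an 8-bit owner ID.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_OWNERID_MASKS 8                 /* 8 x 32 bits = 256 positions */

static uint32_t owner_id_mask[NUM_OWNERID_MASKS];

static void owner_id_init(void)
{
	for (int i = 0; i < NUM_OWNERID_MASKS; i++)
		owner_id_mask[i] = 0;
	/* Reserve the last bit; 255 usable IDs remain */
	owner_id_mask[NUM_OWNERID_MASKS - 1] |= 0x80000000u;
}

/* Returns an ID in 1..255, or 0 when all IDs are in use */
static uint8_t owner_id_allocate(void)
{
	for (int j = 0; j < NUM_OWNERID_MASKS; j++) {
		if (owner_id_mask[j] == 0xFFFFFFFFu)
			continue;               /* no free bits in this word */
		for (int k = 0; k < 32; k++) {
			if (!(owner_id_mask[j] & (1u << k))) {
				owner_id_mask[j] |= (1u << k);
				return (uint8_t)(j * 32 + k + 1);
			}
		}
	}
	return 0;                               /* analogous to AE_OWNER_ID_LIMIT */
}

static void owner_id_release(uint8_t id)
{
	if (id == 0)
		return;
	id--;                                   /* back to a 0-based bit index */
	owner_id_mask[id / 32] &= ~(1u << (id % 32));
}

int main(void)
{
	owner_id_init();
	uint8_t a = owner_id_allocate();        /* 1 */
	uint8_t b = owner_id_allocate();        /* 2 */
	owner_id_release(a);
	printf("allocated %u and %u, released %u\n", a, b, a);
	return 0;
}

Reserving the top bit of the last mask word keeps the encoded value (bit index + 1) within a u8, which is why the commit advertises 255 rather than 256 available IDs.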
Diffstat (limited to 'drivers/acpi')
-rw-r--r--  drivers/acpi/dispatcher/dsmethod.c  337
-rw-r--r--  drivers/acpi/dispatcher/dswload.c   132
-rw-r--r--  drivers/acpi/namespace/nsdump.c       4
-rw-r--r--  drivers/acpi/utilities/utglobal.c     9
-rw-r--r--  drivers/acpi/utilities/utmisc.c      79
5 files changed, 338 insertions, 223 deletions
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index 36c1ca0..58ad00b 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -53,133 +53,6 @@ ACPI_MODULE_NAME("dsmethod")
/*******************************************************************************
*
- * FUNCTION: acpi_ds_parse_method
- *
- * PARAMETERS: Node - Method node
- *
- * RETURN: Status
- *
- * DESCRIPTION: Parse the AML that is associated with the method.
- *
- * MUTEX: Assumes parser is locked
- *
- ******************************************************************************/
-acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
-{
- acpi_status status;
- union acpi_operand_object *obj_desc;
- union acpi_parse_object *op;
- struct acpi_walk_state *walk_state;
-
- ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node);
-
- /* Parameter Validation */
-
- if (!node) {
- return_ACPI_STATUS(AE_NULL_ENTRY);
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "**** Parsing [%4.4s] **** named_obj=%p\n",
- acpi_ut_get_node_name(node), node));
-
- /* Extract the method object from the method Node */
-
- obj_desc = acpi_ns_get_attached_object(node);
- if (!obj_desc) {
- return_ACPI_STATUS(AE_NULL_OBJECT);
- }
-
- /* Create a mutex for the method if there is a concurrency limit */
-
- if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) &&
- (!obj_desc->method.semaphore)) {
- status = acpi_os_create_semaphore(obj_desc->method.concurrency,
- obj_desc->method.concurrency,
- &obj_desc->method.semaphore);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
-
- /*
- * Allocate a new parser op to be the root of the parsed
- * method tree
- */
- op = acpi_ps_alloc_op(AML_METHOD_OP);
- if (!op) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- /* Init new op with the method name and pointer back to the Node */
-
- acpi_ps_set_name(op, node->name.integer);
- op->common.node = node;
-
- /*
- * Get a new owner_id for objects created by this method. Namespace
- * objects (such as Operation Regions) can be created during the
- * first pass parse.
- */
- status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
- if (ACPI_FAILURE(status)) {
- goto cleanup;
- }
-
- /* Create and initialize a new walk state */
-
- walk_state =
- acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL,
- NULL);
- if (!walk_state) {
- status = AE_NO_MEMORY;
- goto cleanup2;
- }
-
- status = acpi_ds_init_aml_walk(walk_state, op, node,
- obj_desc->method.aml_start,
- obj_desc->method.aml_length, NULL, 1);
- if (ACPI_FAILURE(status)) {
- acpi_ds_delete_walk_state(walk_state);
- goto cleanup2;
- }
-
- /*
- * Parse the method, first pass
- *
- * The first pass load is where newly declared named objects are added into
- * the namespace. Actual evaluation of the named objects (what would be
- * called a "second pass") happens during the actual execution of the
- * method so that operands to the named objects can take on dynamic
- * run-time values.
- */
- status = acpi_ps_parse_aml(walk_state);
- if (ACPI_FAILURE(status)) {
- goto cleanup2;
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
- "**** [%4.4s] Parsed **** named_obj=%p Op=%p\n",
- acpi_ut_get_node_name(node), node, op));
-
- /*
- * Delete the parse tree. We simply re-parse the method for every
- * execution since there isn't much overhead (compared to keeping lots
- * of parse trees around)
- */
- acpi_ns_delete_namespace_subtree(node);
- acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id);
-
- cleanup2:
- acpi_ut_release_owner_id(&obj_desc->method.owner_id);
-
- cleanup:
- acpi_ps_delete_parse_tree(op);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ds_begin_method_execution
*
* PARAMETERS: method_node - Node of the method
@@ -193,7 +66,6 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
* for clearance to execute.
*
******************************************************************************/
-
acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
union acpi_operand_object *obj_desc,
@@ -545,16 +417,54 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
}
}
+ /*
+ * There are no more threads executing this method. Perform
+ * additional cleanup.
+ *
+ * The method Node is stored in the walk state
+ */
+ method_node = walk_state->method_node;
+
+ /* Lock namespace for possible update */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ goto exit;
+ }
+
+ /*
+ * Delete any namespace entries created immediately underneath
+ * the method
+ */
+ if (method_node->child) {
+ acpi_ns_delete_namespace_subtree(method_node);
+ }
+
+ /*
+ * Delete any namespace entries created anywhere else within
+ * the namespace by the execution of this method
+ */
+ acpi_ns_delete_namespace_by_owner(walk_state->method_desc->method.
+ owner_id);
+ status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+ /* Are there any other threads currently executing this method? */
+
if (walk_state->method_desc->method.thread_count) {
+ /*
+ * Additional threads. Do not release the owner_id in this case,
+ * we immediately reuse it for the next thread executing this method
+ */
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
- "*** Not deleting method namespace, there are still %d threads\n",
+ "*** Completed execution of one thread, %d threads remaining\n",
walk_state->method_desc->method.
thread_count));
- } else { /* This is the last executing thread */
+ } else {
+ /* This is the only executing thread for this method */
/*
* Support to dynamically change a method from not_serialized to
- * Serialized if it appears that the method is written foolishly and
+ * Serialized if it appears that the method is incorrectly written and
* does not support multiple thread execution. The best example of this
* is if such a method creates namespace objects and blocks. A second
* thread will fail with an AE_ALREADY_EXISTS exception
@@ -570,34 +480,8 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
semaphore);
}
- /*
- * There are no more threads executing this method. Perform
- * additional cleanup.
- *
- * The method Node is stored in the walk state
- */
- method_node = walk_state->method_node;
-
- /*
- * Delete any namespace entries created immediately underneath
- * the method
- */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- goto exit;
- }
-
- if (method_node->child) {
- acpi_ns_delete_namespace_subtree(method_node);
- }
+ /* No more threads, we can free the owner_id */
- /*
- * Delete any namespace entries created anywhere else within
- * the namespace
- */
- acpi_ns_delete_namespace_by_owner(walk_state->method_desc->
- method.owner_id);
- status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
acpi_ut_release_owner_id(&walk_state->method_desc->method.
owner_id);
}
@@ -606,3 +490,140 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
(void)acpi_ut_release_mutex(ACPI_MTX_PARSER);
return_VOID;
}
+
+#ifdef ACPI_INIT_PARSE_METHODS
+ /*
+ * Note 11/2005: Removed this code to parse all methods during table
+ * load because it causes problems if there are any errors during the
+ * parse. Also, it seems like overkill and we probably don't want to
+ * abort a table load because of an issue with a single method.
+ */
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_parse_method
+ *
+ * PARAMETERS: Node - Method node
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Parse the AML that is associated with the method.
+ *
+ * MUTEX: Assumes parser is locked
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+ union acpi_parse_object *op;
+ struct acpi_walk_state *walk_state;
+
+ ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node);
+
+ /* Parameter Validation */
+
+ if (!node) {
+ return_ACPI_STATUS(AE_NULL_ENTRY);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "**** Parsing [%4.4s] **** named_obj=%p\n",
+ acpi_ut_get_node_name(node), node));
+
+ /* Extract the method object from the method Node */
+
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NULL_OBJECT);
+ }
+
+ /* Create a mutex for the method if there is a concurrency limit */
+
+ if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) &&
+ (!obj_desc->method.semaphore)) {
+ status = acpi_os_create_semaphore(obj_desc->method.concurrency,
+ obj_desc->method.concurrency,
+ &obj_desc->method.semaphore);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ /*
+ * Allocate a new parser op to be the root of the parsed
+ * method tree
+ */
+ op = acpi_ps_alloc_op(AML_METHOD_OP);
+ if (!op) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /* Init new op with the method name and pointer back to the Node */
+
+ acpi_ps_set_name(op, node->name.integer);
+ op->common.node = node;
+
+ /*
+ * Get a new owner_id for objects created by this method. Namespace
+ * objects (such as Operation Regions) can be created during the
+ * first pass parse.
+ */
+ status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Create and initialize a new walk state */
+
+ walk_state =
+ acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL,
+ NULL);
+ if (!walk_state) {
+ status = AE_NO_MEMORY;
+ goto cleanup2;
+ }
+
+ status = acpi_ds_init_aml_walk(walk_state, op, node,
+ obj_desc->method.aml_start,
+ obj_desc->method.aml_length, NULL, 1);
+ if (ACPI_FAILURE(status)) {
+ acpi_ds_delete_walk_state(walk_state);
+ goto cleanup2;
+ }
+
+ /*
+ * Parse the method, first pass
+ *
+ * The first pass load is where newly declared named objects are added into
+ * the namespace. Actual evaluation of the named objects (what would be
+ * called a "second pass") happens during the actual execution of the
+ * method so that operands to the named objects can take on dynamic
+ * run-time values.
+ */
+ status = acpi_ps_parse_aml(walk_state);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup2;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "**** [%4.4s] Parsed **** named_obj=%p Op=%p\n",
+ acpi_ut_get_node_name(node), node, op));
+
+ /*
+ * Delete the parse tree. We simply re-parse the method for every
+ * execution since there isn't much overhead (compared to keeping lots
+ * of parse trees around)
+ */
+ acpi_ns_delete_namespace_subtree(node);
+ acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id);
+
+ cleanup2:
+ acpi_ut_release_owner_id(&obj_desc->method.owner_id);
+
+ cleanup:
+ acpi_ps_delete_parse_tree(op);
+ return_ACPI_STATUS(status);
+}
+#endif
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c
index 89d318c..44d4f4b 100644
--- a/drivers/acpi/dispatcher/dswload.c
+++ b/drivers/acpi/dispatcher/dswload.c
@@ -127,7 +127,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
char *path;
u32 flags;
- ACPI_FUNCTION_NAME("ds_load1_begin_op");
+ ACPI_FUNCTION_TRACE("ds_load1_begin_op");
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
@@ -138,14 +138,14 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
if (op) {
if (!(walk_state->op_info->flags & AML_NAMED)) {
*out_op = op;
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/* Check if this object has already been installed in the namespace */
if (op->common.node) {
*out_op = op;
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
}
@@ -188,7 +188,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
#endif
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(path, status);
- return (status);
+ return_ACPI_STATUS(status);
}
/*
@@ -235,7 +235,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
ACPI_REPORT_ERROR(("Invalid type (%s) for target of Scope operator [%4.4s] (Cannot override)\n", acpi_ut_get_type_name(node->type), path));
- return (AE_AML_OPERAND_TYPE);
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
break;
@@ -257,6 +257,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
* buffer_field, or Package), the name of the object is already
* in the namespace.
*/
+
if (walk_state->deferred_node) {
/* This name is already in the namespace, get the node */
@@ -265,6 +266,16 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
break;
}
+ /*
+ * If we are executing a method, do not create any namespace objects
+ * during the load phase, only during execution.
+ */
+ if (walk_state->method_node) {
+ node = NULL;
+ status = AE_OK;
+ break;
+ }
+
flags = ACPI_NS_NO_UPSEARCH;
if ((walk_state->opcode != AML_SCOPE_OP) &&
(!(walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP))) {
@@ -290,7 +301,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
&(node));
if (ACPI_FAILURE(status)) {
ACPI_REPORT_NSERROR(path, status);
- return (status);
+ return_ACPI_STATUS(status);
}
break;
}
@@ -302,28 +313,29 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
op = acpi_ps_alloc_op(walk_state->opcode);
if (!op) {
- return (AE_NO_MEMORY);
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
}
- /* Initialize */
-
- op->named.name = node->name.integer;
+ /* Initialize the op */
#if (defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY))
op->named.path = ACPI_CAST_PTR(u8, path);
#endif
- /*
- * Put the Node in the "op" object that the parser uses, so we
- * can get it again quickly when this scope is closed
- */
- op->common.node = node;
+ if (node) {
+ /*
+ * Put the Node in the "op" object that the parser uses, so we
+ * can get it again quickly when this scope is closed
+ */
+ op->common.node = node;
+ op->named.name = node->name.integer;
+ }
+
acpi_ps_append_arg(acpi_ps_get_parent_scope(&walk_state->parser_state),
op);
-
*out_op = op;
- return (status);
+ return_ACPI_STATUS(status);
}
/*******************************************************************************
@@ -339,13 +351,13 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
*
******************************************************************************/
-acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
+acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
{
union acpi_parse_object *op;
acpi_object_type object_type;
acpi_status status = AE_OK;
- ACPI_FUNCTION_NAME("ds_load1_end_op");
+ ACPI_FUNCTION_TRACE("ds_load1_end_op");
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
@@ -354,7 +366,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
/* We are only interested in opcodes that have an associated name */
if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/* Get the object type to determine if we should pop the scope */
@@ -363,21 +375,37 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
#ifndef ACPI_NO_METHOD_EXECUTION
if (walk_state->op_info->flags & AML_FIELD) {
- if (walk_state->opcode == AML_FIELD_OP ||
- walk_state->opcode == AML_BANK_FIELD_OP ||
- walk_state->opcode == AML_INDEX_FIELD_OP) {
- status = acpi_ds_init_field_objects(op, walk_state);
+ /*
+ * If we are executing a method, do not create any namespace objects
+ * during the load phase, only during execution.
+ */
+ if (!walk_state->method_node) {
+ if (walk_state->opcode == AML_FIELD_OP ||
+ walk_state->opcode == AML_BANK_FIELD_OP ||
+ walk_state->opcode == AML_INDEX_FIELD_OP) {
+ status =
+ acpi_ds_init_field_objects(op, walk_state);
+ }
}
- return (status);
+ return_ACPI_STATUS(status);
}
- if (op->common.aml_opcode == AML_REGION_OP) {
- status = acpi_ex_create_region(op->named.data, op->named.length,
- (acpi_adr_space_type)
- ((op->common.value.arg)->common.
- value.integer), walk_state);
- if (ACPI_FAILURE(status)) {
- return (status);
+ /*
+ * If we are executing a method, do not create any namespace objects
+ * during the load phase, only during execution.
+ */
+ if (!walk_state->method_node) {
+ if (op->common.aml_opcode == AML_REGION_OP) {
+ status =
+ acpi_ex_create_region(op->named.data,
+ op->named.length,
+ (acpi_adr_space_type)
+ ((op->common.value.arg)->
+ common.value.integer),
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
}
}
#endif
@@ -391,7 +419,12 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
common.
aml_opcode))->
object_type;
- op->common.node->type = (u8) object_type;
+
+ /* Set node type if we have a namespace node */
+
+ if (op->common.node) {
+ op->common.node->type = (u8) object_type;
+ }
}
}
@@ -424,7 +457,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
walk_state->num_operands = 0;
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
}
}
@@ -439,7 +472,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
status = acpi_ds_scope_stack_pop(walk_state);
}
- return (status);
+ return_ACPI_STATUS(status);
}
/*******************************************************************************
@@ -456,8 +489,8 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state * walk_state)
******************************************************************************/
acpi_status
-acpi_ds_load2_begin_op(struct acpi_walk_state * walk_state,
- union acpi_parse_object ** out_op)
+acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **out_op)
{
union acpi_parse_object *op;
struct acpi_namespace_node *node;
@@ -840,6 +873,13 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
case AML_TYPE_NAMED_FIELD:
+ /*
+ * If we are executing a method, initialize the field
+ */
+ if (walk_state->method_node) {
+ status = acpi_ds_init_field_objects(op, walk_state);
+ }
+
switch (op->common.aml_opcode) {
case AML_INDEX_FIELD_OP:
@@ -929,6 +969,24 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
switch (op->common.aml_opcode) {
#ifndef ACPI_NO_METHOD_EXECUTION
case AML_REGION_OP:
+
+ /*
+ * If we are executing a method, initialize the region
+ */
+ if (walk_state->method_node) {
+ status =
+ acpi_ex_create_region(op->named.data,
+ op->named.length,
+ (acpi_adr_space_type)
+ ((op->common.value.
+ arg)->common.value.
+ integer),
+ walk_state);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
/*
* The op_region is not fully parsed at this time. Only valid
* argument is the space_id. (We must save the address of the
diff --git a/drivers/acpi/namespace/nsdump.c b/drivers/acpi/namespace/nsdump.c
index 9faf1d5..864c642 100644
--- a/drivers/acpi/namespace/nsdump.c
+++ b/drivers/acpi/namespace/nsdump.c
@@ -212,7 +212,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
/*
* Now we can print out the pertinent information
*/
- acpi_os_printf(" %-12s %p ", acpi_ut_get_type_name(type), this_node);
+ acpi_os_printf(" %-12s %p %2.2X ",
+ acpi_ut_get_type_name(type), this_node,
+ this_node->owner_id);
dbg_level = acpi_dbg_level;
acpi_dbg_level = 0;
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index d6813d8..6828c7a 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -793,6 +793,11 @@ void acpi_ut_init_globals(void)
acpi_gbl_mutex_info[i].use_count = 0;
}
+ for (i = 0; i < ACPI_NUM_OWNERID_MASKS; i++) {
+ acpi_gbl_owner_id_mask[i] = 0;
+ }
+ acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000; /* Last ID is never valid */
+
/* GPE support */
acpi_gbl_gpe_xrupt_list_head = NULL;
@@ -830,8 +835,8 @@ void acpi_ut_init_globals(void)
acpi_gbl_ns_lookup_count = 0;
acpi_gbl_ps_find_count = 0;
acpi_gbl_acpi_hardware_present = TRUE;
- acpi_gbl_owner_id_mask = 0;
- acpi_gbl_last_owner_id = 0;
+ acpi_gbl_last_owner_id_index = 0;
+ acpi_gbl_next_owner_id_offset = 0;
acpi_gbl_trace_method_name = 0;
acpi_gbl_trace_dbg_level = 0;
acpi_gbl_trace_dbg_layer = 0;
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
index 89efba7..64dd64b 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/utilities/utmisc.c
@@ -64,6 +64,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
{
acpi_native_uint i;
acpi_native_uint j;
+ acpi_native_uint k;
acpi_status status;
ACPI_FUNCTION_TRACE("ut_allocate_owner_id");
@@ -85,32 +86,50 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
/*
* Find a free owner ID, cycle through all possible IDs on repeated
- * allocations. Note: Index for next possible ID is equal to the value
- * of the last allocated ID.
+ * allocations. (ACPI_NUM_OWNERID_MASKS + 1) because first index may have
+ * to be scanned twice.
*/
- for (i = 0, j = acpi_gbl_last_owner_id; i < 32; i++, j++) {
- if (j >= 32) {
- j = 0; /* Wraparound to ID start */
+ for (i = 0, j = acpi_gbl_last_owner_id_index;
+ i < (ACPI_NUM_OWNERID_MASKS + 1); i++, j++) {
+ if (j >= ACPI_NUM_OWNERID_MASKS) {
+ j = 0; /* Wraparound to start of mask array */
}
- if (!(acpi_gbl_owner_id_mask & (1 << j))) {
- /*
- * Found a free ID. The actual ID is the bit index plus one,
- * making zero an invalid Owner ID. Save this as the last ID
- * allocated and update the global ID mask.
- */
- acpi_gbl_last_owner_id = (acpi_owner_id) (j + 1);
- *owner_id = acpi_gbl_last_owner_id;
+ for (k = acpi_gbl_next_owner_id_offset; k < 32; k++) {
+ if (acpi_gbl_owner_id_mask[j] == ACPI_UINT32_MAX) {
+ /* There are no free IDs in this mask */
- ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
- "Current owner_id mask: %8.8X New ID: %2.2X\n",
- acpi_gbl_owner_id_mask,
- (unsigned int)
- acpi_gbl_last_owner_id));
+ break;
+ }
- acpi_gbl_owner_id_mask |= (1 << j);
- goto exit;
+ if (!(acpi_gbl_owner_id_mask[j] & (1 << k))) {
+ /*
+ * Found a free ID. The actual ID is the bit index plus one,
+ * making zero an invalid Owner ID. Save this as the last ID
+ * allocated and update the global ID mask.
+ */
+ acpi_gbl_owner_id_mask[j] |= (1 << k);
+
+ acpi_gbl_last_owner_id_index = (u8) j;
+ acpi_gbl_next_owner_id_offset = (u8) (k + 1);
+
+ /*
+ * Construct encoded ID from the index and bit position
+ *
+ * Note: Last [j].k (bit 255) is never used and is marked
+ * permanently allocated (prevents +1 overflow)
+ */
+ *owner_id =
+ (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
+
+ ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
+ "Allocated owner_id: %2.2X\n",
+ (unsigned int)*owner_id));
+ goto exit;
+ }
}
+
+ acpi_gbl_next_owner_id_offset = 0;
}
/*
@@ -124,7 +143,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
* methods, or there may be a bug where the IDs are not released.
*/
status = AE_OWNER_ID_LIMIT;
- ACPI_REPORT_ERROR(("Could not allocate new owner_id (32 max), AE_OWNER_ID_LIMIT\n"));
+ ACPI_REPORT_ERROR(("Could not allocate new owner_id (255 max), AE_OWNER_ID_LIMIT\n"));
exit:
(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);
@@ -141,7 +160,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
* control method or unloading a table. Either way, we would
* ignore any error anyway.
*
- * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 32
+ * DESCRIPTION: Release a table or method owner ID. Valid IDs are 1 - 255
*
******************************************************************************/
@@ -149,6 +168,8 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
{
acpi_owner_id owner_id = *owner_id_ptr;
acpi_status status;
+ acpi_native_uint index;
+ u32 bit;
ACPI_FUNCTION_TRACE_U32("ut_release_owner_id", owner_id);
@@ -158,7 +179,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
/* Zero is not a valid owner_iD */
- if ((owner_id == 0) || (owner_id > 32)) {
+ if ((owner_id == 0) || (owner_id > 255)) {
ACPI_REPORT_ERROR(("Invalid owner_id: %2.2X\n", owner_id));
return_VOID;
}
@@ -174,10 +195,18 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
owner_id--;
+ /* Decode ID to index/offset pair */
+
+ index = ACPI_DIV_32(owner_id);
+ bit = 1 << ACPI_MOD_32(owner_id);
+
/* Free the owner ID only if it is valid */
- if (acpi_gbl_owner_id_mask & (1 << owner_id)) {
- acpi_gbl_owner_id_mask ^= (1 << owner_id);
+ if (acpi_gbl_owner_id_mask[index] & bit) {
+ acpi_gbl_owner_id_mask[index] ^= bit;
+ } else {
+ ACPI_REPORT_ERROR(("Release of non-allocated owner_id: %2.2X\n",
+ owner_id + 1));
}
(void)acpi_ut_release_mutex(ACPI_MTX_CACHES);