This article collects typical usage examples of ACPI_FUNCTION_TRACE in C/C++ code, drawn from the ACPICA / Linux kernel ACPI sources. If you have been wondering what ACPI_FUNCTION_TRACE does, how it is called, and what real code that uses it looks like, the hand-picked examples below should help.
The following presents 15 code examples of ACPI_FUNCTION_TRACE, sorted by popularity by default.
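Before diving into the examples, here is a minimal sketch of the pattern they all share, assuming only the standard ACPICA environment (the header name and the function do_example_work are hypothetical placeholders, not ACPICA symbols): ACPI_FUNCTION_TRACE records entry into a function for the ACPICA debug output, and the matching return_ACPI_STATUS / return_VOID / return_UINT32 macros record the exit together with the returned status, so every return path is expected to go through one of them. Note that older ACPICA releases pass the name as a quoted string (Examples 1 and 12 below), while newer releases pass a bare identifier.

#include "acpi.h"            /* assumed ACPICA environment header */

static acpi_status do_example_work(u32 value)
{
    acpi_status status = AE_OK;

    ACPI_FUNCTION_TRACE(do_example_work);       /* trace function entry */

    if (!value) {
        /* Exit paths use return_ACPI_STATUS so the exit (and the
         * status code) show up in the trace output as well. */
        return_ACPI_STATUS(AE_BAD_PARAMETER);
    }

    /* ... real work would go here ... */

    return_ACPI_STATUS(status);                 /* trace function exit */
}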
Example 1: acpi_ex_do_logical_op
acpi_status
acpi_ex_do_logical_op (
u16 opcode,
union acpi_operand_object *operand0,
union acpi_operand_object *operand1,
u8 *logical_result)
{
union acpi_operand_object *local_operand1 = operand1;
acpi_integer integer0;
acpi_integer integer1;
u32 length0;
u32 length1;
acpi_status status = AE_OK;
u8 local_result = FALSE;
int compare;
ACPI_FUNCTION_TRACE ("ex_do_logical_op");
/*
* Convert the second operand if necessary. The first operand
* determines the type of the second operand, (See the Data Types
* section of the ACPI 3.0+ specification.) Both object types are
* guaranteed to be either Integer/String/Buffer by the operand
* resolution mechanism.
*/
switch (ACPI_GET_OBJECT_TYPE (operand0)) {
case ACPI_TYPE_INTEGER:
status = acpi_ex_convert_to_integer (operand1, &local_operand1, 16);
break;
case ACPI_TYPE_STRING:
status = acpi_ex_convert_to_string (operand1, &local_operand1,
ACPI_IMPLICIT_CONVERT_HEX);
break;
case ACPI_TYPE_BUFFER:
status = acpi_ex_convert_to_buffer (operand1, &local_operand1);
break;
default:
status = AE_AML_INTERNAL;
break;
}
if (ACPI_FAILURE (status)) {
goto cleanup;
}
/*
* Two cases: 1) Both Integers, 2) Both Strings or Buffers
*/
if (ACPI_GET_OBJECT_TYPE (operand0) == ACPI_TYPE_INTEGER) {
/*
* 1) Both operands are of type integer
* Note: local_operand1 may have changed above
*/
integer0 = operand0->integer.value;
integer1 = local_operand1->integer.value;
switch (opcode) {
case AML_LEQUAL_OP: /* LEqual (Operand0, Operand1) */
if (integer0 == integer1) {
local_result = TRUE;
}
break;
case AML_LGREATER_OP: /* LGreater (Operand0, Operand1) */
if (integer0 > integer1) {
local_result = TRUE;
}
break;
case AML_LLESS_OP: /* LLess (Operand0, Operand1) */
if (integer0 < integer1) {
local_result = TRUE;
}
break;
default:
status = AE_AML_INTERNAL;
break;
}
}
else {
/*
* 2) Both operands are Strings or both are Buffers
* Note: Code below takes advantage of common Buffer/String
* object fields. local_operand1 may have changed above. Use
* memcmp to handle nulls in buffers.
*/
length0 = operand0->buffer.length;
length1 = local_operand1->buffer.length;
/* Lexicographic compare: compare the data bytes */
//......... rest of the code omitted here .........
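The omitted part of Example 1 performs the String/Buffer comparison announced by the comment above. As an illustration only (this is not the omitted ACPICA code, and the helper name is hypothetical), a length-aware lexicographic compare built on memcmp typically looks like this:

#include <string.h>

/* Illustrative sketch: compare two byte buffers lexicographically,
 * tolerating embedded NUL bytes, as the comment above describes. */
static int buffer_lexicographic_compare(const unsigned char *data0, unsigned int length0,
                                        const unsigned char *data1, unsigned int length1)
{
    unsigned int min_length = (length0 < length1) ? length0 : length1;
    int compare = memcmp(data0, data1, min_length);

    if (compare != 0) {
        return compare;                 /* data bytes decide the ordering */
    }

    /* Equal prefixes: the shorter operand sorts first */
    return (int)length0 - (int)length1;
}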
Example 2: acpi_tb_load_namespace
/*******************************************************************************
*
* FUNCTION: acpi_tb_load_namespace
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
* the RSDT/XSDT.
*
******************************************************************************/
static acpi_status acpi_tb_load_namespace(void)
{
acpi_status status;
u32 i;
struct acpi_table_header *new_dsdt;
ACPI_FUNCTION_TRACE(tb_load_namespace);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/*
* Load the namespace. The DSDT is required, but any SSDT and
* PSDT tables are optional. Verify the DSDT.
*/
if (!acpi_gbl_root_table_list.current_table_count ||
!ACPI_COMPARE_NAME(&
(acpi_gbl_root_table_list.
tables[ACPI_TABLE_INDEX_DSDT].signature),
ACPI_SIG_DSDT)
||
ACPI_FAILURE(acpi_tb_verify_table
(&acpi_gbl_root_table_list.
tables[ACPI_TABLE_INDEX_DSDT]))) {
status = AE_NO_ACPI_TABLES;
goto unlock_and_exit;
}
/*
* Save the DSDT pointer for simple access. This is the mapped memory
* address. We must take care here because the address of the .Tables
* array can change dynamically as tables are loaded at run-time. Note:
* .Pointer field is not validated until after call to acpi_tb_verify_table.
*/
acpi_gbl_DSDT =
acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
/*
* Optionally copy the entire DSDT to local memory (instead of simply
* mapping it.) There are some BIOSs that corrupt or replace the original
* DSDT, creating the need for this option. Default is FALSE, do not copy
* the DSDT.
*/
if (acpi_gbl_copy_dsdt_locally) {
new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
if (new_dsdt) {
acpi_gbl_DSDT = new_dsdt;
}
}
/*
* Save the original DSDT header for detection of table corruption
* and/or replacement of the DSDT from outside the OS.
*/
ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
sizeof(struct acpi_table_header));
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
/* Load and parse tables */
status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if ((!ACPI_COMPARE_NAME
(&(acpi_gbl_root_table_list.tables[i].signature),
ACPI_SIG_SSDT)
&&
!ACPI_COMPARE_NAME(&
(acpi_gbl_root_table_list.tables[i].
signature), ACPI_SIG_PSDT))
||
ACPI_FAILURE(acpi_tb_verify_table
(&acpi_gbl_root_table_list.tables[i]))) {
continue;
}
if (no_auto_ssdt) {
#ifdef CONFIG_DEBUG_PRINTK
printk(KERN_WARNING "ACPI: SSDT ignored due to \"acpi_no_auto_ssdt\"\n");
#else
;
#endif
//......... rest of the code omitted here .........
Example 3: AcpiDsScopeStackPush
ACPI_STATUS
AcpiDsScopeStackPush (
ACPI_NAMESPACE_NODE *Node,
ACPI_OBJECT_TYPE Type,
ACPI_WALK_STATE *WalkState)
{
ACPI_GENERIC_STATE *ScopeInfo;
ACPI_GENERIC_STATE *OldScopeInfo;
ACPI_FUNCTION_TRACE (DsScopeStackPush);
if (!Node)
{
/* Invalid scope */
ACPI_ERROR ((AE_INFO, "Null scope parameter"));
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
/* Make sure object type is valid */
if (!AcpiUtValidObjectType (Type))
{
ACPI_WARNING ((AE_INFO,
"Invalid object type: 0x%X", Type));
}
/* Allocate a new scope object */
ScopeInfo = AcpiUtCreateGenericState ();
if (!ScopeInfo)
{
return_ACPI_STATUS (AE_NO_MEMORY);
}
/* Init new scope object */
ScopeInfo->Common.DescriptorType = ACPI_DESC_TYPE_STATE_WSCOPE;
ScopeInfo->Scope.Node = Node;
ScopeInfo->Common.Value = (UINT16) Type;
WalkState->ScopeDepth++;
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
"[%.2d] Pushed scope ", (UINT32) WalkState->ScopeDepth));
OldScopeInfo = WalkState->ScopeInfo;
if (OldScopeInfo)
{
ACPI_DEBUG_PRINT_RAW ((ACPI_DB_EXEC,
"[%4.4s] (%s)",
AcpiUtGetNodeName (OldScopeInfo->Scope.Node),
AcpiUtGetTypeName (OldScopeInfo->Common.Value)));
}
else
{
ACPI_DEBUG_PRINT_RAW ((ACPI_DB_EXEC,
"[\\___] (%s)", "ROOT"));
}
ACPI_DEBUG_PRINT_RAW ((ACPI_DB_EXEC,
", New scope -> [%4.4s] (%s)\n",
AcpiUtGetNodeName (ScopeInfo->Scope.Node),
AcpiUtGetTypeName (ScopeInfo->Common.Value)));
/* Push new scope object onto stack */
AcpiUtPushGenericState (&WalkState->ScopeInfo, ScopeInfo);
return_ACPI_STATUS (AE_OK);
}
Example 4: AcpiPsExecuteMethod
ACPI_STATUS
AcpiPsExecuteMethod (
ACPI_EVALUATE_INFO *Info)
{
ACPI_STATUS Status;
ACPI_PARSE_OBJECT *Op;
ACPI_WALK_STATE *WalkState;
ACPI_FUNCTION_TRACE (PsExecuteMethod);
/* Quick validation of DSDT header */
AcpiTbCheckDsdtHeader ();
/* Validate the Info and method Node */
if (!Info || !Info->ResolvedNode)
{
return_ACPI_STATUS (AE_NULL_ENTRY);
}
/* Init for new method, wait on concurrency semaphore */
Status = AcpiDsBeginMethodExecution (Info->ResolvedNode, Info->ObjDesc, NULL);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
/*
* The caller "owns" the parameters, so give each one an extra reference
*/
AcpiPsUpdateParameterList (Info, REF_INCREMENT);
/* Begin tracing if requested */
AcpiPsStartTrace (Info);
/*
* Execute the method. Performs parse simultaneously
*/
ACPI_DEBUG_PRINT ((ACPI_DB_PARSE,
"**** Begin Method Parse/Execute [%4.4s] **** Node=%p Obj=%p\n",
Info->ResolvedNode->Name.Ascii, Info->ResolvedNode, Info->ObjDesc));
/* Create and init a Root Node */
Op = AcpiPsCreateScopeOp ();
if (!Op)
{
Status = AE_NO_MEMORY;
goto Cleanup;
}
/* Create and initialize a new walk state */
Info->PassNumber = ACPI_IMODE_EXECUTE;
WalkState = AcpiDsCreateWalkState (
Info->ObjDesc->Method.OwnerId, NULL, NULL, NULL);
if (!WalkState)
{
Status = AE_NO_MEMORY;
goto Cleanup;
}
Status = AcpiDsInitAmlWalk (WalkState, Op, Info->ResolvedNode,
Info->ObjDesc->Method.AmlStart,
Info->ObjDesc->Method.AmlLength, Info, Info->PassNumber);
if (ACPI_FAILURE (Status))
{
AcpiDsDeleteWalkState (WalkState);
goto Cleanup;
}
if (Info->ObjDesc->Method.InfoFlags & ACPI_METHOD_MODULE_LEVEL)
{
WalkState->ParseFlags |= ACPI_PARSE_MODULE_LEVEL;
}
/* Invoke an internal method if necessary */
if (Info->ObjDesc->Method.InfoFlags & ACPI_METHOD_INTERNAL_ONLY)
{
Status = Info->ObjDesc->Method.Dispatch.Implementation (WalkState);
Info->ReturnObject = WalkState->ReturnDesc;
/* Cleanup states */
AcpiDsScopeStackClear (WalkState);
AcpiPsCleanupScope (&WalkState->ParserState);
AcpiDsTerminateControlMethod (WalkState->MethodDesc, WalkState);
AcpiDsDeleteWalkState (WalkState);
goto Cleanup;
}
/*
* Start method evaluation with an implicit return of zero. This is done
* for Windows compatibility.
//......... rest of the code omitted here .........
Example 5: acpi_ex_release_mutex
acpi_status
acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
struct acpi_walk_state *walk_state)
{
acpi_status status = AE_OK;
u8 previous_sync_level;
struct acpi_thread_state *owner_thread;
ACPI_FUNCTION_TRACE(ex_release_mutex);
if (!obj_desc) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
owner_thread = obj_desc->mutex.owner_thread;
/* The mutex must have been previously acquired in order to release it */
if (!owner_thread) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], not acquired",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
}
/* Must have a valid thread ID */
if (!walk_state->thread) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], null thread info",
acpi_ut_get_node_name(obj_desc->mutex.node)));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
/*
* The Mutex is owned, but this thread must be the owner.
* Special case for Global Lock, any thread can release
*/
if ((owner_thread->thread_id != walk_state->thread->thread_id) &&
(obj_desc != acpi_gbl_global_lock_mutex)) {
ACPI_ERROR((AE_INFO,
"Thread %u cannot release Mutex [%4.4s] acquired by thread %u",
(u32)walk_state->thread->thread_id,
acpi_ut_get_node_name(obj_desc->mutex.node),
(u32)owner_thread->thread_id));
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
/*
* The sync level of the mutex must be equal to the current sync level. In
* other words, the current level means that at least one mutex at that
* level is currently being held. Attempting to release a mutex of a
* different level can only mean that the mutex ordering rule is being
* violated. This behavior is clarified in ACPI 4.0 specification.
*/
if (obj_desc->mutex.sync_level != owner_thread->current_sync_level) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %u current %u",
acpi_ut_get_node_name(obj_desc->mutex.node),
obj_desc->mutex.sync_level,
walk_state->thread->current_sync_level));
return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
}
/*
* Get the previous sync_level from the head of the acquired mutex list.
* This handles the case where several mutexes at the same level have been
* acquired, but are not released in reverse order.
*/
previous_sync_level =
owner_thread->acquired_mutex_list->mutex.original_sync_level;
status = acpi_ex_release_mutex_object(obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
if (obj_desc->mutex.acquisition_depth == 0) {
/* Restore the previous sync_level */
owner_thread->current_sync_level = previous_sync_level;
}
return_ACPI_STATUS(status);
}
Example 6: AcpiNsLookup
ACPI_STATUS
AcpiNsLookup (
ACPI_GENERIC_STATE *ScopeInfo,
char *Pathname,
ACPI_OBJECT_TYPE Type,
ACPI_INTERPRETER_MODE InterpreterMode,
UINT32 Flags,
ACPI_WALK_STATE *WalkState,
ACPI_NAMESPACE_NODE **ReturnNode)
{
ACPI_STATUS Status;
char *Path = Pathname;
ACPI_NAMESPACE_NODE *PrefixNode;
ACPI_NAMESPACE_NODE *CurrentNode = NULL;
ACPI_NAMESPACE_NODE *ThisNode = NULL;
UINT32 NumSegments;
UINT32 NumCarats;
ACPI_NAME SimpleName;
ACPI_OBJECT_TYPE TypeToCheckFor;
ACPI_OBJECT_TYPE ThisSearchType;
UINT32 SearchParentFlag = ACPI_NS_SEARCH_PARENT;
UINT32 LocalFlags;
ACPI_FUNCTION_TRACE (NsLookup);
if (!ReturnNode)
{
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
LocalFlags = Flags &
~(ACPI_NS_ERROR_IF_FOUND | ACPI_NS_OVERRIDE_IF_FOUND |
ACPI_NS_SEARCH_PARENT);
*ReturnNode = ACPI_ENTRY_NOT_FOUND;
AcpiGbl_NsLookupCount++;
if (!AcpiGbl_RootNode)
{
return_ACPI_STATUS (AE_NO_NAMESPACE);
}
/* Get the prefix scope. A null scope means use the root scope */
if ((!ScopeInfo) ||
(!ScopeInfo->Scope.Node))
{
ACPI_DEBUG_PRINT ((ACPI_DB_NAMES,
"Null scope prefix, using root node (%p)\n",
AcpiGbl_RootNode));
PrefixNode = AcpiGbl_RootNode;
}
else
{
PrefixNode = ScopeInfo->Scope.Node;
if (ACPI_GET_DESCRIPTOR_TYPE (PrefixNode) != ACPI_DESC_TYPE_NAMED)
{
ACPI_ERROR ((AE_INFO, "%p is not a namespace node [%s]",
PrefixNode, AcpiUtGetDescriptorName (PrefixNode)));
return_ACPI_STATUS (AE_AML_INTERNAL);
}
if (!(Flags & ACPI_NS_PREFIX_IS_SCOPE))
{
/*
* This node might not be an actual "scope" node (such as a
* Device/Method, etc.) It could be a Package or other object
* node. Back up the tree to find the containing scope node.
*/
while (!AcpiNsOpensScope (PrefixNode->Type) &&
PrefixNode->Type != ACPI_TYPE_ANY)
{
PrefixNode = PrefixNode->Parent;
}
}
}
/* Save type. TBD: may be no longer necessary */
TypeToCheckFor = Type;
/*
* Begin examination of the actual pathname
*/
if (!Pathname)
{
/* A Null NamePath is allowed and refers to the root */
NumSegments = 0;
ThisNode = AcpiGbl_RootNode;
Path = "";
ACPI_DEBUG_PRINT ((ACPI_DB_NAMES,
"Null Pathname (Zero segments), Flags=%X\n", Flags));
}
else
{
/*
//......... rest of the code omitted here .........
Example 7: AcpiDsStoreObjectToLocal
ACPI_STATUS
AcpiDsStoreObjectToLocal (
UINT8 Type,
UINT32 Index,
ACPI_OPERAND_OBJECT *ObjDesc,
ACPI_WALK_STATE *WalkState)
{
ACPI_STATUS Status;
ACPI_NAMESPACE_NODE *Node;
ACPI_OPERAND_OBJECT *CurrentObjDesc;
ACPI_OPERAND_OBJECT *NewObjDesc;
ACPI_FUNCTION_TRACE (DsStoreObjectToLocal);
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Type=%2.2X Index=%u Obj=%p\n",
Type, Index, ObjDesc));
/* Parameter validation */
if (!ObjDesc)
{
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
/* Get the namespace node for the arg/local */
Status = AcpiDsMethodDataGetNode (Type, Index, WalkState, &Node);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
CurrentObjDesc = AcpiNsGetAttachedObject (Node);
if (CurrentObjDesc == ObjDesc)
{
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "Obj=%p already installed!\n",
ObjDesc));
return_ACPI_STATUS (Status);
}
/*
* If the reference count on the object is more than one, we must
* take a copy of the object before we store. A reference count
* of exactly 1 means that the object was just created during the
* evaluation of an expression, and we can safely use it since it
* is not used anywhere else.
*/
NewObjDesc = ObjDesc;
if (ObjDesc->Common.ReferenceCount > 1)
{
Status = AcpiUtCopyIobjectToIobject (
ObjDesc, &NewObjDesc, WalkState);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
}
/*
* If there is an object already in this slot, we either
* have to delete it, or if this is an argument and there
* is an object reference stored there, we have to do
* an indirect store!
*/
if (CurrentObjDesc)
{
/*
* Check for an indirect store if an argument
* contains an object reference (stored as a Node).
* We don't allow this automatic dereferencing for
* locals, since a store to a local should overwrite
* anything there, including an object reference.
*
* If both Arg0 and Local0 contain RefOf (Local4):
*
* Store (1, Arg0) - Causes indirect store to local4
* Store (1, Local0) - Stores 1 in local0, overwriting
* the reference to local4
* Store (1, DeRefof (Local0)) - Causes indirect store to local4
*
* Weird, but true.
*/
if (Type == ACPI_REFCLASS_ARG)
{
/*
* If we have a valid reference object that came from RefOf(),
* do the indirect store
*/
if ((ACPI_GET_DESCRIPTOR_TYPE (CurrentObjDesc) ==
ACPI_DESC_TYPE_OPERAND) &&
(CurrentObjDesc->Common.Type ==
ACPI_TYPE_LOCAL_REFERENCE) &&
(CurrentObjDesc->Reference.Class ==
ACPI_REFCLASS_REFOF))
{
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
"Arg (%p) is an ObjRef(Node), storing in node %p\n",
NewObjDesc, CurrentObjDesc));
/*
//......... rest of the code omitted here .........
Example 8: AcpiEvPciConfigRegionSetup
ACPI_STATUS
AcpiEvPciConfigRegionSetup (
ACPI_HANDLE Handle,
UINT32 Function,
void *HandlerContext,
void **RegionContext)
{
ACPI_STATUS Status = AE_OK;
ACPI_INTEGER PciValue;
ACPI_PCI_ID *PciId = *RegionContext;
ACPI_OPERAND_OBJECT *HandlerObj;
ACPI_NAMESPACE_NODE *ParentNode;
ACPI_NAMESPACE_NODE *PciRootNode;
ACPI_NAMESPACE_NODE *PciDeviceNode;
ACPI_OPERAND_OBJECT *RegionObj = (ACPI_OPERAND_OBJECT *) Handle;
ACPI_FUNCTION_TRACE (EvPciConfigRegionSetup);
HandlerObj = RegionObj->Region.Handler;
if (!HandlerObj)
{
/*
* No installed handler. This shouldn't happen because the dispatch
* routine checks before we get here, but we check again just in case.
*/
ACPI_DEBUG_PRINT ((ACPI_DB_OPREGION,
"Attempting to init a region %p, with no handler\n", RegionObj));
return_ACPI_STATUS (AE_NOT_EXIST);
}
*RegionContext = NULL;
if (Function == ACPI_REGION_DEACTIVATE)
{
if (PciId)
{
ACPI_FREE (PciId);
}
return_ACPI_STATUS (Status);
}
ParentNode = AcpiNsGetParentNode (RegionObj->Region.Node);
/*
* Get the _SEG and _BBN values from the device upon which the handler
* is installed.
*
* We need to get the _SEG and _BBN objects relative to the PCI BUS device.
* This is the device the handler has been registered to handle.
*/
/*
* If the AddressSpace.Node is still pointing to the root, we need
* to scan upward for a PCI Root bridge and re-associate the OpRegion
* handlers with that device.
*/
if (HandlerObj->AddressSpace.Node == AcpiGbl_RootNode)
{
/* Start search from the parent object */
PciRootNode = ParentNode;
while (PciRootNode != AcpiGbl_RootNode)
{
/* Get the _HID/_CID in order to detect a RootBridge */
if (AcpiEvIsPciRootBridge (PciRootNode))
{
/* Install a handler for this PCI root bridge */
Status = AcpiInstallAddressSpaceHandler (
(ACPI_HANDLE) PciRootNode,
ACPI_ADR_SPACE_PCI_CONFIG,
ACPI_DEFAULT_HANDLER, NULL, NULL);
if (ACPI_FAILURE (Status))
{
if (Status == AE_SAME_HANDLER)
{
/*
* It is OK if the handler is already installed on the root
* bridge. Still need to return a context object for the
* new PCI_Config operation region, however.
*/
Status = AE_OK;
}
else
{
ACPI_EXCEPTION ((AE_INFO, Status,
"Could not install PciConfig handler for Root Bridge %4.4s",
AcpiUtGetNodeName (PciRootNode)));
}
}
break;
}
PciRootNode = AcpiNsGetParentNode (PciRootNode);
}
/* PCI root bridge not found, use namespace root node */
}
//......... rest of the code omitted here .........
Example 9: AcpiDsMethodDataGetValue
ACPI_STATUS
AcpiDsMethodDataGetValue (
UINT8 Type,
UINT32 Index,
ACPI_WALK_STATE *WalkState,
ACPI_OPERAND_OBJECT **DestDesc)
{
ACPI_STATUS Status;
ACPI_NAMESPACE_NODE *Node;
ACPI_OPERAND_OBJECT *Object;
ACPI_FUNCTION_TRACE (DsMethodDataGetValue);
/* Validate the object descriptor */
if (!DestDesc)
{
ACPI_ERROR ((AE_INFO, "Null object descriptor pointer"));
return_ACPI_STATUS (AE_BAD_PARAMETER);
}
/* Get the namespace node for the arg/local */
Status = AcpiDsMethodDataGetNode (Type, Index, WalkState, &Node);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
/* Get the object from the node */
Object = Node->Object;
/* Examine the returned object, it must be valid. */
if (!Object)
{
/*
* Index points to uninitialized object.
* This means that either 1) The expected argument was
* not passed to the method, or 2) A local variable
* was referenced by the method (via the ASL)
* before it was initialized. Either case is an error.
*/
/* If slack enabled, init the LocalX/ArgX to an Integer of value zero */
if (AcpiGbl_EnableInterpreterSlack)
{
Object = AcpiUtCreateIntegerObject ((UINT64) 0);
if (!Object)
{
return_ACPI_STATUS (AE_NO_MEMORY);
}
Node->Object = Object;
}
/* Otherwise, return the error */
else switch (Type)
{
case ACPI_REFCLASS_ARG:
ACPI_ERROR ((AE_INFO,
"Uninitialized Arg[%u] at node %p",
Index, Node));
return_ACPI_STATUS (AE_AML_UNINITIALIZED_ARG);
case ACPI_REFCLASS_LOCAL:
/*
* No error message for this case, will be trapped again later to
* detect and ignore cases of Store(LocalX,LocalX)
*/
return_ACPI_STATUS (AE_AML_UNINITIALIZED_LOCAL);
default:
ACPI_ERROR ((AE_INFO, "Not a Arg/Local opcode: 0x%X", Type));
return_ACPI_STATUS (AE_AML_INTERNAL);
}
}
/*
* The Index points to an initialized and valid object.
* Return an additional reference to the object
*/
*DestDesc = Object;
AcpiUtAddReference (Object);
return_ACPI_STATUS (AE_OK);
}
Example 10: acpi_ev_gpe_dispatch
u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
acpi_os_gpe_count(gpe_number);
/*
* If edge-triggered, clear the GPE status bit now. Note that
* level-triggered events are cleared after the GPE is serviced.
*/
if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
ACPI_GPE_EDGE_TRIGGERED) {
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to clear GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
}
/*
* Dispatch the GPE to either an installed handler, or the control method
* associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
* it and do not attempt to run the method. If there is neither a handler
* nor a method, we disable this GPE to prevent further such pointless
* events from firing.
*/
switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
case ACPI_GPE_DISPATCH_HANDLER:
/*
* Invoke the installed handler (at interrupt level)
* Ignore return status for now.
* TBD: leave GPE disabled on error?
*/
(void)gpe_event_info->dispatch.handler->address(gpe_event_info->
dispatch.
handler->
context);
/* It is now safe to clear level-triggered events. */
if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
ACPI_GPE_LEVEL_TRIGGERED) {
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to clear GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
}
break;
case ACPI_GPE_DISPATCH_METHOD:
/*
* Disable the GPE, so it doesn't keep firing before the method has a
* chance to run (it runs asynchronously with interrupts enabled).
*/
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to disable GPE[%2X]",
gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
/*
* Execute the method associated with the GPE
* NOTE: Level-triggered GPEs are cleared after the method completes.
*/
status = acpi_os_execute(OSL_GPE_HANDLER,
acpi_ev_asynch_execute_gpe_method,
gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to queue handler for GPE[%2X] - event disabled",
gpe_number));
}
break;
default:
/* No handler or method to run! */
ACPI_ERROR((AE_INFO,
"No handler or method for GPE[%2X], disabling event",
gpe_number));
/*
* Disable the GPE. The GPE will remain disabled until the ACPICA
* Core Subsystem is restarted, or a handler is installed.
*/
status = acpi_ev_disable_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
//......... rest of the code omitted here .........
Example 11: acpi_ev_asynch_execute_gpe_method
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
struct acpi_gpe_event_info *gpe_event_info = (void *)context;
acpi_status status;
struct acpi_gpe_event_info local_gpe_event_info;
struct acpi_evaluate_info *info;
ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
return_VOID;
}
/* Must revalidate the gpe_number/gpe_block */
if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return_VOID;
}
/* Set the GPE flags for return to enabled state */
(void)acpi_ev_enable_gpe(gpe_event_info, FALSE);
/*
* Take a snapshot of the GPE info for this level - we copy the info to
* prevent a race condition with remove_handler/remove_block.
*/
ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
sizeof(struct acpi_gpe_event_info));
status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
return_VOID;
}
/*
* Must check for control method type dispatch one more time to avoid a
* race with ev_gpe_install_handler
*/
if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_METHOD) {
/* Allocate the evaluation information block */
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
status = AE_NO_MEMORY;
} else {
/*
* Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
* control method that corresponds to this GPE
*/
info->prefix_node =
local_gpe_event_info.dispatch.method_node;
info->flags = ACPI_IGNORE_RETURN_VALUE;
status = acpi_ns_evaluate(info);
ACPI_FREE(info);
}
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"while evaluating GPE method [%4.4s]",
acpi_ut_get_node_name
(local_gpe_event_info.dispatch.
method_node)));
}
}
/* Defer enabling of GPE until all notify handlers are done */
acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
gpe_event_info);
return_VOID;
}
Example 12: acpi_ns_search_node
acpi_status
acpi_ns_search_node (
u32 target_name,
struct acpi_namespace_node *node,
acpi_object_type type,
struct acpi_namespace_node **return_node)
{
struct acpi_namespace_node *next_node;
ACPI_FUNCTION_TRACE ("ns_search_node");
#ifdef ACPI_DEBUG_OUTPUT
if (ACPI_LV_NAMES & acpi_dbg_level) {
char *scope_name;
scope_name = acpi_ns_get_external_pathname (node);
if (scope_name) {
ACPI_DEBUG_PRINT ((ACPI_DB_NAMES,
"Searching %s (%p) For [%4.4s] (%s)\n",
scope_name, node, (char *) &target_name,
acpi_ut_get_type_name (type)));
ACPI_MEM_FREE (scope_name);
}
}
#endif
/*
* Search for name at this namespace level, which is to say that we
* must search for the name among the children of this object
*/
next_node = node->child;
while (next_node) {
/* Check for match against the name */
if (next_node->name.integer == target_name) {
/* Resolve a control method alias if any */
if (acpi_ns_get_type (next_node) == ACPI_TYPE_LOCAL_METHOD_ALIAS) {
next_node = ACPI_CAST_PTR (struct acpi_namespace_node, next_node->object);
}
/*
* Found matching entry.
*/
ACPI_DEBUG_PRINT ((ACPI_DB_NAMES,
"Name [%4.4s] (%s) %p found in scope [%4.4s] %p\n",
(char *) &target_name, acpi_ut_get_type_name (next_node->type),
next_node, acpi_ut_get_node_name (node), node));
*return_node = next_node;
return_ACPI_STATUS (AE_OK);
}
/*
* The last entry in the list points back to the parent,
* so a flag is used to indicate the end-of-list
*/
if (next_node->flags & ANOBJ_END_OF_PEER_LIST) {
/* Searched entire list, we are done */
break;
}
/* Didn't match name, move on to the next peer object */
next_node = next_node->peer;
}
//......... rest of the code omitted here .........
Example 13: AcpiEvMatchGpeMethod
ACPI_STATUS
AcpiEvMatchGpeMethod (
ACPI_HANDLE ObjHandle,
UINT32 Level,
void *Context,
void **ReturnValue)
{
ACPI_NAMESPACE_NODE *MethodNode = ACPI_CAST_PTR (ACPI_NAMESPACE_NODE, ObjHandle);
ACPI_GPE_WALK_INFO *WalkInfo = ACPI_CAST_PTR (ACPI_GPE_WALK_INFO, Context);
ACPI_GPE_EVENT_INFO *GpeEventInfo;
UINT32 GpeNumber;
char Name[ACPI_NAME_SIZE + 1];
UINT8 Type;
ACPI_FUNCTION_TRACE (EvMatchGpeMethod);
/* Check if requested OwnerId matches this OwnerId */
if ((WalkInfo->ExecuteByOwnerId) &&
(MethodNode->OwnerId != WalkInfo->OwnerId))
{
return_ACPI_STATUS (AE_OK);
}
/*
* Match and decode the _Lxx and _Exx GPE method names
*
* 1) Extract the method name and null terminate it
*/
ACPI_MOVE_32_TO_32 (Name, &MethodNode->Name.Integer);
Name[ACPI_NAME_SIZE] = 0;
/* 2) Name must begin with an underscore */
if (Name[0] != '_')
{
return_ACPI_STATUS (AE_OK); /* Ignore this method */
}
/*
* 3) Edge/Level determination is based on the 2nd character
* of the method name
*/
switch (Name[1])
{
case 'L':
Type = ACPI_GPE_LEVEL_TRIGGERED;
break;
case 'E':
Type = ACPI_GPE_EDGE_TRIGGERED;
break;
default:
/* Unknown method type, just ignore it */
ACPI_DEBUG_PRINT ((ACPI_DB_LOAD,
"Ignoring unknown GPE method type: %s "
"(name not of form _Lxx or _Exx)", Name));
return_ACPI_STATUS (AE_OK);
}
/* 4) The last two characters of the name are the hex GPE Number */
GpeNumber = strtoul (&Name[2], NULL, 16);
if (GpeNumber == ACPI_UINT32_MAX)
{
/* Conversion failed; invalid method, just ignore it */
ACPI_DEBUG_PRINT ((ACPI_DB_LOAD,
"Could not extract GPE number from name: %s "
"(name is not of form _Lxx or _Exx)", Name));
return_ACPI_STATUS (AE_OK);
}
/* Ensure that we have a valid GPE number for this GPE block */
GpeEventInfo = AcpiEvLowGetGpeInfo (GpeNumber, WalkInfo->GpeBlock);
if (!GpeEventInfo)
{
/*
* This GpeNumber is not valid for this GPE block, just ignore it.
* However, it may be valid for a different GPE block, since GPE0
* and GPE1 methods both appear under \_GPE.
*/
return_ACPI_STATUS (AE_OK);
}
if ((ACPI_GPE_DISPATCH_TYPE (GpeEventInfo->Flags) ==
ACPI_GPE_DISPATCH_HANDLER) ||
(ACPI_GPE_DISPATCH_TYPE (GpeEventInfo->Flags) ==
ACPI_GPE_DISPATCH_RAW_HANDLER))
{
/* If there is already a handler, ignore this GPE method */
//......... rest of the code omitted here .........
Example 14: AcpiEvGpeInitialize
ACPI_STATUS
AcpiEvGpeInitialize (
void)
{
UINT32 RegisterCount0 = 0;
UINT32 RegisterCount1 = 0;
UINT32 GpeNumberMax = 0;
ACPI_STATUS Status;
ACPI_FUNCTION_TRACE (EvGpeInitialize);
ACPI_DEBUG_PRINT_RAW ((ACPI_DB_INIT,
"Initializing General Purpose Events (GPEs):\n"));
Status = AcpiUtAcquireMutex (ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE (Status))
{
return_ACPI_STATUS (Status);
}
/*
* Initialize the GPE Block(s) defined in the FADT
*
* Why the GPE register block lengths are divided by 2: From the ACPI
* Spec, section "General-Purpose Event Registers", we have:
*
* "Each register block contains two registers of equal length
* GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
* GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
* The length of the GPE1_STS and GPE1_EN registers is equal to
* half the GPE1_LEN. If a generic register block is not supported
* then its respective block pointer and block length values in the
* FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
* to be the same size."
*/
/*
* Determine the maximum GPE number for this machine.
*
* Note: both GPE0 and GPE1 are optional, and either can exist without
* the other.
*
* If EITHER the register length OR the block address are zero, then that
* particular block is not supported.
*/
if (AcpiGbl_FADT.Gpe0BlockLength &&
AcpiGbl_FADT.XGpe0Block.Address)
{
/* GPE block 0 exists (has both length and address > 0) */
RegisterCount0 = (UINT16) (AcpiGbl_FADT.Gpe0BlockLength / 2);
GpeNumberMax = (RegisterCount0 * ACPI_GPE_REGISTER_WIDTH) - 1;
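/*
 * Worked example with hypothetical FADT values (for illustration only):
 * if Gpe0BlockLength is 8 bytes, the block holds a 4-byte GPE0_STS and a
 * 4-byte GPE0_EN register, so RegisterCount0 = 8 / 2 = 4 and
 * GpeNumberMax = (4 * ACPI_GPE_REGISTER_WIDTH) - 1 = (4 * 8) - 1 = 31,
 * i.e. this block covers GPE numbers 0x00 through 0x1F.
 */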
/* Install GPE Block 0 */
Status = AcpiEvCreateGpeBlock (AcpiGbl_FadtGpeDevice,
AcpiGbl_FADT.XGpe0Block.Address,
AcpiGbl_FADT.XGpe0Block.SpaceId,
RegisterCount0, 0,
AcpiGbl_FADT.SciInterrupt, &AcpiGbl_GpeFadtBlocks[0]);
if (ACPI_FAILURE (Status))
{
ACPI_EXCEPTION ((AE_INFO, Status,
"Could not create GPE Block 0"));
}
}
if (AcpiGbl_FADT.Gpe1BlockLength &&
AcpiGbl_FADT.XGpe1Block.Address)
{
/* GPE block 1 exists (has both length and address > 0) */
RegisterCount1 = (UINT16) (AcpiGbl_FADT.Gpe1BlockLength / 2);
/* Check for GPE0/GPE1 overlap (if both banks exist) */
if ((RegisterCount0) &&
(GpeNumberMax >= AcpiGbl_FADT.Gpe1Base))
{
ACPI_ERROR ((AE_INFO,
"GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
"(GPE %u to %u) - Ignoring GPE1",
GpeNumberMax, AcpiGbl_FADT.Gpe1Base,
AcpiGbl_FADT.Gpe1Base +
((RegisterCount1 * ACPI_GPE_REGISTER_WIDTH) - 1)));
/* Ignore GPE1 block by setting the register count to zero */
RegisterCount1 = 0;
}
else
{
/* Install GPE Block 1 */
Status = AcpiEvCreateGpeBlock (AcpiGbl_FadtGpeDevice,
AcpiGbl_FADT.XGpe1Block.Address,
AcpiGbl_FADT.XGpe1Block.SpaceId,
//......... rest of the code omitted here .........
Example 15: AcpiTbInstallAndLoadTable
ACPI_STATUS
AcpiTbInstallAndLoadTable (
ACPI_TABLE_HEADER *Table,
ACPI_PHYSICAL_ADDRESS Address,
UINT8 Flags,
BOOLEAN Override,
UINT32 *TableIndex)
{
ACPI_STATUS Status;
UINT32 i;
ACPI_OWNER_ID OwnerId;
ACPI_FUNCTION_TRACE (AcpiLoadTable);
(void) AcpiUtAcquireMutex (ACPI_MTX_TABLES);
/* Install the table and load it into the namespace */
Status = AcpiTbInstallStandardTable (Address, Flags, TRUE,
Override, &i);
if (ACPI_FAILURE (Status))
{
goto UnlockAndExit;
}
/*
* Note: Now table is "INSTALLED", it must be validated before
* using.
*/
Status = AcpiTbValidateTable (&AcpiGbl_RootTableList.Tables[i]);
if (ACPI_FAILURE (Status))
{
goto UnlockAndExit;
}
(void) AcpiUtReleaseMutex (ACPI_MTX_TABLES);
Status = AcpiNsLoadTable (i, AcpiGbl_RootNode);
/* Execute any module-level code that was found in the table */
if (!AcpiGbl_ParseTableAsTermList && AcpiGbl_GroupModuleLevelCode)
{
AcpiNsExecModuleCodeList ();
}
/*
* Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is
* responsible for discovering any new wake GPEs by running _PRW methods
* that may have been loaded by this table.
*/
Status = AcpiTbGetOwnerId (i, &OwnerId);
if (ACPI_SUCCESS (Status))
{
AcpiEvUpdateGpes (OwnerId);
}
/* Invoke table handler if present */
if (AcpiGbl_TableHandler)
{
(void) AcpiGbl_TableHandler (ACPI_TABLE_EVENT_LOAD, Table,
AcpiGbl_TableHandlerContext);
}
(void) AcpiUtAcquireMutex (ACPI_MTX_TABLES);
UnlockAndExit:
*TableIndex = i;
(void) AcpiUtReleaseMutex (ACPI_MTX_TABLES);
return_ACPI_STATUS (Status);
}