本文整理汇总了C++中tr::SymbolReference类的典型用法代码示例。如果您正苦于以下问题:C++ SymbolReference类的具体用法?C++ SymbolReference怎么用?C++ SymbolReference使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了SymbolReference类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: sizeof
// Materialize this IlValue into an auto (temporary) so it can be referenced
// from blocks other than the one that computed it.
//
// On the first cross-block use, a temporary symref is created for the value's
// data type, named "_T<cpIndex>" and registered with the MethodBuilder, and a
// store of the computed node into that temporary replaces the treetop that
// originally anchored the value.  Subsequent calls are no-ops because
// _symRefThatCanBeUsedInOtherBlocks is already set.
void
OMR::IlValue::storeToAuto()
   {
   if (_symRefThatCanBeUsedInOtherBlocks == NULL)
      {
      TR::Compilation *comp = TR::comp();

      // First use from another block: need to create a symref and insert a
      // store tree where the node was computed.
      TR::SymbolReference *symRef = comp->getSymRefTab()->createTemporary(_methodBuilder->methodSymbol(), _nodeThatComputesValue->getDataType());
      symRef->getSymbol()->setNotCollected();

      // 2 ("_T") + max 10 digits for a 32-bit unsigned value + trailing NUL.
      const size_t nameLength = 2 + 10 + 1;
      char *name = (char *) comp->trMemory()->allocateHeapMemory(nameLength * sizeof(char));
      // snprintf instead of sprintf: guarantees the buffer cannot be overrun
      // even if the length reasoning above ever goes stale.
      snprintf(name, nameLength, "_T%u", symRef->getCPIndex());
      symRef->getSymbol()->getAutoSymbol()->setName(name);
      _methodBuilder->defineSymbol(name, symRef);

      // Create the store and its treetop, splicing the new tree in where the
      // old anchor was, then unlink the old anchor.
      TR::Node *storeNode = TR::Node::createStore(symRef, _nodeThatComputesValue);
      TR::TreeTop *prevTreeTop = _treeTopThatAnchorsValue->getPrevTreeTop();
      TR::TreeTop *newTree = TR::TreeTop::create(comp, storeNode);
      newTree->insertNewTreeTop(prevTreeTop, _treeTopThatAnchorsValue);
      _treeTopThatAnchorsValue->unlink(true);
      _treeTopThatAnchorsValue = newTree;

      _symRefThatCanBeUsedInOtherBlocks = symRef;
      }
   }
示例2: cg
// Build an indirect (computed) call dispatch for the AMD64 system linkage:
// evaluate the VFT (call target) child, set up argument/volatile register
// dependencies, emit the indirect CALL, and return the register holding the
// call's result (NULL for calls of type TR::NoType).
TR::Register *
TR::AMD64SystemLinkage::buildIndirectDispatch(TR::Node *callNode)
{
TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
// System linkage only handles computed indirect calls.
TR_ASSERT(methodSymRef->getSymbol()->castToMethodSymbol()->isComputed(), "system linkage only supports computed indirect call for now %p\n", callNode);
// Evaluate VFT
//
TR::Register *vftRegister;
TR::Node *vftNode = callNode->getFirstChild();
if (vftNode->getRegister())
{
vftRegister = vftNode->getRegister();
}
else
{
vftRegister = cg()->evaluate(vftNode);
}
// Allocate adequate register dependencies.
//
// pre = number of argument registers + 1 for VFT register
// post = number of volatile + VMThread + return register
//
uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters() + 1;
uint32_t post = getProperties().getNumVolatileRegisters() + 1 + (callNode->getDataType() == TR::NoType ? 0 : 1);
#if defined (PYTHON) && 0
// Treat all preserved GP regs as volatile until register map support available.
//
post += getProperties().getNumberOfPreservedGPRegisters();
#endif
// callDeps carries the pre-conditions for arguments plus a single
// post-condition slot used below to pin the VFT in a scratch register.
TR::RegisterDependencyConditions *callDeps = generateRegisterDependencyConditions(pre, 1, cg());
TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
callDeps->addPostCondition(vftRegister, scratchRegIndex, cg());
callDeps->stopAddingPostConditions();
// Evaluate outgoing arguments on the system stack and build pre-conditions.
//
int32_t memoryArgSize = buildArgs(callNode, callDeps);
// Dispatch
//
generateRegInstruction(CALLReg, callNode, vftRegister, callDeps, cg());
cg()->resetIsLeafMethod();
// Build label post-conditions
//
// The volatile/return-register post-conditions are attached to a label that
// follows the call rather than to the call instruction itself.
TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());
TR::Register *returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
postDeps->stopAddingPostConditions();
TR::LabelSymbol *postDepLabel = generateLabelSymbol(cg());
generateLabelInstruction(LABEL, callNode, postDepLabel, postDeps, cg());
return returnReg;
}
示例3: i
// Populate 'aliases' with every symref this literal-pool symref may alias:
// all generic-int shadow symrefs that touch the literal pool, this symref
// itself, and the table's "unsafe" symref numbers.
void
OMR::SymbolReference::setLiteralPoolAliases(TR_BitVector * aliases, TR::SymbolReferenceTable * symRefTab)
   {
   // Nothing to do if no generic-int shadow symbol exists yet.
   if (!symRefTab->findGenericIntShadowSymbol())
      return;

   TR_SymRefIterator it(symRefTab->aliasBuilder.genericIntShadowSymRefs(), symRefTab);
   for (TR::SymbolReference *candidate = it.getNext(); candidate; candidate = it.getNext())
      {
      bool touchesLiteralPool = candidate->isLiteralPoolAddress() || candidate->isFromLiteralPool();
      if (touchesLiteralPool)
         aliases->set(candidate->getReferenceNumber());
      }

   // A literal-pool symref always aliases itself and all unsafe symrefs.
   aliases->set(self()->getReferenceNumber());
   *aliases |= symRefTab->aliasBuilder.unsafeSymRefNumbers();
   }
示例4:
// Resolved casts that are not to abstract, interface, or array classes need a
// super test.  Decides whether an inline superclass test should be generated
// for this instanceof/checkcast node.
bool OMR::TreeEvaluator::instanceOfOrCheckCastNeedSuperTest(TR::Node * node, TR::CodeGenerator *cg)
   {
   TR::Node *castClassNode = node->getSecondChild();
   TR::MethodSymbol *helperSym = node->getSymbol()->castToMethodSymbol();
   TR::SymbolReference *castClassSymRef = castClassNode->getSymbolReference();

   if (!TR::TreeEvaluator::isStaticClassSymRef(castClassSymRef))
      {
      // We could theoretically do a super test on something with no sym, but it
      // would require significant changes to platform code.  The benefit is
      // little at this point (shows up from reference arraycopy reductions).
      return cg->supportsInliningOfIsInstance()
             && node->getOpCodeValue() == TR::instanceof
             && node->getSecondChild()->getOpCodeValue() != TR::loadaddr;
      }

   TR::StaticSymbol *castClassSym = castClassSymRef->getSymbol()->getStaticSymbol();

   // An unresolved cast class can never take the inline super-test path.
   if (castClassSymRef->isUnresolved())
      return false;

   // If the class is a regular class (i.e., not an interface nor an array) and
   // not known to be a final class, an inline superclass test can be generated.
   // If the helper does not preserve all the registers there will not be
   // enough registers to do the superclass test inline.
   // Also, don't generate the superclass test if optimizing for space.
   //
   TR_OpaqueClassBlock *clazz = NULL;
   if (castClassSym)
      clazz = (TR_OpaqueClassBlock *) castClassSym->getStaticAddress();

   return clazz
          && !TR::Compiler->cls.isClassArray(cg->comp(), clazz)
          && !TR::Compiler->cls.isInterfaceClass(cg->comp(), clazz)
          && !TR::Compiler->cls.isClassFinal(cg->comp(), clazz)
          && helperSym->preservesAllRegisters()
          && !cg->comp()->getOption(TR_OptimizeForSpace);
   }
示例5: isSupportedNodeForPREPerformance
// Performance filter for PRE candidates: rejects nodes whose commoning would
// not pay off (cheap auto/parm stores, unmaterialized constants, and loads of
// the java/lang/Class-from-class symref).
bool TR_LocalAnalysis::isSupportedNodeForPREPerformance(TR::Node *node, TR::Compilation *comp, TR::Node *parent)
   {
   TR::ILOpCode &op = node->getOpCode();
   TR::SymbolReference *symRef = op.hasSymbolReference() ? node->getSymbolReference() : NULL;

   // Stores to autos/parms are not worth commoning.
   if (op.isStore() && symRef && symRef->getSymbol()->isAutoOrParm())
      {
      //dumpOptDetails("Returning false for store %p\n", node);
      return false;
      }

   // Constants the code generator will not materialize are free to recompute.
   if (op.isLoadConst() && !comp->cg()->isMaterialized(node))
      return false;

   // Skip loads of the java/lang/Class-from-class-symbol symref.
   bool isJavaLangClassLoad = op.hasSymbolReference()
      && node->getSymbolReference() == comp->getSymRefTab()->findJavaLangClassFromClassSymbolRef();
   if (isJavaLangClassLoad)
      return false;

   return true;
   }
示例6: stackRegion
// Walk the trees from startTree to endTree, eliminating treetops that merely
// anchor already-evaluated or side-effect-free computations.  Returns 0 when
// processing stops early because the visit count limit is approached.
// NOTE: this listing is truncated; the remainder of the function is not shown.
int32_t TR::DeadTreesElimination::process(TR::TreeTop *startTree, TR::TreeTop *endTree)
{
TR::StackMemoryRegion stackRegion(*comp()->trMemory());
LongestPathMap longestPaths(std::less<TR::Node*>(), stackRegion);
typedef TR::typed_allocator<CRAnchor, TR::Region&> CRAnchorAlloc;
typedef TR::forward_list<CRAnchor, CRAnchorAlloc> CRAnchorList;
CRAnchorList anchors(stackRegion);
// First pass: initialize future-use counts on every node in the range.
vcount_t visitCount = comp()->incOrResetVisitCount();
TR::TreeTop *treeTop;
for (treeTop = startTree; (treeTop != endTree); treeTop = treeTop->getNextTreeTop())
treeTop->getNode()->initializeFutureUseCounts(visitCount);
TR::Block *block = NULL;
bool delayedRegStoresBeforeThisPass = _delayedRegStores;
// Update visitCount as they are used in this optimization and need to be
visitCount = comp()->incOrResetVisitCount();
for (TR::TreeTopIterator iter(startTree, comp()); iter != endTree; ++iter)
{
TR::Node *node = iter.currentTree()->getNode();
if (node->getOpCodeValue() == TR::BBStart)
{
block = node->getBlock();
// The longest-path map only survives within an extended basic block.
if (!block->isExtensionOfPreviousBlock())
longestPaths.clear();
}
// Bail out before the visit count can overflow MAX_VCOUNT.
int vcountLimit = MAX_VCOUNT - 3;
if (comp()->getVisitCount() > vcountLimit)
{
dumpOptDetails(comp(),
"%sVisit count %d exceeds limit %d; stopping\n",
optDetailString(), comp()->getVisitCount(), vcountLimit);
return 0;
}
// correct at all intermediate stages
//
// Skip trees that must be kept: non-treetop opcodes, anchors/regstores whose
// child is multiply referenced, and (unless regstores were delayed in an
// earlier pass) regstores that are not at the end of the block.
if ((node->getOpCodeValue() != TR::treetop) &&
(!node->getOpCode().isAnchor() || (node->getFirstChild()->getReferenceCount() != 1)) &&
(!node->getOpCode().isStoreReg() || (node->getFirstChild()->getReferenceCount() != 1)) &&
(delayedRegStoresBeforeThisPass ||
(iter.currentTree() == block->getLastRealTreeTop()) ||
!node->getOpCode().isStoreReg() ||
(node->getVisitCount() == visitCount)))
{
if (node->getOpCode().isAnchor() && node->getFirstChild()->getOpCode().isLoadIndirect())
anchors.push_front(CRAnchor(iter.currentTree(), block));
TR::TransformUtil::recursivelySetNodeVisitCount(node, visitCount);
continue;
}
if (node->getOpCode().isStoreReg())
_delayedRegStores = true;
// Bypass PassThrough wrappers so the real computation is examined.
TR::Node *child = node->getFirstChild();
if (child->getOpCodeValue() == TR::PassThrough)
{
TR::Node *newChild = child->getFirstChild();
node->setAndIncChild(0, newChild);
newChild->incFutureUseCount();
if (child->getReferenceCount() <= 1)
optimizer()->prepareForNodeRemoval(child);
child->recursivelyDecReferenceCount();
recursivelyDecFutureUseCount(child);
child = newChild;
}
bool treeTopCanBeEliminated = false;
// If the treetop child has been seen before then it must be anchored
// somewhere above already; so we don't need the treetop to be anchoring
// this node (as the computation is already done at the first reference to
// the node).
//
if (visitCount == child->getVisitCount())
{
treeTopCanBeEliminated = true;
}
else
{
TR::ILOpCode &childOpCode = child->getOpCode();
TR::ILOpCodes opCodeValue = childOpCode.getOpCodeValue();
bool seenConditionalBranch = false;
// Calls to resolved methods known to be side-effect free may be dropped.
bool callWithNoSideEffects = child->getOpCode().isCall() &&
child->getSymbolReference()->getSymbol()->isResolvedMethod() &&
child->getSymbolReference()->getSymbol()->castToResolvedMethodSymbol()->isSideEffectFree();
if (callWithNoSideEffects)
{
treeTopCanBeEliminated = true;
}
else if (!((childOpCode.isCall() && !callWithNoSideEffects) ||
childOpCode.isStore() ||
//......... (remainder of this example omitted in the original listing) .........
示例7: isAnySymInDefinedOrUsedBy
// Returns true if there is any constraint to the move
//
// Checks whether movingTreeRefInfo's tree can be safely moved past
// currentNode: monitors, GC points, checks/calls, object-header stores,
// allocation fences (on Power), and def/use overlaps all block the motion.
// NOTE: this listing is truncated; the remainder of the function is not shown.
bool TR_LocalLiveRangeReduction::isAnySymInDefinedOrUsedBy(TR_TreeRefInfo *currentTreeRefInfo, TR::Node *currentNode, TR_TreeRefInfo *movingTreeRefInfo )
{
TR::Node *movingNode = movingTreeRefInfo->getTreeTop()->getNode();
// ignore anchors
//
if (movingNode->getOpCode().isAnchor())
movingNode = movingNode->getFirstChild();
TR::ILOpCode &opCode = currentNode->getOpCode();
////if ((opCode.getOpCodeValue() == TR::monent) || (opCode.getOpCodeValue() == TR::monexit))
if (nodeMaybeMonitor(currentNode))
{
if (trace())
traceMsg(comp(),"cannot move %p beyond monitor %p\n",movingNode,currentNode);
return true;
}
// Don't move gc points or things across gc points
//
if (movingNode->canGCandReturn() ||
currentNode->canGCandReturn())
{
if (trace())
traceMsg(comp(), "cannot move gc points %p past %p\n", movingNode, currentNode);
return true;
}
// Don't move checks or calls at all
//
if (containsCallOrCheck(movingTreeRefInfo,movingNode))
{
if (trace())
traceMsg(comp(),"cannot move check or call %s\n", getDebug()->getName(movingNode));
return true;
}
// Don't move object header store past a GC point
//
if ((currentNode->getOpCode().isWrtBar() || currentNode->canCauseGC()) && mayBeObjectHeaderStore(movingNode, fe()))
{
if (trace())
traceMsg(comp(),"cannot move possible object header store %s past GC point %s\n", getDebug()->getName(movingNode), getDebug()->getName(currentNode));
return true;
}
// Power-specific: allocation fences act as scheduling barriers.
if (TR::Compiler->target.cpu.isPower() && opCode.getOpCodeValue() == TR::allocationFence)
{
// Can't move allocations past flushes
if (movingNode->getOpCodeValue() == TR::treetop &&
movingNode->getFirstChild()->getOpCode().isNew() &&
(currentNode->getAllocation() == NULL ||
currentNode->getAllocation() == movingNode->getFirstChild()))
{
if (trace())
{
traceMsg(comp(),"cannot move %p beyond flush %p - ", movingNode, currentNode);
if (currentNode->getAllocation() == NULL)
traceMsg(comp(),"(flush with null allocation)\n");
else
traceMsg(comp(),"(flush for allocation %p)\n", currentNode->getAllocation());
}
return true;
}
// Can't move certain stores past flushes
// Exclude all indirect stores, they may be for stack allocs, in which case the flush is needed at least as a scheduling barrier
// Direct stores to autos and parms are the only safe candidates
if (movingNode->getOpCode().isStoreIndirect() ||
(movingNode->getOpCode().isStoreDirect() && !movingNode->getSymbol()->isParm() && !movingNode->getSymbol()->isAuto()))
{
if (trace())
traceMsg(comp(),"cannot move %p beyond flush %p - (flush for possible stack alloc)", movingNode, currentNode);
return true;
}
}
// Finally, inspect currentNode's children for evaluation-point constraints.
for (int32_t i = 0; i < currentNode->getNumChildren(); i++)
{
TR::Node *child = currentNode->getChild(i);
//Any node that has side effects (like call and newarrya) cannot be evaluated in the middle of the tree.
if (movingTreeRefInfo->getFirstRefNodesList()->find(child))
{
//for calls and unresolve symbol that are not under check
if (child->exceptionsRaised() ||
(child->getOpCode().hasSymbolReference() && child->getSymbolReference()->isUnresolved()))
{
if (trace())
traceMsg(comp(),"cannot move %p beyond %p - cannot change evaluation point of %p\n ",movingNode,currentTreeRefInfo->getTreeTop()->getNode(),child);
return true;
}
else if(movingNode->getOpCode().isStore())
{
TR::SymbolReference *stSymRef = movingNode->getSymbolReference();
int32_t stSymRefNum = stSymRef->getReferenceNumber();
//TR::SymbolReference *stSymRef = movingNode->getSymbolReference();
//......... (remainder of this example omitted in the original listing) .........
示例8: populatePotentialDeps
// Record, in treeRefInfo, the symbols the subtree rooted at 'node' might
// define (write) and use (read), walking recursively into children that are
// first references within this tree.
void TR_LocalLiveRangeReduction::populatePotentialDeps(TR_TreeRefInfo *treeRefInfo,TR::Node *node)
   {
   TR::ILOpCode &opCode = node->getOpCode();

   if (node->getOpCode().hasSymbolReference())
      {
      TR::SymbolReference *symRef = node->getSymbolReference();
      int32_t symRefNum = symRef->getReferenceNumber();

      // defSym: every symbol that might be written by this node.
      if (opCode.isCall() || opCode.isResolveCheck() || opCode.isStore() || node->mightHaveVolatileSymbolReference())
         {
         bool isCallDirect = node->getOpCode().isCallDirect();
         if (!symRef->getUseDefAliases(isCallDirect).isZero(comp()))
            {
            TR::SparseBitVector useDefAliases(comp()->allocator());
            symRef->getUseDefAliases(isCallDirect).getAliases(useDefAliases);
            TR::SparseBitVector::Cursor defCursor(useDefAliases);
            for (defCursor.SetToFirstOne(); defCursor.Valid(); defCursor.SetToNextOne())
               {
               int32_t aliasedSym = defCursor;
               treeRefInfo->getDefSym()->set(aliasedSym);
               }
            }
         if (opCode.isStore())
            treeRefInfo->getDefSym()->set(symRefNum);
         }

      // useSym: every symbol that might be read by this node.
      if (opCode.canRaiseException())
         {
         TR::SparseBitVector useAliases(comp()->allocator());
         symRef->getUseonlyAliases().getAliases(useAliases);
         TR::SparseBitVector::Cursor useCursor(useAliases);
         for (useCursor.SetToFirstOne(); useCursor.Valid(); useCursor.SetToNextOne())
            {
            int32_t aliasedSym = useCursor;
            treeRefInfo->getUseSym()->set(aliasedSym);
            }
         }

      if (opCode.isLoadVar() || (opCode.getOpCodeValue() == TR::loadaddr))
         treeRefInfo->getUseSym()->set(symRefNum);
      }

   // Don't recurse over references (nodes which are not the first reference).
   for (int32_t i = 0; i < node->getNumChildren(); i++)
      {
      TR::Node *child = node->getChild(i);
      if (child->getReferenceCount() == 1 || treeRefInfo->getFirstRefNodesList()->find(child))
         populatePotentialDeps(treeRefInfo, child);
      }
   return;
   }
示例9: initializeGenAndKillSetInfoForNode
// Update gen and kill reaching-definition information for every node in the
// subtree rooted at 'node', visiting children first.  defsKilled accumulates
// the definitions killed by this node.
// NOTE: this listing is truncated; the remainder of the function is not shown.
void TR_ReachingDefinitions::initializeGenAndKillSetInfoForNode(TR::Node *node, TR_UseDefInfo::BitVector &defsKilled, bool seenException, int32_t blockNum, TR::Node *parent)
{
// Update gen and kill info for nodes in this subtree
//
int32_t i;
// Visit-count guard: process each node only once per pass.
if (node->getVisitCount() == comp()->getVisitCount())
return;
node->setVisitCount(comp()->getVisitCount());
// Process the children first
//
for (i = node->getNumChildren()-1; i >= 0; --i)
{
initializeGenAndKillSetInfoForNode(node->getChild(i), defsKilled, seenException, blockNum, node);
}
// Stores to autos/parms whose stored value is irrelevant still participate,
// even though they have no use/def index of their own.
bool irrelevantStore = false;
scount_t nodeIndex = node->getLocalIndex();
if (nodeIndex <= 0)
{
if (node->getOpCode().isStore() &&
node->getSymbol()->isAutoOrParm() &&
node->storedValueIsIrrelevant())
{
irrelevantStore = true;
}
else
return;
}
bool foundDefsToKill = false;
int32_t numDefNodes = 0;
defsKilled.Clear();
TR::ILOpCode &opCode = node->getOpCode();
TR::SymbolReference *symRef;
TR::Symbol *sym;
uint16_t symIndex;
uint32_t num_aliases;
// Register loads/stores are tracked past the symbol table: their pseudo
// symbol index is numSymbols + global register number.
if (_useDefInfo->_useDefForRegs &&
(opCode.isLoadReg() ||
opCode.isStoreReg()))
{
sym = NULL;
symRef = NULL;
symIndex = _useDefInfo->getNumSymbols() + node->getGlobalRegisterNumber();
num_aliases = 1;
}
else
{
symRef = node->getSymbolReference();
sym = symRef->getSymbol();
symIndex = symRef->getSymbol()->getLocalIndex();
num_aliases = _useDefInfo->getNumAliases(symRef, _aux);
}
if (symIndex == NULL_USEDEF_SYMBOL_INDEX || node->getOpCode().isCall() || node->getOpCode().isFence() ||
(parent && parent->getOpCode().isResolveCheck() && num_aliases > 1))
{
// A call or unresolved reference is a definition of all
// symbols it is aliased with
//
numDefNodes = num_aliases;
//for all symbols that are a mustdef of a call, kill defs of those symbols
if (node->getOpCode().isCall())
foundDefsToKill = false;
}
else if (irrelevantStore || _useDefInfo->isExpandedDefIndex(nodeIndex))
{
// DefOnly node defines all symbols it is aliased with
// UseDef node(load) defines only the symbol itself
//
if (!irrelevantStore)
{
numDefNodes = num_aliases;
numDefNodes = _useDefInfo->isExpandedUseDefIndex(nodeIndex) ? 1 : numDefNodes;
// Shadows and method symbols are excluded from kill sets here.
if (!_useDefInfo->getDefsForSymbolIsZero(symIndex, _aux) &&
(!sym ||
(!sym->isShadow() &&
!sym->isMethod())))
{
foundDefsToKill = true;
// defsKilled ORed with defsForSymbol(symIndex);
_useDefInfo->getDefsForSymbol(defsKilled, symIndex, _aux);
}
if (node->getOpCode().isStoreIndirect())
{
int32_t memSymIndex = _useDefInfo->getMemorySymbolIndex(node);
if (memSymIndex != -1 &&
!_useDefInfo->getDefsForSymbolIsZero(memSymIndex, _aux))
{
foundDefsToKill = true;
// defsKilled ORed with defsForSymbol(symIndex);
_useDefInfo->getDefsForSymbol(defsKilled, memSymIndex, _aux);
//......... (remainder of this example omitted in the original listing) .........
示例10: getProperties
// Build a direct call dispatch for the AMD64 system linkage: evaluate the
// outgoing arguments, set up pre/post register dependencies, and emit the
// call, returning the register holding the result (if any).
// NOTE: this listing is truncated; the remainder of the function is not shown.
TR::Register *TR::AMD64SystemLinkage::buildDirectDispatch(
TR::Node *callNode,
bool spillFPRegs)
{
TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();
TR::Register *returnReg;
// Allocate adequate register dependencies.
//
// pre = number of argument registers
// post = number of volatile + return register
//
uint32_t pre = getProperties().getNumIntegerArgumentRegisters() + getProperties().getNumFloatArgumentRegisters();
uint32_t post = getProperties().getNumVolatileRegisters() + (callNode->getDataType() == TR::NoType ? 0 : 1);
#if defined (PYTHON) && 0
// Treat all preserved GP regs as volatile until register map support available.
//
post += getProperties().getNumberOfPreservedGPRegisters();
#endif
TR::RegisterDependencyConditions *preDeps = generateRegisterDependencyConditions(pre, 0, cg());
TR::RegisterDependencyConditions *postDeps = generateRegisterDependencyConditions(0, post, cg());
// Evaluate outgoing arguments on the system stack and build pre-conditions.
//
int32_t memoryArgSize = buildArgs(callNode, preDeps);
// Build post-conditions.
//
returnReg = buildVolatileAndReturnDependencies(callNode, postDeps);
postDeps->stopAddingPostConditions();
// Find the second scratch register in the post dependency list.
//
TR::Register *scratchReg = NULL;
TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
for (int32_t i=0; i<post; i++)
{
if (postDeps->getPostConditions()->getRegisterDependency(i)->getRealRegister() == scratchRegIndex)
{
scratchReg = postDeps->getPostConditions()->getRegisterDependency(i)->getRegister();
break;
}
}
#if defined(PYTHON) && 0
// For Python, store the instruction that contains the GC map at this site into
// the frame object.
//
TR::SymbolReference *frameObjectSymRef =
comp()->getSymRefTab()->findOrCreateAutoSymbol(comp()->getMethodSymbol(), 0, TR::Address, true, false, true);
TR::Register *frameObjectRegister = cg()->allocateRegister();
generateRegMemInstruction(
L8RegMem,
callNode,
frameObjectRegister,
generateX86MemoryReference(frameObjectSymRef, cg()),
cg());
TR::RealRegister *espReal = cg()->machine()->getX86RealRegister(TR::RealRegister::esp);
TR::Register *gcMapPCRegister = cg()->allocateRegister();
generateRegMemInstruction(
LEA8RegMem,
callNode,
gcMapPCRegister,
generateX86MemoryReference(espReal, -8, cg()),
cg());
// Use "volatile" registers across the call. Once proper register map support
// is implemented, r14 and r15 will no longer be volatile and a different pair
// should be chosen.
//
TR::RegisterDependencyConditions *gcMapDeps = generateRegisterDependencyConditions(0, 2, cg());
gcMapDeps->addPostCondition(frameObjectRegister, TR::RealRegister::r14, cg());
gcMapDeps->addPostCondition(gcMapPCRegister, TR::RealRegister::r15, cg());
gcMapDeps->stopAddingPostConditions();
generateMemRegInstruction(
S8MemReg,
callNode,
generateX86MemoryReference(frameObjectRegister, fe()->getPythonGCMapPCOffsetInFrame(), cg()),
gcMapPCRegister,
gcMapDeps,
cg());
cg()->stopUsingRegister(frameObjectRegister);
cg()->stopUsingRegister(gcMapPCRegister);
#endif
TR::Instruction *instr;
// Direct dispatch when the callee's address is known at compile time.
if (methodSymbol->getMethodAddress())
{
TR_ASSERT(scratchReg, "could not find second scratch register");
auto LoadRegisterInstruction = generateRegImm64SymInstruction(
MOV8RegImm64,
//......... (remainder of this example omitted in the original listing) .........
示例11: if
// Analyze 'region' and, when it matches the simple single-exit counted-loop
// shape this simplification supports, return its LoopInfo; returns 0 (NULL)
// otherwise.  The exit block must end in a two-child branch whose first child
// loads the induction variable, and the IV's increment must be constant.
// NOTE: this listing is truncated; the remainder of the function is not shown.
TR_ExpressionsSimplification::LoopInfo*
TR_ExpressionsSimplification::findLoopInfo(TR_RegionStructure* region)
   {
   // (The '&region' below repairs an HTML-entity corruption in the listing.)
   ListIterator<TR::CFGEdge> exitEdges(&region->getExitEdges());
   if (region->getExitEdges().getSize() != 1)
      {
      if (trace())
         traceMsg(comp(), "Region with more than 1 exit edges can't be handled\n");
      return 0;
      }

   TR_StructureSubGraphNode* exitNode = toStructureSubGraphNode(exitEdges.getFirst()->getFrom());
   if (!exitNode->getStructure()->asBlock())
      {
      if (trace())
         traceMsg(comp(), "The exit block can't be found\n");
      return 0;
      }

   TR::Block *exitBlock = exitNode->getStructure()->asBlock()->getBlock();
   TR::Node *lastTreeInExitBlock = exitBlock->getLastRealTreeTop()->getNode();

   if (trace())
      {
      traceMsg(comp(), "The exit block is %d\n", exitBlock->getNumber());
      traceMsg(comp(), "The branch node is %p\n", lastTreeInExitBlock);
      }

   if (!lastTreeInExitBlock->getOpCode().isBranch())
      {
      if (trace())
         traceMsg(comp(), "The branch node couldn't be found\n");
      return 0;
      }

   if (lastTreeInExitBlock->getNumChildren() < 2)
      {
      if (trace())
         traceMsg(comp(), "The branch node has less than 2 children\n");
      return 0;
      }

   TR::Node *firstChildOfLastTree = lastTreeInExitBlock->getFirstChild();
   TR::Node *secondChildOfLastTree = lastTreeInExitBlock->getSecondChild();

   if (!firstChildOfLastTree->getOpCode().hasSymbolReference())
      {
      if (trace())
         traceMsg(comp(), "The branch node's first child node %p - its opcode does not have a symbol reference\n", firstChildOfLastTree);
      return 0;
      }

   TR::SymbolReference *firstChildSymRef = firstChildOfLastTree->getSymbolReference();

   if (trace())
      traceMsg(comp(), "Symbol Reference: %p Symbol: %p\n", firstChildSymRef, firstChildSymRef->getSymbol());

   // Locate the induction variable that matches with the exit node symbol
   //
   TR_InductionVariable *indVar = region->findMatchingIV(firstChildSymRef);
   if (!indVar) return 0;

   if (!indVar->getIncr()->asIntConst())
      {
      if (trace())
         traceMsg(comp(), "Increment is not a constant\n");
      return 0;
      }

   int32_t increment = indVar->getIncr()->getLowInt();
   _visitCount = comp()->incVisitCount();

   // If the IV load is referenced elsewhere, scan the exit block to verify the
   // IV is not written and then used in a way that invalidates the analysis.
   bool indVarWrittenAndUsedUnexpectedly = false;
   if (firstChildOfLastTree->getReferenceCount() > 1)
      {
      TR::TreeTop *cursorTreeTopInExitBlock = exitBlock->getEntry();
      TR::TreeTop *exitTreeTopInExitBlock = exitBlock->getExit();
      bool loadSeen = false;
      while (cursorTreeTopInExitBlock != exitTreeTopInExitBlock)
         {
         TR::Node *cursorNode = cursorTreeTopInExitBlock->getNode();
         if (checkForLoad(cursorNode, firstChildOfLastTree))
            loadSeen = true;
         if (!cursorNode->getOpCode().isStore() &&
             (cursorNode->getNumChildren() > 0))
            cursorNode = cursorNode->getFirstChild();
         if (cursorNode->getOpCode().isStore() &&
             (cursorNode->getSymbolReference() == firstChildSymRef))
            {
            indVarWrittenAndUsedUnexpectedly = true;
            if ((cursorNode->getFirstChild() == firstChildOfLastTree) ||
                !loadSeen)
               indVarWrittenAndUsedUnexpectedly = false;
            else
//......... (remainder of this example omitted in the original listing) .........
示例12: if
// Build arguments for system linkage dispatch.
//
// Evaluates each outgoing argument of callNode into its linkage-assigned
// register (adding a pre-condition to 'deps') or stores it to the outgoing
// argument area on the stack.  Returns the memory argument size.
// NOTE: this listing is truncated; the remainder of the function is not shown.
int32_t TR::AMD64SystemLinkage::buildArgs(
TR::Node *callNode,
TR::RegisterDependencyConditions *deps)
{
TR::SymbolReference *methodSymRef = callNode->getSymbolReference();
TR::MethodSymbol *methodSymbol = methodSymRef->getSymbol()->castToMethodSymbol();
TR::RealRegister::RegNum noReg = TR::RealRegister::NoReg;
TR::RealRegister *espReal = machine()->getX86RealRegister(TR::RealRegister::esp);
int32_t firstNodeArgument = callNode->getFirstArgumentIndex();
int32_t lastNodeArgument = callNode->getNumChildren() - 1;
int32_t offset = 0;
int32_t sizeOfOutGoingArgs= 0;
uint16_t numIntArgs = 0,
numFloatArgs = 0;
int32_t first, last, direction;
int32_t numCopiedRegs = 0;
TR::Register *copiedRegs[TR::X86LinkageProperties::MaxArgumentRegisters];
// Walk the argument children in the linkage's required order.
if (getProperties().passArgsRightToLeft())
{
first = lastNodeArgument;
last = firstNodeArgument - 1;
direction = -1;
}
else
{
first = firstNodeArgument;
last = lastNodeArgument + 1;
direction = 1;
}
// If the dispatch is indirect we must add the VFT register to the preconditions
// so that it gets register assigned with the other preconditions to the call.
//
if (callNode->getOpCode().isIndirect())
{
TR::Node *vftChild = callNode->getFirstChild();
TR_ASSERT(vftChild->getRegister(), "expecting VFT child to be evaluated");
TR::RealRegister::RegNum scratchRegIndex = getProperties().getIntegerScratchRegister(1);
deps->addPreCondition(vftChild->getRegister(), scratchRegIndex, cg());
}
int32_t i;
for (i = first; i != last; i += direction)
{
TR::parmLayoutResult layoutResult;
TR::RealRegister::RegNum rregIndex = noReg;
TR::Node *child = callNode->getChild(i);
// Ask the layout logic whether this parm goes in a register or on the stack.
layoutParm(child, sizeOfOutGoingArgs, numIntArgs, numFloatArgs, layoutResult);
if (layoutResult.abstract & TR::parmLayoutResult::IN_LINKAGE_REG_PAIR)
{
// TODO: AMD64 SysV ABI might put a struct into a pair of linkage registerr
TR_ASSERT(false, "haven't support linkage_reg_pair yet.\n");
}
else if (layoutResult.abstract & TR::parmLayoutResult::IN_LINKAGE_REG)
{
TR_RegisterKinds regKind = layoutResult.regs[0].regKind;
uint32_t regIndex = layoutResult.regs[0].regIndex;
TR_ASSERT(regKind == TR_GPR || regKind == TR_FPR, "linkage registers includes TR_GPR and TR_FPR\n");
rregIndex = (regKind == TR_FPR) ? getProperties().getFloatArgumentRegister(regIndex): getProperties().getIntegerArgumentRegister(regIndex);
}
else
{
offset = layoutResult.offset;
}
TR::Register *vreg;
vreg = cg()->evaluate(child);
bool needsStackOffsetUpdate = false;
if (rregIndex != noReg)
{
// For NULL JNI reference parameters, it is possible that the NULL value will be evaluated into
// a different register than the child. In that case it is not necessary to copy the temporary scratch
// register across the call.
//
if ((child->getReferenceCount() > 1) &&
(vreg == child->getRegister()))
{
TR::Register *argReg = cg()->allocateRegister();
if (vreg->containsCollectedReference())
argReg->setContainsCollectedReference();
generateRegRegInstruction(TR::Linkage::movOpcodes(RegReg, movType(child->getDataType())), child, argReg, vreg, cg());
vreg = argReg;
copiedRegs[numCopiedRegs++] = vreg;
}
deps->addPreCondition(vreg, rregIndex, cg());
}
else
{
// Ideally, we would like to push rather than move
generateMemRegInstruction(TR::Linkage::movOpcodes(MemReg, fullRegisterMovType(vreg)),
child,
generateX86MemoryReference(espReal, offset, cg()),
vreg,
//......... (remainder of this example omitted in the original listing) .........
示例13: getProperties
// Build a direct call dispatch for the AArch64 system linkage: push the
// outgoing arguments, emit the branch-and-link to the callee, restore the
// stack pointer, and return the register (if any) holding the call's result.
TR::Register *TR::ARM64SystemLinkage::buildDirectDispatch(TR::Node *callNode)
   {
   TR::SymbolReference *callSymRef = callNode->getSymbolReference();
   const TR::ARM64LinkageProperties &pp = getProperties();
   TR::RealRegister *sp = cg()->machine()->getRealRegister(pp.getStackPointerRegister());

   TR::RegisterDependencyConditions *dependencies =
      new (trHeapMemory()) TR::RegisterDependencyConditions(
         pp.getNumberOfDependencyGPRegisters(),
         pp.getNumberOfDependencyGPRegisters(), trMemory());

   int32_t totalSize = buildArgs(callNode, dependencies);

   // Carve out the outgoing-argument area below the stack pointer; the size
   // must fit an unsigned 12-bit immediate of the sub instruction.
   if (totalSize > 0)
      {
      if (constantIsUnsignedImm12(totalSize))
         generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::subimmx, callNode, sp, sp, totalSize);
      else
         TR_ASSERT_FATAL(false, "Too many arguments.");
      }

   TR::MethodSymbol *callSymbol = callSymRef->getSymbol()->castToMethodSymbol();
   // Pass callSymRef directly: the former 'callSymRef ? callSymRef :
   // callNode->getSymbolReference()' was redundant because both operands
   // denote the same symbol reference (fetched from callNode above).
   generateImmSymInstruction(cg(), TR::InstOpCode::bl, callNode,
      (uintptr_t)callSymbol->getMethodAddress(),
      dependencies, callSymRef, NULL);
   cg()->machine()->setLinkRegisterKilled(true);

   // Release the outgoing-argument area.
   if (totalSize > 0)
      {
      if (constantIsUnsignedImm12(totalSize))
         generateTrg1Src1ImmInstruction(cg(), TR::InstOpCode::addimmx, callNode, sp, sp, totalSize);
      else
         TR_ASSERT_FATAL(false, "Too many arguments.");
      }

   // Select the return register according to the call's result kind.
   TR::Register *retReg;
   switch (callNode->getOpCodeValue())
      {
      case TR::icall:
      case TR::iucall:
         retReg = dependencies->searchPostConditionRegister(
            pp.getIntegerReturnRegister());
         break;
      case TR::lcall:
      case TR::lucall:
      case TR::acall:
         retReg = dependencies->searchPostConditionRegister(
            pp.getLongReturnRegister());
         break;
      case TR::fcall:
      case TR::dcall:
         retReg = dependencies->searchPostConditionRegister(
            pp.getFloatReturnRegister());
         break;
      case TR::call:
         retReg = NULL;
         break;
      default:
         retReg = NULL;
         TR_ASSERT(false, "Unsupported direct call Opcode.");
      }

   callNode->setRegister(retReg);
   return retReg;
   }
示例14: if
// Refine the alias set of a call by peeking into the callee's IL: walk each
// treetop of the peeked method, accumulate into 'aliases' only the symbols it
// can actually store to, and recurse into direct calls.  Returns NULL (0)
// when refinement is impossible (peek too deep, resolve checks, indirect or
// non-peekable callees, monitors); otherwise returns a heap-allocated copy of
// the accumulated aliases.
TR_BitVector *
addVeryRefinedCallAliasSets(TR::ResolvedMethodSymbol * methodSymbol, TR_BitVector * aliases, List<void> * methodsPeeked)
{
TR::Compilation *comp = TR::comp();
void * methodId = methodSymbol->getResolvedMethod()->getPersistentIdentifier();
// Already-peeked method: recursion/cycle detected, accept current aliases.
if (methodsPeeked->find(methodId))
{
// This can't be allocated into the alias region as it must be accessed across optimizations
TR_BitVector *heapAliases = new (comp->trHeapMemory()) TR_BitVector(comp->getSymRefCount(), comp->trMemory(), heapAlloc, growable);
*heapAliases |= *aliases;
return heapAliases;
}
// stop if the peek is getting very deep
//
if (methodsPeeked->getSize() >= PEEK_THRESHOLD)
return 0;
methodsPeeked->add(methodId);
dumpOptDetails(comp, "O^O REFINING ALIASES: Peeking into the IL to refine aliases \n");
if (!methodSymbol->getResolvedMethod()->genMethodILForPeeking(methodSymbol, comp, true))
return 0;
TR::SymbolReferenceTable * symRefTab = comp->getSymRefTab();
for (TR::TreeTop * tt = methodSymbol->getFirstTreeTop(); tt; tt = tt->getNextTreeTop())
{
TR::Node *node = tt->getNode();
// Resolve checks defeat refinement entirely.
if (node->getOpCode().isResolveCheck())
return 0;
// Look through anchoring treetops/compressedrefs/checks to the real node.
if ((node->getOpCodeValue() == TR::treetop) ||
(node->getOpCodeValue() == TR::compressedRefs) ||
node->getOpCode().isCheck())
node = node->getFirstChild();
if (node->getOpCode().isStore())
{
// Map the callee's store target back into the caller's symref table.
TR::SymbolReference * symRefInCallee = node->getSymbolReference(), * symRefInCaller;
TR::Symbol * symInCallee = symRefInCallee->getSymbol();
TR::DataType type = symInCallee->getDataType();
if (symInCallee->isShadow())
{
if (symInCallee->isArrayShadowSymbol())
symRefInCaller = symRefTab->getSymRef(symRefTab->getArrayShadowIndex(type));
else if (symInCallee->isArrayletShadowSymbol())
symRefInCaller = symRefTab->getSymRef(symRefTab->getArrayletShadowIndex(type));
else
symRefInCaller = symRefTab->findShadowSymbol(symRefInCallee->getOwningMethod(comp), symRefInCallee->getCPIndex(), type);
if (symRefInCaller)
{
if (symRefInCaller->reallySharesSymbol(comp))
symRefInCaller->setSharedShadowAliases(aliases, symRefTab);
aliases->set(symRefInCaller->getReferenceNumber());
}
}
else if (symInCallee->isStatic())
{
symRefInCaller = symRefTab->findStaticSymbol(symRefInCallee->getOwningMethod(comp), symRefInCallee->getCPIndex(), type);
if (symRefInCaller)
{
if (symRefInCaller->reallySharesSymbol(comp))
symRefInCaller->setSharedStaticAliases(aliases, symRefTab);
else
aliases->set(symRefInCaller->getReferenceNumber());
}
}
}
else if (node->getOpCode().isCall())
{
// Only direct calls to compilable, non-JNI resolved methods can be peeked
// recursively; anything else defeats refinement.
if (node->getOpCode().isCallIndirect())
return 0;
TR::ResolvedMethodSymbol * calleeSymbol = node->getSymbol()->getResolvedMethodSymbol();
if (!calleeSymbol)
return 0;
TR_ResolvedMethod * calleeMethod = calleeSymbol->getResolvedMethod();
if (!calleeMethod->isCompilable(comp->trMemory()) || calleeMethod->isJNINative())
return 0;
if (!addVeryRefinedCallAliasSets(calleeSymbol, aliases, methodsPeeked))
return 0;
}
else if (node->getOpCodeValue() == TR::monent)
return 0;
}
// This can't be allocated into the alias region as it must be accessed across optimizations
TR_BitVector *heapAliases = new (comp->trHeapMemory()) TR_BitVector(comp->getSymRefCount(), comp->trMemory(), heapAlloc, growable);
*heapAliases |= *aliases;
return heapAliases;
}
示例15: collectSupportedNodes
// Collects nodes involved in PRE that are not stores or checks; such nodes
// require temps.  Returns true when any node in this subtree was newly
// visited and recorded as supported.
bool TR_LocalAnalysisInfo::collectSupportedNodes(TR::Node *node, TR::Node *parent)
   {
   // Visit each node at most once per pass.
   if (node->getVisitCount() == _visitCount)
      return false;
   node->setVisitCount(_visitCount);

   bool sawSupported = false;
   bool childRelevant = false;
   TR::ILOpCode &opCode = node->getOpCode();

   // Walk children first, noting whether any child is a "check" expression.
   for (int32_t i = 0; i < node->getNumChildren(); i++)
      {
      TR::Node *child = node->getChild(i);
      if (collectSupportedNodes(child, node))
         sawSupported = true;
      if (_checkExpressions->get(child->getLocalIndex()))
         childRelevant = true;
      }

   if (TR_LocalAnalysis::isSupportedNode(node, _compilation, parent))
      {
      _supportedNodesAsArray[node->getLocalIndex()] = node;

      // An indirect load/store is considered "safe" only when the base is a
      // non-null 'this' pointer whose declared class matches the class that
      // owns the accessed field.
      bool indirectionSafe = true;
      if (opCode.isIndirect() && (opCode.isLoadVar() || opCode.isStore()))
         {
         indirectionSafe = false;
         if (node->getFirstChild()->isThisPointer() &&
             node->getFirstChild()->isNonNull())
            {
            indirectionSafe = true;
            TR::Node *baseChild = node->getFirstChild();
            TR::SymbolReference *baseSymRef = baseChild->getSymbolReference();
            int32_t baseSigLen;
            const char *baseSig = baseSymRef->getTypeSignature(baseSigLen);

            TR::SymbolReference *fieldSymRef = node->getSymbolReference();
            TR_OpaqueClassBlock *baseClass = NULL;
            if (baseSig && (baseSigLen > 0))
               baseClass = _compilation->fe()->getClassFromSignature(baseSig, baseSigLen, baseSymRef->getOwningMethod(_compilation));

            TR_OpaqueClassBlock *fieldClass = NULL;
            int32_t fieldSigLen;
            const char *fieldSig = fieldSymRef->getOwningMethod(_compilation)->classNameOfFieldOrStatic(fieldSymRef->getCPIndex(), fieldSigLen);
            if (fieldSig)
               {
               fieldSig = classNameToSignature(fieldSig, fieldSigLen, _compilation);
               fieldClass = _compilation->fe()->getClassFromSignature(fieldSig, fieldSigLen, fieldSymRef->getOwningMethod(_compilation));
               }

            if (!baseClass || !fieldClass || (baseClass != fieldClass))
               indirectionSafe = false;
            }
         }

      // Record expressions that need check protection when commoned.
      if (childRelevant ||
          (!indirectionSafe || (opCode.isArrayLength())) ||
          (node->getOpCode().isArrayRef()) ||
          (opCode.hasSymbolReference() && (node->getSymbolReference()->isUnresolved() || node->getSymbol()->isArrayShadowSymbol())) ||
          (opCode.isDiv() || opCode.isRem()))
         _checkExpressions->set(node->getLocalIndex());
      }

   return sawSupported;
   }