This article collects and summarizes typical usage examples of the C++ method IRInstruction::getNumSrcs. If you are unsure what IRInstruction::getNumSrcs does in C++ or how to use it, the curated code examples here may help. You can also explore further usage examples of the class this method belongs to, IRInstruction.
The following presents 4 code examples of the IRInstruction::getNumSrcs method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
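As the examples below illustrate, getNumSrcs() returns the number of source operands (SSATmp*) that an HHIR instruction takes, and it is usually paired with getSrc(i) to walk those operands. Here is a minimal sketch of that iteration pattern, assuming the HHVM JIT headers that declare IRInstruction and SSATmp are available; forEachSrc is a hypothetical helper name, not part of the HHVM API:

// Sketch only: visit every source operand of an HHIR instruction.
// Assumes the HHVM JIT headers declaring IRInstruction/SSATmp are included.
void forEachSrc(IRInstruction* inst) {
  for (uint32 i = 0; i < inst->getNumSrcs(); ++i) {
    SSATmp* src = inst->getSrc(i);              // i-th source operand
    IRInstruction* def = src->getInstruction(); // instruction that defines it
    (void)def;                                  // inspect or transform src/def here
  }
}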
Example 1: simplifyNot
SSATmp* Simplifier::simplifyNot(SSATmp* src) {
  IRInstruction* inst = src->getInstruction();
  Opcode op = inst->getOpcode();
  // TODO: Add more algebraic simplification rules for NOT
  switch (op) {
    case ConvToBool:
      return simplifyNot(inst->getSrc(0));
    case OpXor: {
      // !!X --> bool(X)
      if (isNotInst(inst->getSrc(0))) {
        return m_tb->genConvToBool(inst->getSrc(0));
      }
      break;
    }
    // !(X cmp Y) --> X opposite_cmp Y
    case OpLt:
    case OpLte:
    case OpGt:
    case OpGte:
    case OpEq:
    case OpNeq:
    case OpSame:
    case OpNSame:
      // XXX: this could technically be losing a ConvToBool, except
      // that we kinda know "not" instructions (Xor with 1) are always
      // going to be followed by ConvToBool.
      //
      // TODO(#2058865): This would make more sense with a real Not
      // instruction and allowing boolean output types for query ops.
      return m_tb->genCmp(negateQueryOp(op),
                          inst->getSrc(0),
                          inst->getSrc(1));
    case InstanceOf:
    case NInstanceOf:
    case InstanceOfBitmask:
    case NInstanceOfBitmask:
      // TODO: combine this with the above check and use isQueryOp or
      // add an isNegatable.
      return m_tb->gen(negateQueryOp(op),
                       inst->getNumSrcs(),
                       inst->getSrcs().begin());
    // TODO !(X | non_zero) --> 0
    default: (void)op;
  }
  return nullptr;
}
Example 2: eliminateDeadCode
void eliminateDeadCode(Trace* trace, IRFactory* irFactory) {
  IRInstruction::List wl; // worklist of live instructions
  Trace::List& exitTraces = trace->getExitTraces();
  // first mark all exit traces as unreachable by setting the id on
  // their labels to 0
  for (Trace::Iterator it = exitTraces.begin();
       it != exitTraces.end();
       it++) {
    Trace* trace = *it;
    trace->getLabel()->setId(DEAD);
  }
  // mark the essential instructions and add them to the initial
  // work list; also mark the exit traces that are reachable by
  // any control flow instruction in the main trace.
  initInstructions(trace, wl);
  for (Trace::Iterator it = exitTraces.begin();
       it != exitTraces.end();
       it++) {
    // only process those exit traces that are reachable from
    // the main trace
    Trace* trace = *it;
    if (trace->getLabel()->getId() != DEAD) {
      initInstructions(trace, wl);
    }
  }
  // process the worklist
  while (!wl.empty()) {
    IRInstruction* inst = wl.front();
    wl.pop_front();
    for (uint32 i = 0; i < inst->getNumSrcs(); i++) {
      SSATmp* src = inst->getSrc(i);
      if (src->getInstruction()->isDefConst()) {
        continue;
      }
      IRInstruction* srcInst = src->getInstruction();
      if (srcInst->getId() == DEAD) {
        srcInst->setId(LIVE);
        wl.push_back(srcInst);
      }
      // <inst> consumes <srcInst> which is an IncRef,
      // so we mark <srcInst> as REFCOUNT_CONSUMED.
      if (inst->consumesReference(i) && srcInst->getOpcode() == IncRef) {
        if (inst->getParent()->isMain() || !srcInst->getParent()->isMain()) {
          // <srcInst> is consumed from its own trace.
          srcInst->setId(REFCOUNT_CONSUMED);
        } else {
          // <srcInst> is consumed off trace.
          if (srcInst->getId() != REFCOUNT_CONSUMED) {
            // mark <srcInst> as REFCOUNT_CONSUMED_OFF_TRACE unless it is
            // also consumed from its own trace.
            srcInst->setId(REFCOUNT_CONSUMED_OFF_TRACE);
          }
        }
      }
    }
  }
  // Optimize IncRefs and DecRefs.
  optimizeRefCount(trace);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); ++it) {
    optimizeRefCount(*it);
  }
  if (RuntimeOption::EvalHHIREnableSinking) {
    // Sink IncRefs consumed off trace.
    IRInstruction::List toSink;
    sinkIncRefs(trace, irFactory, toSink);
  }
  // now remove instructions whose id == DEAD
  removeDeadInstructions(trace);
  for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); it++) {
    removeDeadInstructions(*it);
  }
  // If main trace ends with an unconditional jump, copy the target of
  // the jump to the end of the trace
  IRInstruction::List& instList = trace->getInstructionList();
  IRInstruction::Iterator lastInst = instList.end();
  lastInst--; // go back to the last instruction
  IRInstruction* jmpInst = *lastInst;
  if (jmpInst->getOpcode() == Jmp_) {
    Trace* targetTrace = jmpInst->getLabel()->getTrace();
    IRInstruction::List& targetInstList = targetTrace->getInstructionList();
    IRInstruction::Iterator instIter = targetInstList.begin();
    instIter++; // skip over label
    // update the parent trace of the moved instructions
    for (IRInstruction::Iterator it = instIter;
         it != targetInstList.end();
         ++it) {
      (*it)->setParent(trace);
    }
    instList.splice(lastInst, targetInstList, instIter, targetInstList.end());
    // delete the jump instruction
    instList.erase(lastInst);
  }
  // If main trace ends with a conditional jump with no side-effects on exit,
//......... part of this example's code is omitted here .........
Example 3: rematerializeAux
void LinearScan::rematerializeAux(Trace* trace,
                                  SSATmp* curSp,
                                  SSATmp* curFp,
                                  std::vector<SSATmp*> localValues) {
  IRInstruction::List& instList = trace->getInstructionList();
  for (IRInstruction::Iterator it = instList.begin();
       it != instList.end();
       ++it) {
    IRInstruction* inst = *it;
    Opcode opc = inst->getOpcode();
    SSATmp* dst = inst->getDst();
    if (opc == DefFP || opc == FreeActRec) {
      curFp = dst;
      assert(dst && dst->getReg() == rVmFp);
    }
    if (opc == Reload) {
      // s = Spill t0
      // t = Reload s
      SSATmp* spilledTmp = getSpilledTmp(dst);
      IRInstruction* spilledInst = spilledTmp->getInstruction();
      IRInstruction* newInst = NULL;
      if (spilledInst->isRematerializable() ||
          (spilledInst->getOpcode() == LdStack &&
           spilledInst->getSrc(0) == curSp)) {
        // XXX: could change <newInst> to the non-check version.
        // Rematerialize those rematerializable instructions (i.e.,
        // isRematerializable returns true) and LdStack.
        newInst = spilledInst->clone(m_irFactory);
        // The new instruction needn't have an exit label, because it is always
        // dominated by the original instruction.
        newInst->setLabel(NULL);
      } else {
        // Rematerialize LdLoc.
        std::vector<SSATmp*>::iterator pos =
          std::find(localValues.begin(),
                    localValues.end(),
                    canonicalize(spilledTmp));
        // Search for a local that stores the value of <spilledTmp>.
        if (pos != localValues.end()) {
          size_t locId = pos - localValues.begin();
          assert(curFp != NULL);
          ConstInstruction constInst(curFp, Local(locId));
          IRInstruction* ldHomeInst =
            m_irFactory->cloneInstruction(&constInst);
          newInst = m_irFactory->gen(LdLoc,
                                     dst->getType(),
                                     m_irFactory->getSSATmp(ldHomeInst));
        }
      }
      if (newInst) {
        UNUSED Type::Tag oldType = dst->getType();
        newInst->setDst(dst);
        dst->setInstruction(newInst);
        assert(outputType(newInst) == oldType);
        *it = newInst;
        newInst->setParent(trace);
      }
    }
    // Updating <curSp> and <localValues>.
    if (dst && dst->getReg() == rVmSp) {
      // <inst> modifies the stack pointer.
      curSp = dst;
    }
    if (opc == LdLoc || opc == StLoc || opc == StLocNT) {
      // dst = LdLoc home
      // StLoc/StLocNT home, src
      int locId = getLocalIdFromHomeOpnd(inst->getSrc(0));
      // Note that when we implement inlining, we will need to deal
      // with the new local id space of the inlined function.
      SSATmp* localValue = (opc == LdLoc ? dst : inst->getSrc(1));
      if (int(localValues.size()) < locId + 1) {
        localValues.resize(locId + 1);
      }
      localValues[locId] = canonicalize(localValue);
    }
    // Other instructions that may have side effects on locals must
    // kill the local variable values.
    else if (opc == IterInit) {
      int valLocId = inst->getSrc(3)->getConstValAsInt();
      localValues[valLocId] = NULL;
      if (inst->getNumSrcs() == 5) {
        int keyLocId = inst->getSrc(4)->getConstValAsInt();
        localValues[keyLocId] = NULL;
      }
    } else if (opc == IterNext) {
      int valLocId = inst->getSrc(2)->getConstValAsInt();
      localValues[valLocId] = NULL;
      if (inst->getNumSrcs() == 4) {
        int keyLocId = inst->getSrc(3)->getConstValAsInt();
        localValues[keyLocId] = NULL;
      }
    }
    if (inst->isControlFlowInstruction()) {
      LabelInstruction* label = inst->getLabel();
      if (label != NULL && label->getId() == inst->getId() + 1) {
        rematerializeAux(label->getParent(), curSp, curFp, localValues);
      }
    }
//......... part of this example's code is omitted here .........
Example 4: sinkIncRefs
// Sink IncRefs consumed off trace.
// When <trace> is an exit trace, <toSink> contains all live IncRefs in the
// main trace that are consumed off trace.
void sinkIncRefs(Trace* trace,
                 IRFactory* irFactory,
                 IRInstruction::List& toSink) {
  IRInstruction::List& instList = trace->getInstructionList();
  IRInstruction::Iterator it;
  std::map<SSATmp*, SSATmp*> sunkTmps;
  if (!trace->isMain()) {
    // Sink REFCOUNT_CONSUMED_OFF_TRACE IncRefs before the first non-label
    // instruction, and create a mapping between the original tmps to the sunk
    // tmps so that we can later replace the original ones with the sunk ones.
    for (IRInstruction::ReverseIterator j = toSink.rbegin();
         j != toSink.rend();
         ++j) {
      // prependInstruction inserts an instruction to the beginning. Therefore,
      // we iterate through toSink in the reversed order.
      IRInstruction* sunkInst = irFactory->incRef((*j)->getSrc(0));
      sunkInst->setId(LIVE);
      trace->prependInstruction(sunkInst);
      ASSERT((*j)->getDst());
      ASSERT(!sunkTmps.count((*j)->getDst()));
      sunkTmps[(*j)->getDst()] = irFactory->getSSATmp(sunkInst);
    }
  }
  // An exit trace may be entered from multiple exit points. We keep track of
  // which exit traces we already pushed sunk IncRefs to, so that we won't push
  // them multiple times.
  std::set<Trace*> pushedTo;
  for (it = instList.begin(); it != instList.end(); ++it) {
    IRInstruction* inst = *it;
    if (trace->isMain()) {
      if (inst->getOpcode() == IncRef) {
        // Must be REFCOUNT_CONSUMED or REFCOUNT_CONSUMED_OFF_TRACE;
        // otherwise, it should be already removed in optimizeRefCount.
        ASSERT(inst->getId() == REFCOUNT_CONSUMED ||
               inst->getId() == REFCOUNT_CONSUMED_OFF_TRACE);
        if (inst->getId() == REFCOUNT_CONSUMED_OFF_TRACE) {
          inst->setOpcode(Mov);
          // Mark them as dead so that they'll be removed later.
          inst->setId(DEAD);
          // Put all REFCOUNT_CONSUMED_OFF_TRACE IncRefs to the sinking list.
          toSink.push_back(inst);
        }
      }
      if (inst->getOpcode() == DecRefNZ) {
        IRInstruction* srcInst = inst->getSrc(0)->getInstruction();
        if (srcInst->getId() == DEAD) {
          inst->setId(DEAD);
          // This may take O(I) time where I is the number of IncRefs
          // in the main trace.
          toSink.remove(srcInst);
        }
      }
      if (LabelInstruction* label = inst->getLabel()) {
        Trace* exitTrace = label->getTrace();
        if (!pushedTo.count(exitTrace)) {
          pushedTo.insert(exitTrace);
          sinkIncRefs(exitTrace, irFactory, toSink);
        }
      }
    } else {
      // Replace the original tmps with the sunk tmps.
      for (uint32 i = 0; i < inst->getNumSrcs(); ++i) {
        SSATmp* src = inst->getSrc(i);
        if (SSATmp* sunkTmp = sunkTmps[src]) {
          inst->setSrc(i, sunkTmp);
        }
      }
    }
  }
  // Do copyProp at last, because we need to keep REFCOUNT_CONSUMED_OFF_TRACE
  // Movs as the prototypes for sunk instructions.
  for (it = instList.begin(); it != instList.end(); ++it) {
    Simplifier::copyProp(*it);
  }
}