This article collects typical usage examples of the C++ method IRInstruction::getSrc. If you are unsure what IRInstruction::getSrc does or how to use it, the hand-picked code samples below may help; you can also explore further usage examples of the enclosing class IRInstruction.
The following shows 15 code examples of the IRInstruction::getSrc method, ordered by popularity by default.
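Before diving into the examples, here is a minimal, self-contained sketch of the pattern most of them rely on: an SSATmp knows the IRInstruction that defines it, and getSrc(i) walks from an instruction to its i-th source operand, so optimization passes chase def chains by alternating getInstruction() and getSrc(). The types below are simplified stand-ins for illustration only, not the real HHVM classes.

#include <vector>

// Simplified stand-ins for HHVM's IR classes (illustration only, not the real API).
struct IRInstruction;

struct SSATmp {
  IRInstruction* definingInst;   // the instruction that produces this value
  IRInstruction* getInstruction() const { return definingInst; }
};

struct IRInstruction {
  int opcode;
  std::vector<SSATmp*> srcs;     // source operands
  SSATmp* getSrc(unsigned i) const { return srcs[i]; }
  int getOpcode() const { return opcode; }
};

// Walk one step up the def chain: the instruction defining src's first operand.
inline IRInstruction* definerOfFirstSrc(SSATmp* src) {
  return src->getInstruction()->getSrc(0)->getInstruction();
}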
Example 1: simplifyNot
SSATmp* Simplifier::simplifyNot(SSATmp* src) {
// const XORs are handled in simplifyXor()
assert(!src->isConst());
assert(src->getType() == Type::Bool);
IRInstruction* inst = src->getInstruction()->getSrc(0)->getInstruction();
Opcode op = inst->getOpcode();
// TODO: Add more algebraic simplification rules for NOT
switch (op) {
case OpXor: {
// !!X --> bool(X)
if (isNotInst(inst->getSrc(0))) {
return m_tb->genConvToBool(inst->getSrc(0));
}
break;
}
// !(X cmp Y) --> X opposite_cmp Y
case OpLt:
case OpLte:
case OpGt:
case OpGte:
case OpEq:
case OpNeq:
case OpSame:
case OpNSame:
return m_tb->genCmp(negateQueryOp(op), inst->getSrc(0), inst->getSrc(1));
// TODO !(X | non_zero) --> 0
default: (void)op;
}
return NULL;
}
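The !(X cmp Y) --> X opposite_cmp Y rewrite above relies on negateQueryOp, whose implementation is not included in this excerpt. A plausible sketch of such an opcode-negation table (an assumption for illustration, not the actual HHVM code) could look like:

// Hypothetical sketch of an opcode-negation table; the real negateQueryOp
// lives elsewhere in the HHVM sources and may differ.
enum Opcode { OpLt, OpLte, OpGt, OpGte, OpEq, OpNeq, OpSame, OpNSame };

Opcode negateQueryOpSketch(Opcode op) {
  switch (op) {
    case OpLt:    return OpGte;   // !(x <  y)  ==  x >= y
    case OpLte:   return OpGt;    // !(x <= y)  ==  x >  y
    case OpGt:    return OpLte;
    case OpGte:   return OpLt;
    case OpEq:    return OpNeq;
    case OpNeq:   return OpEq;
    case OpSame:  return OpNSame; // === / !== identity comparisons
    case OpNSame: return OpSame;
  }
  return op; // unreachable for the opcodes above
}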
Example 2: simplifyCall
SSATmp* Simplifier::simplifyCall(IRInstruction* inst) {
auto spillVals = inst->getSrcs().subpiece(3);
IRInstruction* spillStack = m_tb->getSp()->getInstruction();
if (spillStack->getOpcode() != SpillStack) {
return nullptr;
}
SSATmp* sp = spillStack->getSrc(0);
int baseOffset = spillStack->getSrc(1)->getValInt() -
spillValueCells(spillStack);
auto const numSpillSrcs = spillVals.size();
for (int32_t i = 0; i < numSpillSrcs; i++) {
const int64_t offset = -(i + 1) + baseOffset;
assert(spillVals[i]->getType() != Type::ActRec);
IRInstruction* srcInst = spillVals[i]->getInstruction();
// If our value came from a LdStack on the same sp and offset,
// we don't need to spill it.
if (srcInst->getOpcode() == LdStack && srcInst->getSrc(0) == sp &&
srcInst->getSrc(1)->getValInt() == offset) {
spillVals[i] = m_tb->genDefNone();
}
}
// Note: although the instruction might have been modified above, we still
// need to return nullptr so that it gets cloned later if it's stack-allocated
return nullptr;
}
Example 3: removeUnusedSpillsAux
void LinearScan::removeUnusedSpillsAux(Trace* trace) {
IRInstruction::List& instList = trace->getInstructionList();
for (IRInstruction::Iterator it = instList.begin();
it != instList.end(); ) {
IRInstruction::Iterator next = it; ++next;
IRInstruction* inst = *it;
if (inst->getOpcode() == Spill && inst->getDst()->getUseCount() == 0) {
instList.erase(it);
SSATmp* src = inst->getSrc(0);
if (src->decUseCount() == 0) {
Opcode srcOpc = src->getInstruction()->getOpcode();
// Not all instructions are able to take noreg as their dest
// reg. We pick LdLoc and IncRef because they occur often.
if (srcOpc == IncRef || srcOpc == LdLoc) {
for (int locIndex = 0;
locIndex < src->numNeededRegs();
++locIndex) {
src->setReg(InvalidReg, locIndex);
}
}
}
}
it = next;
}
}
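Both this pass and Example 7 below hinge on the same bookkeeping: a Spill whose destination has no uses is dead, and erasing it decrements the use count of its source, which may in turn free the source's registers. A self-contained toy version of that bookkeeping (simplified counters, not the real register allocator) might be:

// Toy use-count bookkeeping, mirroring the pattern in the pass above.
struct ToyTmp {
  unsigned uses = 0;
  unsigned getUseCount() const { return uses; }
  unsigned decUseCount()       { return --uses; }
};

struct ToySpill {
  ToyTmp* dst;   // the spill-slot value
  ToyTmp* src;   // the value being spilled
};

// Returns true if the spill can be erased; if so, also tells the caller
// whether the source just became unused (so its registers can be released).
bool eraseIfDead(const ToySpill& spill, bool& srcNowUnused) {
  if (spill.dst->getUseCount() != 0) return false;
  srcNowUnused = (spill.src->decUseCount() == 0);
  return true;
}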
Example 4: simplifyNot
SSATmp* Simplifier::simplifyNot(SSATmp* src) {
IRInstruction* inst = src->getInstruction();
Opcode op = inst->getOpcode();
// TODO: Add more algebraic simplification rules for NOT
switch (op) {
case ConvToBool:
return simplifyNot(inst->getSrc(0));
case OpXor: {
// !!X --> bool(X)
if (isNotInst(inst->getSrc(0))) {
return m_tb->genConvToBool(inst->getSrc(0));
}
break;
}
// !(X cmp Y) --> X opposite_cmp Y
case OpLt:
case OpLte:
case OpGt:
case OpGte:
case OpEq:
case OpNeq:
case OpSame:
case OpNSame:
// XXX: this could technically be losing a ConvToBool, except
// that we kinda know "not" instructions (Xor with 1) are always
// going to be followed by ConvToBool.
//
// TODO(#2058865): This would make more sense with a real Not
// instruction and allowing boolean output types for query ops.
return m_tb->genCmp(negateQueryOp(op),
inst->getSrc(0),
inst->getSrc(1));
case InstanceOf:
case NInstanceOf:
case InstanceOfBitmask:
case NInstanceOfBitmask:
// TODO: combine this with the above check and use isQueryOp or
// add an isNegatable.
return m_tb->gen(negateQueryOp(op),
inst->getNumSrcs(),
inst->getSrcs().begin());
// TODO !(X | non_zero) --> 0
default: (void)op;
}
return nullptr;
}
Example 5: initInstructions
void initInstructions(Trace* trace, IRInstruction::List& wl) {
IRInstruction::List instructions = trace->getInstructionList();
IRInstruction::Iterator it;
bool unreachable = false;
TRACE(5, "DCE:vvvvvvvvvvvvvvvvvvvv\n");
for (it = instructions.begin(); it != instructions.end(); it++) {
IRInstruction* inst = *it;
ASSERT(inst->getParent() == trace);
Simplifier::copyProp(inst);
// if this is a load that does not generate a guard, then get rid
// of its label so that it's not an essential control-flow
// instruction
if (isUnguardedLoad(inst)) {
// LdStack and LdLoc instructions that produce generic types
// and LdStack instructions that produce Cell types will not
// generate guards, so remove the label from this instruction so
// that it's no longer an essential control-flow instruction
inst->setLabel(NULL);
}
Opcode opc = inst->getOpcode();
// decref of anything that isn't ref counted is a nop
if ((opc == DecRef || opc == DecRefNZ) && !isRefCounted(inst->getSrc(0))) {
inst->setId(DEAD);
continue;
}
if (!unreachable && inst->isControlFlowInstruction()) {
// mark the destination label so that the destination trace
// is marked reachable
inst->getLabel()->setId(LIVE);
}
if (!unreachable && isEssential(inst)) {
inst->setId(LIVE);
wl.push_back(inst);
} else {
if (moduleEnabled(HPHP::Trace::hhir, 5)) {
std::ostringstream ss1;
inst->printSrcs(ss1);
TRACE(5, "DCE: %s\n", ss1.str().c_str());
std::ostringstream ss2;
inst->print(ss2);
TRACE(5, "DCE: %s\n", ss2.str().c_str());
}
inst->setId(DEAD);
}
if (inst->getOpcode() == Jmp_) {
unreachable = true;
}
}
TRACE(5, "DCE:^^^^^^^^^^^^^^^^^^^^\n");
}
Example 6: hoistGuardToLoad
/*
* Looks for whether the value in tmp was defined by a load, and if
* so, changes that load into a load that guards on the given
* type. Returns true if it succeeds.
*/
static bool hoistGuardToLoad(SSATmp* tmp, Type type) {
IRInstruction* inst = tmp->getInstruction();
switch (inst->getOpcode()) {
case Mov:
case IncRef:
{
// if inst is an incref or move, then chase down its src
if (hoistGuardToLoad(inst->getSrc(0), type)) {
// guard was successfully attached to a load instruction
// refine the type of this mov/incref
// Note: We can also further simplify incref's here if type is not
// ref-counted
tmp->setType(type);
inst->setTypeParam(type);
return true;
}
break;
}
case LdLoc:
case LdStack:
case LdMem:
case LdProp:
case LdRef:
case LdClsCns:
{
if (!inst->getTaken()) {
// Not a control flow instruction, so can't give it check semantics
break;
}
Type instType = tmp->getType();
if (instType == Type::Gen ||
(instType == Type::Cell && !type.isBoxed())) {
tmp->setType(type);
inst->setTypeParam(type);
return true;
}
break;
}
default:
break;
}
return false;
}
Example 7: removeUnusedSpills
void LinearScan::removeUnusedSpills() {
for (SlotInfo& slot : m_slots) {
IRInstruction* spill = slot.m_spillTmp->getInstruction();
if (spill->getDst()->getUseCount() == 0) {
Block* block = spill->getBlock();
block->erase(block->iteratorTo(spill));
SSATmp* src = spill->getSrc(0);
if (src->decUseCount() == 0) {
Opcode srcOpc = src->getInstruction()->getOpcode();
// Not all instructions are able to take noreg as their dest
// reg. We pick LdLoc and IncRef because they occur often.
if (srcOpc == IncRef || srcOpc == LdLoc) {
for (int i = 0, n = src->numNeededRegs(); i < n; ++i) {
src->setReg(InvalidReg, i);
}
}
}
}
}
}
Example 8: allocRegsToTrace
void LinearScan::allocRegsToTrace() {
// First, visit every instruction, allocating registers as we go,
// and inserting Reload instructions where necessary.
for (Block* block : m_blocks) {
// clear remembered reloads that don't dominate this block
for (SlotInfo& slot : m_slots) {
if (SSATmp* reload = slot.m_latestReload) {
if (!dominates(reload->getInstruction()->getBlock(), block, m_idoms)) {
slot.m_latestReload = nullptr;
}
}
}
for (auto it = block->begin(), end = block->end(); it != end; ++it) {
allocRegToInstruction(it);
if (RuntimeOption::EvalDumpIR > 3) {
std::cout << "--- allocated to instruction: ";
it->print(std::cout);
std::cout << "\n";
}
}
}
// Now that we have visited all instructions and inserted Reloads
// for SSATmps which needed to be spilled, we can go back and insert
// the spills. All uses must have been visited before we do this.
// For each spill slot, insert the spill right after the instruction
// that generated the value (without traversing everything else).
for (SlotInfo& slot : m_slots) {
IRInstruction* spill = slot.m_spillTmp->getInstruction();
IRInstruction* inst = spill->getSrc(0)->getInstruction();
Block* block = inst->getBlock();
if (inst->isBlockEnd()) {
block->getNext()->prepend(spill);
} else {
auto pos = block->iteratorTo(inst);
block->insert(++pos, spill);
}
}
}
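The second loop above places each Spill immediately after the instruction that produced the spilled value, except when that instruction ends its block, in which case the Spill is prepended to the next block. A toy sketch of that placement rule, using stand-in types for illustration only:

#include <algorithm>
#include <iterator>
#include <list>

// Toy sketch of where the pass above inserts each Spill (illustration only).
struct ToyBlock;

struct ToyInst {
  bool endsBlock = false;
  ToyBlock* block = nullptr;
};

struct ToyBlock {
  std::list<ToyInst*> insts;
  ToyBlock* next = nullptr;
  void prepend(ToyInst* i) { insts.push_front(i); }
};

void insertSpillAfterDef(ToyInst* def, ToyInst* spill) {
  ToyBlock* b = def->block;
  if (def->endsBlock) {
    b->next->prepend(spill);                 // definer ends the block: spill starts the successor
    return;
  }
  auto pos = std::find(b->insts.begin(), b->insts.end(), def);
  b->insts.insert(std::next(pos), spill);    // otherwise: immediately after the definer
}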
Example 9: optimizeRefCount
// Perform the following transformations:
// 1) Change all unconsumed IncRefs to Mov.
// 2) Mark a conditionally dead DecRefNZ as live if its corresponding IncRef
// cannot be eliminated.
void optimizeRefCount(Trace* trace) {
IRInstruction::List& instList = trace->getInstructionList();
for (IRInstruction::Iterator it = instList.begin();
it != instList.end();
++it) {
IRInstruction* inst = *it;
if (inst->getOpcode() == IncRef &&
inst->getId() != REFCOUNT_CONSUMED &&
inst->getId() != REFCOUNT_CONSUMED_OFF_TRACE) {
inst->setOpcode(Mov);
inst->setId(DEAD);
}
if (inst->getOpcode() == DecRefNZ) {
IRInstruction* srcInst = inst->getSrc(0)->getInstruction();
if (srcInst->getId() == REFCOUNT_CONSUMED ||
srcInst->getId() == REFCOUNT_CONSUMED_OFF_TRACE) {
inst->setId(LIVE);
}
}
// Do copyProp last. When processing DecRefNZs, we still need to look at
// their sources, which should not be trampled over.
Simplifier::copyProp(inst);
}
}
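As a rough illustration of the two transformations described in the header comment, the toy model below condenses them to their essence. In the real pass the REFCOUNT_CONSUMED ids are assigned earlier, during dead-code elimination, so this is an assumption-laden sketch rather than the actual logic:

// Toy model of the two transformations above (illustration only).
enum class RefId { Unconsumed, Consumed, ConsumedOffTrace };

struct ToyIncRef   { RefId id = RefId::Unconsumed; bool nowMov = false; bool dead = false; };
struct ToyDecRefNZ { ToyIncRef* srcIncRef = nullptr; bool live = false; };

void optimizeRefCountSketch(ToyIncRef& inc, ToyDecRefNZ& dec) {
  // 1) An IncRef that nothing consumed is demoted to a Mov and marked dead.
  if (inc.id == RefId::Unconsumed) { inc.nowMov = true; inc.dead = true; }
  // 2) A DecRefNZ stays live if the IncRef that produced its source was consumed.
  if (dec.srcIncRef->id == RefId::Consumed ||
      dec.srcIncRef->id == RefId::ConsumedOffTrace) {
    dec.live = true;
  }
}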
Example 10: rematerializeAux
void LinearScan::rematerializeAux(Trace* trace,
SSATmp* curSp,
SSATmp* curFp,
std::vector<SSATmp*> localValues) {
IRInstruction::List& instList = trace->getInstructionList();
for (IRInstruction::Iterator it = instList.begin();
it != instList.end();
++it) {
IRInstruction* inst = *it;
Opcode opc = inst->getOpcode();
SSATmp* dst = inst->getDst();
if (opc == DefFP || opc == FreeActRec) {
curFp = dst;
ASSERT(dst && dst->getReg() == rVmFp);
}
if (opc == Reload) {
// s = Spill t0
// t = Reload s
SSATmp* spilledTmp = getSpilledTmp(dst);
IRInstruction* spilledInst = spilledTmp->getInstruction();
IRInstruction* newInst = NULL;
if (spilledInst->isRematerializable() ||
(spilledInst->getOpcode() == LdStack &&
spilledInst->getSrc(0) == curSp)) {
// XXX: could change <newInst> to the non-check version.
// Rematerialize those rematerializable instructions (i.e.,
// isRematerializable returns true) and LdStack.
newInst = spilledInst->clone(m_irFactory);
// The new instruction needn't have an exit label, because it is always
// dominated by the original instruction.
newInst->setLabel(NULL);
} else {
// Rematerialize LdLoc.
std::vector<SSATmp*>::iterator pos =
std::find(localValues.begin(),
localValues.end(),
canonicalize(spilledTmp));
// Search for a local that stores the value of <spilledTmp>.
if (pos != localValues.end()) {
size_t locId = pos - localValues.begin();
ASSERT(curFp != NULL);
ConstInstruction constInst(curFp, Local(locId));
IRInstruction* ldHomeInst =
m_irFactory->cloneInstruction(&constInst);
newInst = m_irFactory->ldLoc(m_irFactory->getSSATmp(ldHomeInst),
dst->getType(),
NULL);
}
}
if (newInst) {
newInst->setDst(dst);
newInst->getDst()->setInstruction(newInst);
*it = newInst;
newInst->setParent(trace);
}
}
// Updating <curSp> and <localValues>.
if (dst && dst->getReg() == rVmSp) {
// <inst> modifies the stack pointer.
curSp = dst;
}
if (opc == LdLoc || opc == StLoc || opc == StLocNT) {
// dst = LdLoc home
// StLoc/StLocNT home, src
int locId = getLocalIdFromHomeOpnd(inst->getSrc(0));
SSATmp* localValue = (opc == LdLoc ? dst : inst->getSrc(1));
if (int(localValues.size()) < locId + 1) {
localValues.resize(locId + 1);
}
localValues[locId] = canonicalize(localValue);
}
if (inst->isControlFlowInstruction()) {
LabelInstruction* label = inst->getLabel();
if (label != NULL && label->getId() == inst->getId() + 1) {
rematerializeAux(label->getTrace(), curSp, curFp, localValues);
}
}
}
}
Example 11: computePreColoringHint
// XXX: to be refactored
// This function repeats the logic in the code generator to pre-color tmps
// that are going to be used in the next native call.
void LinearScan::computePreColoringHint() {
m_preColoringHint.clear();
IRInstruction* nextNative = getNextNative();
if (nextNative == NULL) {
return;
}
auto normalHint = [&](int count, int srcBase = 0, int argBase = 0) {
for (int i = 0; i < count; ++i) {
m_preColoringHint.add(nextNative->getSrc(i + srcBase), 0,
i + argBase);
}
};
switch (nextNative->getOpcode()) {
case Box:
if (nextNative->getSrc(0)->getType() == Type::Cell) {
m_preColoringHint.add(nextNative->getSrc(0), 1, 0);
}
m_preColoringHint.add(nextNative->getSrc(0), 0, 1);
break;
case LdObjMethod:
m_preColoringHint.add(nextNative->getSrc(1), 0, 1);
m_preColoringHint.add(nextNative->getSrc(0), 0, 2);
break;
case LdFunc:
m_preColoringHint.add(nextNative->getSrc(0), 0, 1);
break;
case NativeImpl:
m_preColoringHint.add(nextNative->getSrc(1), 0, 0);
break;
case Print:
m_preColoringHint.add(nextNative->getSrc(0), 0, 0);
break;
case AddElem:
if (nextNative->getSrc(1)->getType() == Type::Int &&
nextNative->getSrc(2)->getType() == Type::Int) {
normalHint(3, 0, 1);
} else {
m_preColoringHint.add(nextNative->getSrc(0), 0, 0);
m_preColoringHint.add(nextNative->getSrc(1), 0, 1);
m_preColoringHint.add(nextNative->getSrc(2), 0, 2);
m_preColoringHint.add(nextNative->getSrc(2), 1, 3);
}
break;
case AddNewElem:
m_preColoringHint.add(nextNative->getSrc(0), 0, 0);
m_preColoringHint.add(nextNative->getSrc(1), 0, 1);
m_preColoringHint.add(nextNative->getSrc(1), 1, 2);
break;
case Concat:
{
Type::Tag lType = nextNative->getSrc(0)->getType();
Type::Tag rType = nextNative->getSrc(1)->getType();
if ((Type::isString(lType) && Type::isString(rType)) ||
(Type::isString(lType) && rType == Type::Int) ||
(lType == Type::Int && Type::isString(rType))) {
m_preColoringHint.add(nextNative->getSrc(0), 0, 0);
m_preColoringHint.add(nextNative->getSrc(1), 0, 1);
} else {
m_preColoringHint.add(nextNative->getSrc(0), 0, 1);
m_preColoringHint.add(nextNative->getSrc(1), 0, 3);
}
}
break;
case ArrayAdd:
normalHint(2);
break;
case DefFunc:
normalHint(1);
break;
case CreateCont:
normalHint(4);
break;
case FillContLocals:
normalHint(4);
break;
case OpEq:
case OpNeq:
case OpSame:
case OpNSame:
{
auto src1 = nextNative->getSrc(0);
auto src2 = nextNative->getSrc(1);
auto type1 = src1->getType();
auto type2 = src2->getType();
if ((type1 == Type::Arr && type2 == Type::Arr)
|| (Type::isString(type1) && Type::isString(type2))
|| (Type::isString(type1) && !src1->isConst())
|| (type1 == Type::Obj && type2 == Type::Obj)) {
m_preColoringHint.add(src1, 0, 0);
m_preColoringHint.add(src2, 0, 1);
}
}
break;
case Conv:
{
//......... part of the code is omitted here .........
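The add(tmp, index, argNum) calls above take three parameters whose meaning is only implicit in the excerpt. A toy model of the hint table, reflecting the apparent convention that index selects the register part of a multi-register value (part 0/1 of a Cell/TypedValue, as in the Box case) and argNum names the native argument register, could be sketched as follows; this is an interpretation for illustration, not the real class:

#include <map>
#include <utility>

// Toy model of the pre-coloring hint table used above (illustration only).
struct ToyPreColoringHint {
  // key: (tmp id, register-part index), value: native argument register number
  std::map<std::pair<int, int>, int> hints;
  void clear() { hints.clear(); }
  void add(int tmpId, int partIndex, int argNum) { hints[{tmpId, partIndex}] = argNum; }
};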
Example 12: assignSpillLocAux
uint32 LinearScan::assignSpillLocAux(Trace* trace,
uint32 nextSpillLoc,
uint32 nextMmxReg) {
IRInstruction::List& instructionList = trace->getInstructionList();
for (IRInstruction::Iterator it = instructionList.begin();
it != instructionList.end();
++it) {
IRInstruction* inst = *it;
if (getNextNative() == inst) {
ASSERT(!m_natives.empty());
m_natives.pop_front();
}
if (inst->getOpcode() == Spill) {
SSATmp* dst = inst->getDst();
SSATmp* src = inst->getSrc(0);
for (int locIndex = 0;
locIndex < src->numNeededRegs();
++locIndex) {
if (dst->getLastUseId() <= getNextNativeId()) {
TRACE(3, "[counter] 1 spill a tmp that does not span native\n");
} else {
TRACE(3, "[counter] 1 spill a tmp that spans native\n");
}
const bool allowMmxSpill = RuntimeOption::EvalHHIREnableMmx &&
// The live range of the spill slot doesn't span native calls,
// and we still have free MMX registers.
dst->getLastUseId() <= getNextNativeId() &&
nextMmxReg < (uint32)NumMmxRegs;
dst->setSpillInfo(locIndex,
allowMmxSpill
? SpillInfo(RegNumber(nextMmxReg++))
: SpillInfo(nextSpillLoc++)
);
if (allowMmxSpill) {
TRACE(3, "[counter] 1 spill to mmx\n");
} else {
TRACE(3, "[counter] 1 spill to memory\n");
}
}
}
if (inst->getOpcode() == Reload) {
SSATmp* src = inst->getSrc(0);
for (int locIndex = 0;
locIndex < src->numNeededRegs();
++locIndex) {
if (src->getSpillInfo(locIndex).type() == SpillInfo::MMX) {
TRACE(3, "[counter] reload from mmx\n");
} else {
TRACE(3, "[counter] reload from memory\n");
}
}
}
if (inst->isControlFlowInstruction()) {
LabelInstruction* label = inst->getLabel();
if (label != NULL && label->getId() == inst->getId() + 1) {
nextSpillLoc = assignSpillLocAux(label->getTrace(),
nextSpillLoc,
nextMmxReg);
}
}
}
return nextSpillLoc;
}
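The core decision in this pass is whether a spilled value may live in an MMX register or must go to a memory slot. The following self-contained sketch restates that decision on its own; the toy signature is for illustration, while the real code operates on SSATmp spill info as shown above:

// Toy version of the spill-destination decision above (illustration only):
// spill to an MMX register when the value's live range does not span the next
// native call and free MMX registers remain; otherwise fall back to a memory slot.
struct SpillChoice { bool useMmx; unsigned slotOrReg; };

SpillChoice chooseSpillLoc(bool mmxEnabled, unsigned lastUseId, unsigned nextNativeId,
                           unsigned& nextMmxReg, unsigned numMmxRegs,
                           unsigned& nextSpillLoc) {
  const bool allowMmx = mmxEnabled &&
                        lastUseId <= nextNativeId &&  // does not span a native call
                        nextMmxReg < numMmxRegs;      // a free MMX register remains
  if (allowMmx) return { true, nextMmxReg++ };
  return { false, nextSpillLoc++ };
}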
Example 13: rematerializeAux
void LinearScan::rematerializeAux(Trace* trace,
SSATmp* curSp,
SSATmp* curFp,
std::vector<SSATmp*> localValues) {
IRInstruction::List& instList = trace->getInstructionList();
for (IRInstruction::Iterator it = instList.begin();
it != instList.end();
++it) {
IRInstruction* inst = *it;
Opcode opc = inst->getOpcode();
SSATmp* dst = inst->getDst();
if (opc == DefFP || opc == FreeActRec) {
curFp = dst;
assert(dst && dst->getReg() == rVmFp);
}
if (opc == Reload) {
// s = Spill t0
// t = Reload s
SSATmp* spilledTmp = getSpilledTmp(dst);
IRInstruction* spilledInst = spilledTmp->getInstruction();
IRInstruction* newInst = NULL;
if (spilledInst->isRematerializable() ||
(spilledInst->getOpcode() == LdStack &&
spilledInst->getSrc(0) == curSp)) {
// XXX: could change <newInst> to the non-check version.
// Rematerialize those rematerializable instructions (i.e.,
// isRematerializable returns true) and LdStack.
newInst = spilledInst->clone(m_irFactory);
// The new instruction needn't have an exit label, because it is always
// dominated by the original instruction.
newInst->setLabel(NULL);
} else {
// Rematerialize LdLoc.
std::vector<SSATmp*>::iterator pos =
std::find(localValues.begin(),
localValues.end(),
canonicalize(spilledTmp));
// Search for a local that stores the value of <spilledTmp>.
if (pos != localValues.end()) {
size_t locId = pos - localValues.begin();
assert(curFp != NULL);
ConstInstruction constInst(curFp, Local(locId));
IRInstruction* ldHomeInst =
m_irFactory->cloneInstruction(&constInst);
newInst = m_irFactory->gen(LdLoc,
dst->getType(),
m_irFactory->getSSATmp(ldHomeInst));
}
}
if (newInst) {
UNUSED Type::Tag oldType = dst->getType();
newInst->setDst(dst);
dst->setInstruction(newInst);
assert(outputType(newInst) == oldType);
*it = newInst;
newInst->setParent(trace);
}
}
// Updating <curSp> and <localValues>.
if (dst && dst->getReg() == rVmSp) {
// <inst> modifies the stack pointer.
curSp = dst;
}
if (opc == LdLoc || opc == StLoc || opc == StLocNT) {
// dst = LdLoc home
// StLoc/StLocNT home, src
int locId = getLocalIdFromHomeOpnd(inst->getSrc(0));
// Note that when we implement inlining, we will need to deal
// with the new local id space of the inlined function.
SSATmp* localValue = (opc == LdLoc ? dst : inst->getSrc(1));
if (int(localValues.size()) < locId + 1) {
localValues.resize(locId + 1);
}
localValues[locId] = canonicalize(localValue);
}
// Other instructions that may have side effects on locals must
// kill the local variable values.
else if (opc == IterInit) {
int valLocId = inst->getSrc(3)->getConstValAsInt();
localValues[valLocId] = NULL;
if (inst->getNumSrcs() == 5) {
int keyLocId = inst->getSrc(4)->getConstValAsInt();
localValues[keyLocId] = NULL;
}
} else if (opc == IterNext) {
int valLocId = inst->getSrc(2)->getConstValAsInt();
localValues[valLocId] = NULL;
if (inst->getNumSrcs() == 4) {
int keyLocId = inst->getSrc(3)->getConstValAsInt();
localValues[keyLocId] = NULL;
}
}
if (inst->isControlFlowInstruction()) {
LabelInstruction* label = inst->getLabel();
if (label != NULL && label->getId() == inst->getId() + 1) {
rematerializeAux(label->getParent(), curSp, curFp, localValues);
}
}
//......... part of the code is omitted here .........
Example 14: eliminateDeadCode
void eliminateDeadCode(Trace* trace, IRFactory* irFactory) {
IRInstruction::List wl; // worklist of live instructions
Trace::List& exitTraces = trace->getExitTraces();
// first mark all exit traces as unreachable by setting the id on
// their labels to 0
for (Trace::Iterator it = exitTraces.begin();
it != exitTraces.end();
it++) {
Trace* trace = *it;
trace->getLabel()->setId(DEAD);
}
// mark the essential instructions and add them to the initial
// work list; also mark the exit traces that are reachable by
// any control flow instruction in the main trace.
initInstructions(trace, wl);
for (Trace::Iterator it = exitTraces.begin();
it != exitTraces.end();
it++) {
// only process those exit traces that are reachable from
// the main trace
Trace* trace = *it;
if (trace->getLabel()->getId() != DEAD) {
initInstructions(trace, wl);
}
}
// process the worklist
while (!wl.empty()) {
IRInstruction* inst = wl.front();
wl.pop_front();
for (uint32 i = 0; i < inst->getNumSrcs(); i++) {
SSATmp* src = inst->getSrc(i);
if (src->getInstruction()->isDefConst()) {
continue;
}
IRInstruction* srcInst = src->getInstruction();
if (srcInst->getId() == DEAD) {
srcInst->setId(LIVE);
wl.push_back(srcInst);
}
// <inst> consumes <srcInst> which is an IncRef,
// so we mark <srcInst> as REFCOUNT_CONSUMED.
if (inst->consumesReference(i) && srcInst->getOpcode() == IncRef) {
if (inst->getParent()->isMain() || !srcInst->getParent()->isMain()) {
// <srcInst> is consumed from its own trace.
srcInst->setId(REFCOUNT_CONSUMED);
} else {
// <srcInst> is consumed off trace.
if (srcInst->getId() != REFCOUNT_CONSUMED) {
// mark <srcInst> as REFCOUNT_CONSUMED_OFF_TRACE unless it is
// also consumed from its own trace.
srcInst->setId(REFCOUNT_CONSUMED_OFF_TRACE);
}
}
}
}
}
// Optimize IncRefs and DecRefs.
optimizeRefCount(trace);
for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); ++it) {
optimizeRefCount(*it);
}
if (RuntimeOption::EvalHHIREnableSinking) {
// Sink IncRefs consumed off trace.
IRInstruction::List toSink;
sinkIncRefs(trace, irFactory, toSink);
}
// now remove instructions whose id == DEAD
removeDeadInstructions(trace);
for (Trace::Iterator it = exitTraces.begin(); it != exitTraces.end(); it++) {
removeDeadInstructions(*it);
}
// If main trace ends with an unconditional jump, copy the target of
// the jump to the end of the trace
IRInstruction::List& instList = trace->getInstructionList();
IRInstruction::Iterator lastInst = instList.end();
lastInst--; // go back to the last instruction
IRInstruction* jmpInst = *lastInst;
if (jmpInst->getOpcode() == Jmp_) {
Trace* targetTrace = jmpInst->getLabel()->getTrace();
IRInstruction::List& targetInstList = targetTrace->getInstructionList();
IRInstruction::Iterator instIter = targetInstList.begin();
instIter++; // skip over label
// update the parent trace of the moved instructions
for (IRInstruction::Iterator it = instIter;
it != targetInstList.end();
++it) {
(*it)->setParent(trace);
}
instList.splice(lastInst, targetInstList, instIter, targetInstList.end());
// delete the jump instruction
instList.erase(lastInst);
}
// If main trace ends with a conditional jump with no side-effects on exit,
//......... part of the code is omitted here .........
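The marking phase above follows the classic worklist formulation of dead-code elimination. A compact, self-contained sketch of that formulation with toy types (not the HHVM classes) looks like this:

#include <deque>
#include <vector>

// Compact worklist DCE marking (illustration only): seed with essential
// instructions, then propagate liveness backwards to the instructions that
// define their sources.
struct ToyInst {
  bool essential = false;                // side effects, control flow, etc.
  bool live = false;
  std::vector<ToyInst*> srcDefiners;     // definers of this instruction's sources
};

void markLive(std::vector<ToyInst*>& all) {
  std::deque<ToyInst*> wl;
  for (ToyInst* inst : all) {
    if (inst->essential) { inst->live = true; wl.push_back(inst); }
  }
  while (!wl.empty()) {
    ToyInst* inst = wl.front(); wl.pop_front();
    for (ToyInst* def : inst->srcDefiners) {
      if (!def->live) { def->live = true; wl.push_back(def); }
    }
  }
  // Anything still !live afterwards corresponds to the id == DEAD case above.
}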
Example 15: computePreColoringHint
void LinearScan::computePreColoringHint() {
m_preColoringHint.clear();
IRInstruction* inst = getNextNative();
if (inst == nullptr) {
return;
}
Opcode opc = inst->getOpcode();
using namespace NativeCalls;
if (CallMap::hasInfo(opc)) {
unsigned reg = 0;
for (auto const& arg : CallMap::getInfo(opc).args) {
switch (arg.type) {
case SSA:
m_preColoringHint.add(inst->getSrc(arg.srcIdx), 0, reg++);
break;
case TV:
case VecKeyS:
case VecKeyIS:
m_preColoringHint.add(inst->getSrc(arg.srcIdx), 0, reg++);
m_preColoringHint.add(inst->getSrc(arg.srcIdx), 1, reg++);
break;
}
}
return;
}
// For instructions that want to hint a contiguous, increasing range
// of sources to a contiguous, increasing range of argument
// registers.
auto normalHint = [&](int count, int srcBase = 0, int argBase = 0) {
for (int i = 0; i < count; ++i) {
m_preColoringHint.add(inst->getSrc(i + srcBase), 0,
i + argBase);
}
};
switch (opc) {
case LdFunc:
m_preColoringHint.add(inst->getSrc(0), 0, 1);
break;
case NativeImpl:
m_preColoringHint.add(inst->getSrc(1), 0, 0);
break;
case Concat:
{
Type lType = inst->getSrc(0)->getType();
Type rType = inst->getSrc(1)->getType();
if ((lType.isString() && rType.isString()) ||
(lType.isString() && rType == Type::Int) ||
(lType == Type::Int && rType.isString())) {
m_preColoringHint.add(inst->getSrc(0), 0, 0);
m_preColoringHint.add(inst->getSrc(1), 0, 1);
} else {
m_preColoringHint.add(inst->getSrc(0), 0, 1);
m_preColoringHint.add(inst->getSrc(1), 0, 3);
}
}
break;
case AKExists:
normalHint(2);
break;
case DefFunc:
normalHint(1);
break;
case OpEq:
case OpNeq:
case OpSame:
case OpNSame:
{
auto src1 = inst->getSrc(0);
auto src2 = inst->getSrc(1);
auto type1 = src1->getType();
auto type2 = src2->getType();
if ((type1.isArray() && type2.isArray())
|| (type1.isString() && type2.isString())
|| (type1.isString() && !src1->isConst())
|| (type1 == Type::Obj && type2 == Type::Obj)) {
m_preColoringHint.add(src1, 0, 0);
m_preColoringHint.add(src2, 0, 1);
}
}
break;
case IterInit:
{
m_preColoringHint.add(inst->getSrc(0), 0, 1);
}
break;
case ConvToArr:
break;
case ConvToBool:
{
SSATmp* src = inst->getSrc(0);
Type fromType = src->getType();
if (fromType == Type::Cell) {
m_preColoringHint.add(src, 0, 0);
m_preColoringHint.add(src, 1, 1);
} else if (fromType == Type::Str ||
fromType == Type::StaticStr ||
//......... part of the code is omitted here .........