This article collects typical usage examples of the C++ method MDefinition::block. If you are wondering what MDefinition::block does in C++ and how to use it, the hand-picked code samples below may help. You can also explore further usage examples of the containing class, MDefinition.
A total of 14 code examples of MDefinition::block are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
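Before the individual examples, the following minimal sketch distills the two patterns that recur throughout them: testing block() for null to detect a freshly folded definition that has not been inserted into the graph yet, and comparing blocks to decide where a replacement may stand in for an existing definition. It assumes the IonMonkey MIR classes used in the examples (MDefinition, MBasicBlock, MInstruction); the helper name InsertNextTo is ours, for illustration only, and not part of the SpiderMonkey API.

// A minimal sketch, assuming the IonMonkey MIR classes used in the examples
// below. The helper name is illustrative only.
static void
InsertNextTo(MDefinition* def, MDefinition* replacement)
{
    // A definition produced by constant folding has no block yet; place it
    // right after |def| so it is defined wherever |def| was (compare
    // Examples 4 and 10 below).
    if (!replacement->block())
        def->block()->insertAfter(def->toInstruction(), replacement->toInstruction());

    // Once both have blocks, |replacement| can stand in for |def| only at
    // points that its block dominates (compare Example 3 below).
    MOZ_ASSERT(replacement->block()->dominates(def->block()));
}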
Example 1: getSlot
void
MBasicBlock::inheritPhis(MBasicBlock* header)
{
    MResumePoint* headerRp = header->entryResumePoint();
    size_t stackDepth = headerRp->stackDepth();
    for (size_t slot = 0; slot < stackDepth; slot++) {
        MDefinition* exitDef = getSlot(slot);
        MDefinition* loopDef = headerRp->getOperand(slot);
        if (loopDef->block() != header) {
            MOZ_ASSERT(loopDef->block()->id() < header->id());
            MOZ_ASSERT(loopDef == exitDef);
            continue;
        }

        // Phis are allocated by NewPendingLoopHeader.
        MPhi* phi = loopDef->toPhi();
        MOZ_ASSERT(phi->numOperands() == 2);

        // The entry definition is always the leftmost input to the phi.
        MDefinition* entryDef = phi->getOperand(0);

        if (entryDef != exitDef)
            continue;

        // If the entryDef is the same as exitDef, then we must propagate the
        // phi down to this successor. This chance was missed as part of
        // setBackedge() because exits are not captured in resume points.
        setSlot(slot, phi);
    }
}
Example 2: entryResumePoint
bool
MBasicBlock::inheritPhisFromBackedge(MBasicBlock* backedge, bool* hadTypeChange)
{
    // We must be a pending loop header.
    MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

    size_t stackDepth = entryResumePoint()->stackDepth();
    for (size_t slot = 0; slot < stackDepth; slot++) {
        // Get the value stack-slot of the back edge.
        MDefinition* exitDef = backedge->getSlot(slot);

        // Get the value of the loop header.
        MDefinition* loopDef = entryResumePoint()->getOperand(slot);
        if (loopDef->block() != this) {
            // If we are finishing a pending loop header, then we need to ensure
            // that all operands are phis. This is usually the case, except for
            // objects/arrays built with generators, in which case we share the
            // same allocations across all blocks.
            MOZ_ASSERT(loopDef->block()->id() < id());
            MOZ_ASSERT(loopDef == exitDef);
            continue;
        }

        // Phis are allocated by NewPendingLoopHeader.
        MPhi* entryDef = loopDef->toPhi();
        MOZ_ASSERT(entryDef->block() == this);

        if (entryDef == exitDef) {
            // If the exit def is the same as the entry def, make a redundant
            // phi. Since loop headers have exactly two incoming edges, we
            // know that that's just the first input.
            //
            // Note that we eliminate later rather than now, to avoid any
            // weirdness around pending continue edges which might still hold
            // onto phis.
            exitDef = entryDef->getOperand(0);
        }

        bool typeChange = false;

        if (!entryDef->addInputSlow(exitDef))
            return false;
        if (!entryDef->checkForTypeChange(exitDef, &typeChange))
            return false;
        *hadTypeChange |= typeChange;
        setSlot(slot, entryDef);
    }

    return true;
}
Example 3: JitSpew
// If an equivalent and dominating value already exists in the set, return it.
// Otherwise insert |def| into the set and return it.
MDefinition*
ValueNumberer::leader(MDefinition* def)
{
    // If the value isn't suitable for eliminating, don't bother hashing it. The
    // convention is that congruentTo returns false for node kinds that wish to
    // opt out of redundancy elimination.
    // TODO: It'd be nice to clean up that convention (bug 1031406).
    if (!def->isEffectful() && def->congruentTo(def)) {
        // Look for a match.
        VisibleValues::AddPtr p = values_.findLeaderForAdd(def);
        if (p) {
            MDefinition* rep = *p;
            if (!rep->isDiscarded() && rep->block()->dominates(def->block())) {
                // We found a dominating congruent value.
                return rep;
            }

            // The congruent value doesn't dominate. It never will again in this
            // dominator tree, so overwrite it.
            values_.overwrite(p, def);
        } else {
            // No match. Add a new entry.
            if (!values_.add(p, def))
                return nullptr;
        }

#ifdef DEBUG
        JitSpew(JitSpew_GVN, " Recording %s%u", def->opName(), def->id());
#endif
    }

    return def;
}
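Example 3 hinges on MDefinition::block() to gate value numbering on dominance: a recorded value may only replace |def| if its block dominates |def|'s block. The self-contained sketch below restates that lookup outside of SpiderMonkey. The Block and Def types, the preorder dominator intervals, and the hash-keyed map are simplified stand-ins for VisibleValues and congruentTo, so treat it as an illustration of the idea rather than the real data structure.

// A simplified, self-contained sketch of a dominance-gated leader lookup.
#include <cstddef>
#include <unordered_map>

struct Block {
    // Dominator-tree preorder interval: this block dominates |other| iff
    // |other|'s interval is nested inside [domStart, domEnd].
    size_t domStart = 0;
    size_t domEnd = 0;

    bool dominates(const Block* other) const {
        return domStart <= other->domStart && other->domEnd <= domEnd;
    }
};

struct Def {
    Block* block = nullptr;
    size_t valueHash = 0;  // stand-in for the congruence class computed by congruentTo
};

struct ValueSet {
    std::unordered_map<size_t, Def*> leaders;

    // Return a dominating congruent definition if one exists; otherwise make
    // |def| the new leader for its congruence class and return it, mirroring
    // the overwrite behaviour in Example 3.
    Def* leader(Def* def) {
        auto it = leaders.find(def->valueHash);
        if (it != leaders.end() && it->second->block->dominates(def->block))
            return it->second;
        leaders[def->valueHash] = def;
        return def;
    }
};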
Example 4: IonSpew
MDefinition *
ValueNumberer::simplify(MDefinition *def, bool useValueNumbers)
{
    if (def->isEffectful())
        return def;

    MDefinition *ins = def->foldsTo(useValueNumbers);
    if (ins == def || !ins->updateForFolding(def))
        return def;

    // Ensure this instruction has a VN.
    if (!ins->valueNumberData())
        ins->setValueNumberData(new ValueNumberData);

    if (!ins->block()) {
        // In this case, we made a new def by constant folding, for
        // example, we replaced add(#3,#4) with a new const(#7) node.

        // We will only fold a phi into one of its operands.
        JS_ASSERT(!def->isPhi());

        def->block()->insertAfter(def->toInstruction(), ins->toInstruction());
        ins->setValueNumber(lookupValue(ins));
    }

    JS_ASSERT(ins->id() != 0);

    def->replaceAllUsesWith(ins);

    IonSpew(IonSpew_GVN, "Folding %d to be %d", def->id(), ins->id());
    return ins;
}
Example 5:
void
TypeAnalyzer::adjustPhiInputs(MPhi *phi)
{
    MIRType phiType = phi->type();

    if (phiType == MIRType_Double) {
        // Convert int32 operands to double.
        for (size_t i = 0; i < phi->numOperands(); i++) {
            MDefinition *in = phi->getOperand(i);

            if (in->type() == MIRType_Int32) {
                MToDouble *toDouble = MToDouble::New(in);
                in->block()->insertBefore(in->block()->lastIns(), toDouble);
                phi->replaceOperand(i, toDouble);
            } else {
                JS_ASSERT(in->type() == MIRType_Double);
            }
        }
        return;
    }

    if (phiType != MIRType_Value)
        return;

    // Box every typed input.
    for (size_t i = 0; i < phi->numOperands(); i++) {
        MDefinition *in = phi->getOperand(i);
        if (in->type() == MIRType_Value)
            continue;

        if (in->isUnbox()) {
            // The input is being explicitly unboxed, so sneak past and grab
            // the original box.
            phi->replaceOperand(i, in->toUnbox()->input());
        } else {
            MBox *box = MBox::New(in);
            in->block()->insertBefore(in->block()->lastIns(), box);
            phi->replaceOperand(i, box);
        }
    }
}
Example 6: getSlot
bool
MBasicBlock::addPredecessorPopN(TempAllocator& alloc, MBasicBlock* pred, uint32_t popped)
{
    MOZ_ASSERT(pred);
    MOZ_ASSERT(predecessors_.length() > 0);

    // Predecessors must be finished, and at the correct stack depth.
    MOZ_ASSERT(pred->hasLastIns());
    MOZ_ASSERT(pred->stackPosition_ == stackPosition_ + popped);

    for (uint32_t i = 0, e = stackPosition_; i < e; ++i) {
        MDefinition* mine = getSlot(i);
        MDefinition* other = pred->getSlot(i);

        if (mine != other) {
            // If the current instruction is a phi, and it was created in this
            // basic block, then we have already placed this phi and should
            // instead append to its operands.
            if (mine->isPhi() && mine->block() == this) {
                MOZ_ASSERT(predecessors_.length());
                if (!mine->toPhi()->addInputSlow(other))
                    return false;
            } else {
                // Otherwise, create a new phi node.
                MPhi* phi;
                if (mine->type() == other->type())
                    phi = MPhi::New(alloc.fallible(), mine->type());
                else
                    phi = MPhi::New(alloc.fallible());
                if (!phi)
                    return false;
                addPhi(phi);

                // Prime the phi for each predecessor, so input(x) comes from
                // predecessor(x).
                if (!phi->reserveLength(predecessors_.length() + 1))
                    return false;

                for (size_t j = 0, numPreds = predecessors_.length(); j < numPreds; ++j) {
                    MOZ_ASSERT(predecessors_[j]->getSlot(i) == mine);
                    phi->addInput(mine);
                }
                phi->addInput(other);

                setSlot(i, phi);
                if (entryResumePoint())
                    entryResumePoint()->replaceOperand(i, phi);
            }
        }
    }

    return predecessors_.append(pred);
}
Example 7: i
static void
UnboxSimdPhi(const JitCompartment* jitCompartment, MIRGraph& graph, MPhi* phi, SimdType unboxType)
{
    TempAllocator& alloc = graph.alloc();

    // Unbox and replace all operands.
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
        MDefinition* op = phi->getOperand(i);
        MSimdUnbox* unbox = MSimdUnbox::New(alloc, op, unboxType);
        op->block()->insertAtEnd(unbox);
        phi->replaceOperand(i, unbox);
    }

    // Change the MIRType of the Phi.
    MIRType mirType = SimdTypeToMIRType(unboxType);
    phi->setResultType(mirType);

    MBasicBlock* phiBlock = phi->block();
    MInstruction* atRecover = phiBlock->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
    MInstruction* at = phiBlock->safeInsertTop(atRecover);

    // Note, we capture the uses-list now, as new instructions are not visited.
    MUseIterator i(phi->usesBegin()), e(phi->usesEnd());

    // Add a MSimdBox, and replace all the Phi uses with it.
    JSObject* templateObject = jitCompartment->maybeGetSimdTemplateObjectFor(unboxType);
    InlineTypedObject* inlineTypedObject = &templateObject->as<InlineTypedObject>();
    MSimdBox* recoverBox = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
    recoverBox->setRecoveredOnBailout();
    phiBlock->insertBefore(atRecover, recoverBox);

    MSimdBox* box = nullptr;
    while (i != e) {
        MUse* use = *i++;
        MNode* ins = use->consumer();
        if ((ins->isDefinition() && ins->toDefinition()->isRecoveredOnBailout()) ||
            (ins->isResumePoint() && ins->toResumePoint()->isRecoverableOperand(use)))
        {
            use->replaceProducer(recoverBox);
            continue;
        }

        if (!box) {
            box = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
            phiBlock->insertBefore(at, box);
        }

        use->replaceProducer(box);
    }
}
Example 8: limitSum
void
RangeAnalysis::analyzeLoopPhi(MBasicBlock *header, LoopIterationBound *loopBound, MPhi *phi)
{
    // Given a bound on the number of backedges taken, compute an upper and
    // lower bound for a phi node that may change by a constant amount each
    // iteration. Unlike for the case when computing the iteration bound
    // itself, the phi does not need to change the same amount every iteration,
    // but is required to change at most N and be either nondecreasing or
    // nonincreasing.

    if (phi->numOperands() != 2)
        return;

    MBasicBlock *preLoop = header->loopPredecessor();
    JS_ASSERT(!preLoop->isMarked() && preLoop->successorWithPhis() == header);

    MBasicBlock *backedge = header->backedge();
    JS_ASSERT(backedge->isMarked() && backedge->successorWithPhis() == header);

    MDefinition *initial = phi->getOperand(preLoop->positionInPhiSuccessor());
    if (initial->block()->isMarked())
        return;

    SimpleLinearSum modified = ExtractLinearSum(phi->getOperand(backedge->positionInPhiSuccessor()));
    if (modified.term != phi || modified.constant == 0)
        return;

    if (!phi->range())
        phi->setRange(new Range());

    LinearSum initialSum;
    if (!initialSum.add(initial, 1))
        return;

    // The phi may change by N each iteration, and is either nondecreasing or
    // nonincreasing. initial(phi) is either a lower or upper bound for the
    // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
    // at all points within the loop, provided that loopBound >= 0.
    //
    // We are more interested, however, in the bound for phi at points
    // dominated by the loop bound's test; if the test dominates e.g. a bounds
    // check we want to hoist from the loop, using the value of the phi at the
    // head of the loop for this will usually be too imprecise to hoist the
    // check. These points will execute only if the backedge executes at least
    // one more time (as the test passed and the test dominates the backedge),
    // so we know both that loopBound >= 1 and that the phi's value has changed
    // at most loopBound - 1 times. Thus, another upper or lower bound for the
    // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
    // ensure that loopBound >= 0.
    LinearSum limitSum(loopBound->sum);
    if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum))
        return;

    int32_t negativeConstant;
    if (!SafeSub(0, modified.constant, &negativeConstant) || !limitSum.add(negativeConstant))
        return;

    if (modified.constant > 0) {
        phi->range()->setSymbolicLower(new SymbolicBound(NULL, initialSum));
        phi->range()->setSymbolicUpper(new SymbolicBound(loopBound, limitSum));
    } else {
        phi->range()->setSymbolicUpper(new SymbolicBound(NULL, initialSum));
        phi->range()->setSymbolicLower(new SymbolicBound(loopBound, limitSum));
    }

    IonSpew(IonSpew_Range, "added symbolic range on %d", phi->id());
    SpewRange(phi);
}
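As a concrete illustration of the bound computed above (our example, not from the original source): for a counter phi that starts at 0 and grows by N = 1 each iteration, initialSum is just the initial value 0, and limitSum becomes loopBound * 1 + 0 - 1, i.e. loopBound - 1. Since modified.constant > 0, the phi receives the symbolic lower bound 0 and the symbolic upper bound loopBound - 1 at points dominated by the loop test, exactly as the first branch of the if/else above records.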
Example 9: lhs
LoopIterationBound *
RangeAnalysis::analyzeLoopIterationCount(MBasicBlock *header,
                                         MTest *test, BranchDirection direction)
{
    SimpleLinearSum lhs(NULL, 0);
    MDefinition *rhs;
    bool lessEqual;
    if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual))
        return NULL;

    // Ensure the rhs is a loop invariant term.
    if (rhs && rhs->block()->isMarked()) {
        if (lhs.term && lhs.term->block()->isMarked())
            return NULL;
        MDefinition *temp = lhs.term;
        lhs.term = rhs;
        rhs = temp;
        if (!SafeSub(0, lhs.constant, &lhs.constant))
            return NULL;
        lessEqual = !lessEqual;
    }

    JS_ASSERT_IF(rhs, !rhs->block()->isMarked());

    // Ensure the lhs is a phi node from the start of the loop body.
    if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header)
        return NULL;

    // Check that the value of the lhs changes by a constant amount with each
    // loop iteration. This requires that the lhs be written in every loop
    // iteration with a value that is a constant difference from its value at
    // the start of the iteration.
    if (lhs.term->toPhi()->numOperands() != 2)
        return NULL;

    // The first operand of the phi should be the lhs' value at the start of
    // the first executed iteration, and not a value written which could
    // replace the second operand below during the middle of execution.
    MDefinition *lhsInitial = lhs.term->toPhi()->getOperand(0);
    if (lhsInitial->block()->isMarked())
        return NULL;

    // The second operand of the phi should be a value written by an add/sub
    // in every loop iteration, i.e. in a block which dominates the backedge.
    MDefinition *lhsWrite = lhs.term->toPhi()->getOperand(1);
    if (lhsWrite->isBeta())
        lhsWrite = lhsWrite->getOperand(0);
    if (!lhsWrite->isAdd() && !lhsWrite->isSub())
        return NULL;
    if (!lhsWrite->block()->isMarked())
        return NULL;
    MBasicBlock *bb = header->backedge();
    for (; bb != lhsWrite->block() && bb != header; bb = bb->immediateDominator()) {}
    if (bb != lhsWrite->block())
        return NULL;

    SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);

    // Check that the value of the lhs at the backedge is of the form
    // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
    // of the iteration, and not that written to lhs in a previous iteration,
    // as such a previous value could not appear directly in the addition:
    // it could not be stored in lhs as the lhs add/sub executes in every
    // iteration, and if it were stored in another variable its use here would
    // be as an operand to a phi node for that variable.
    if (lhsModified.term != lhs.term)
        return NULL;

    LinearSum bound;

    if (lhsModified.constant == 1 && !lessEqual) {
        // The value of lhs is 'initial(lhs) + iterCount' and this will end
        // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
        // on the number of backedges executed is:
        //
        // initial(lhs) + iterCount + lhsN == rhs
        // iterCount == rhs - initial(lhs) - lhsN

        if (rhs) {
            if (!bound.add(rhs, 1))
                return NULL;
        }
        if (!bound.add(lhsInitial, -1))
            return NULL;

        int32_t lhsConstant;
        if (!SafeSub(0, lhs.constant, &lhsConstant))
            return NULL;
        if (!bound.add(lhsConstant))
            return NULL;
    } else if (lhsModified.constant == -1 && lessEqual) {
        // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
        // case, an upper bound on the number of backedges executed is:
        //
        // initial(lhs) - iterCount + lhsN == rhs
        // iterCount == initial(lhs) - rhs + lhsN

        if (!bound.add(lhsInitial, 1))
            return NULL;
//......... the rest of this code is omitted .........
Example 10: JitSpew
// Visit |def|.
bool
ValueNumberer::visitDefinition(MDefinition *def)
{
    // If this instruction has a dependency() into an unreachable block, we'll
    // need to update AliasAnalysis.
    MDefinition *dep = def->dependency();
    if (dep != nullptr && (dep->isDiscarded() || dep->block()->isDead())) {
        JitSpew(JitSpew_GVN, " AliasAnalysis invalidated");
        if (updateAliasAnalysis_ && !dependenciesBroken_) {
            // TODO: Recomputing alias-analysis could theoretically expose more
            // GVN opportunities.
            JitSpew(JitSpew_GVN, " Will recompute!");
            dependenciesBroken_ = true;
        }

        // Temporarily clear its dependency, to protect foldsTo, which may
        // wish to use the dependency to do store-to-load forwarding.
        def->setDependency(def->toInstruction());
    } else {
        dep = nullptr;
    }

    // Look for a simplified form of |def|.
    MDefinition *sim = simplified(def);
    if (sim != def) {
        if (sim == nullptr)
            return false;

        // If |sim| doesn't belong to a block, insert it next to |def|.
        if (sim->block() == nullptr)
            def->block()->insertAfter(def->toInstruction(), sim->toInstruction());

#ifdef DEBUG
        JitSpew(JitSpew_GVN, " Folded %s%u to %s%u",
                def->opName(), def->id(), sim->opName(), sim->id());
#endif
        ReplaceAllUsesWith(def, sim);

        // The node's foldsTo said |def| can be replaced by |sim|. If |def| is a
        // guard, then either |sim| is also a guard, or a guard isn't actually
        // needed, so we can clear |def|'s guard flag and let it be discarded.
        def->setNotGuardUnchecked();

        if (DeadIfUnused(def)) {
            if (!discardDefsRecursively(def))
                return false;
        }
        def = sim;
    }

    // Now that foldsTo is done, re-enable the original dependency. Even though
    // it may be pointing into a discarded block, it's still valid for the
    // purposes of detecting congruent loads.
    if (dep != nullptr)
        def->setDependency(dep);

    // Look for a dominating def which makes |def| redundant.
    MDefinition *rep = leader(def);
    if (rep != def) {
        if (rep == nullptr)
            return false;
        if (rep->updateForReplacement(def)) {
#ifdef DEBUG
            JitSpew(JitSpew_GVN,
                    " Replacing %s%u with %s%u",
                    def->opName(), def->id(), rep->opName(), rep->id());
#endif
            ReplaceAllUsesWith(def, rep);

            // The node's congruentTo said |def| is congruent to |rep|, and it's
            // dominated by |rep|. If |def| is a guard, it's covered by |rep|,
            // so we can clear |def|'s guard flag and let it be discarded.
            def->setNotGuardUnchecked();

            if (DeadIfUnused(def)) {
                // discardDef should not add anything to the deadDefs, as the
                // redundant operation should have the same input operands.
                mozilla::DebugOnly<bool> r = discardDef(def);
                MOZ_ASSERT(r, "discardDef shouldn't have tried to add anything to the worklist, "
                              "so it shouldn't have failed");
                MOZ_ASSERT(deadDefs_.empty(),
                           "discardDef shouldn't have added anything to the worklist");
            }
            def = rep;
        }
    }

    return true;
}
Example 11: new
MBasicBlock*
MBasicBlock::NewSplitEdge(MIRGraph& graph, MBasicBlock* pred, size_t predEdgeIdx, MBasicBlock* succ)
{
    MBasicBlock* split = nullptr;
    if (!succ->pc()) {
        // The successor does not have a PC, this is a Wasm compilation.
        split = MBasicBlock::New(graph, succ->info(), pred, SPLIT_EDGE);
        if (!split)
            return nullptr;
    } else {
        // The successor has a PC, this is an IonBuilder compilation.
        MResumePoint* succEntry = succ->entryResumePoint();

        BytecodeSite* site = new(graph.alloc()) BytecodeSite(succ->trackedTree(), succEntry->pc());
        split = new(graph.alloc()) MBasicBlock(graph, succ->info(), site, SPLIT_EDGE);

        if (!split->init())
            return nullptr;

        // A split edge is used to simplify the graph to avoid having a
        // predecessor with multiple successors as well as a successor with
        // multiple predecessors. As instructions can be moved in this
        // split-edge block, we need to give this block a resume point. To do
        // so, we copy the entry resume point of the successor and filter the
        // phis to keep inputs from the current edge.

        // Propagate the caller resume point from the inherited block.
        split->callerResumePoint_ = succ->callerResumePoint();

        // Split-edge blocks are created after the interpreter stack emulation.
        // Thus, there is no need for creating slots.
        split->stackPosition_ = succEntry->stackDepth();

        // Create a resume point using our initial stack position.
        MResumePoint* splitEntry = new(graph.alloc()) MResumePoint(split, succEntry->pc(),
                                                                   MResumePoint::ResumeAt);
        if (!splitEntry->init(graph.alloc()))
            return nullptr;
        split->entryResumePoint_ = splitEntry;

        // The target entry resume point might have phi operands, keep the
        // operands of the phi coming from our edge.
        size_t succEdgeIdx = succ->indexForPredecessor(pred);

        for (size_t i = 0, e = splitEntry->numOperands(); i < e; i++) {
            MDefinition* def = succEntry->getOperand(i);
            // This early in the pipeline, we have no recover instructions in
            // any entry resume point.
            MOZ_ASSERT_IF(def->block() == succ, def->isPhi());
            if (def->block() == succ)
                def = def->toPhi()->getOperand(succEdgeIdx);

            splitEntry->initOperand(i, def);
        }

        // This is done in the New variant for wasm, so we cannot keep this
        // line below, where the rest of the graph is modified.
        if (!split->predecessors_.append(pred))
            return nullptr;
    }

    split->setLoopDepth(succ->loopDepth());

    // Insert the split edge block in-between.
    split->end(MGoto::New(graph.alloc(), succ));

    graph.insertBlockAfter(pred, split);

    pred->replaceSuccessor(predEdgeIdx, split);
    succ->replacePredecessor(pred, split);
    return split;
}
Example 12: operands
bool
Sink(MIRGenerator* mir, MIRGraph& graph)
{
    TempAllocator& alloc = graph.alloc();
    bool sinkEnabled = mir->optimizationInfo().sinkEnabled();

    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Sink"))
            return false;

        for (MInstructionReverseIterator iter = block->rbegin(); iter != block->rend(); ) {
            MInstruction* ins = *iter++;

            // Only instructions which can be recovered on bailout can be moved
            // into the bailout paths.
            if (ins->isGuard() || ins->isGuardRangeBailouts() ||
                ins->isRecoveredOnBailout() || !ins->canRecoverOnBailout())
            {
                continue;
            }

            // Compute a common dominator for all uses of the current
            // instruction.
            bool hasLiveUses = false;
            bool hasUses = false;
            MBasicBlock* usesDominator = nullptr;
            for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e; i++) {
                hasUses = true;
                MNode* consumerNode = (*i)->consumer();
                if (consumerNode->isResumePoint())
                    continue;

                MDefinition* consumer = consumerNode->toDefinition();
                if (consumer->isRecoveredOnBailout())
                    continue;

                hasLiveUses = true;

                // If the consumer is a Phi, then we should dominate the
                // predecessor from which the value is coming.
                MBasicBlock* consumerBlock = consumer->block();
                if (consumer->isPhi())
                    consumerBlock = consumerBlock->getPredecessor(consumer->indexOf(*i));

                usesDominator = CommonDominator(usesDominator, consumerBlock);
                if (usesDominator == *block)
                    break;
            }

            // Leave this instruction for DCE.
            if (!hasUses)
                continue;

            // We have no live uses, so sink this instruction into all the
            // bailout paths.
            if (!hasLiveUses) {
                MOZ_ASSERT(!usesDominator);
                ins->setRecoveredOnBailout();
                JitSpewDef(JitSpew_Sink, " No live uses, recover the instruction on bailout\n", ins);
                continue;
            }

            // This guard is temporarily placed here because the above code deals
            // with Dead Code Elimination, which got moved into this Sink phase,
            // as Dead Code Elimination used to move instructions with no live
            // uses to the bailout path.
            if (!sinkEnabled)
                continue;

            // To move an effectful instruction, we would have to verify that the
            // side-effect is not observed. In the meantime, we just inhibit
            // this optimization on effectful instructions.
            if (ins->isEffectful())
                continue;

            // If all the uses are under a loop, we might not want to work
            // against LICM by moving everything back into the loop, but if the
            // loop is itself inside an if, then we still want to move the
            // computation under this if statement.
            while (block->loopDepth() < usesDominator->loopDepth()) {
                MOZ_ASSERT(usesDominator != usesDominator->immediateDominator());
                usesDominator = usesDominator->immediateDominator();
            }

            // Only move instructions if there is a branch between the dominator
            // of the uses and the original instruction. This prevents moving the
            // computation of the arguments into an inline function if there is
            // no major win.
            MBasicBlock* lastJoin = usesDominator;
            while (*block != lastJoin && lastJoin->numPredecessors() == 1) {
                MOZ_ASSERT(lastJoin != lastJoin->immediateDominator());
                MBasicBlock* next = lastJoin->immediateDominator();
                if (next->numSuccessors() > 1)
                    break;
                lastJoin = next;
            }
            if (*block == lastJoin)
                continue;

            // Skip to the next instruction if we cannot find a common dominator
//......... the rest of this code is omitted .........
Example 13: remainingIterationsInequality
void
LoopUnroller::go(LoopIterationBound *bound)
{
    // For now we always unroll loops the same number of times.
    static const size_t UnrollCount = 10;

    JitSpew(JitSpew_Unrolling, "Attempting to unroll loop");

    header = bound->header;

    // UCE might have determined this isn't actually a loop.
    if (!header->isLoopHeader())
        return;

    backedge = header->backedge();
    oldPreheader = header->loopPredecessor();

    JS_ASSERT(oldPreheader->numSuccessors() == 1);

    // Only unroll loops with two blocks: an initial one ending with the
    // bound's test, and the body ending with the backedge.
    MTest *test = bound->test;
    if (header->lastIns() != test)
        return;
    if (test->ifTrue() == backedge) {
        if (test->ifFalse()->id() <= backedge->id())
            return;
    } else if (test->ifFalse() == backedge) {
        if (test->ifTrue()->id() <= backedge->id())
            return;
    } else {
        return;
    }
    if (backedge->numPredecessors() != 1 || backedge->numSuccessors() != 1)
        return;
    JS_ASSERT(backedge->phisEmpty());

    MBasicBlock *bodyBlocks[] = { header, backedge };

    // All instructions in the header and body must be clonable.
    for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
        MBasicBlock *block = bodyBlocks[i];
        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
            MInstruction *ins = *iter;
            if (ins->canClone())
                continue;
            if (ins->isTest() || ins->isGoto() || ins->isInterruptCheck())
                continue;
#ifdef DEBUG
            JitSpew(JitSpew_Unrolling, "Aborting: can't clone instruction %s", ins->opName());
#endif
            return;
        }
    }

    // Compute the linear inequality we will use for exiting the unrolled loop:
    //
    // iterationBound - iterationCount - UnrollCount >= 0
    //
    LinearSum remainingIterationsInequality(bound->boundSum);
    if (!remainingIterationsInequality.add(bound->currentSum, -1))
        return;
    if (!remainingIterationsInequality.add(-int32_t(UnrollCount)))
        return;

    // Terms in the inequality need to be either loop invariant or phis from
    // the original header.
    for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
        MDefinition *def = remainingIterationsInequality.term(i).term;
        if (def->block()->id() < header->id())
            continue;
        if (def->block() == header && def->isPhi())
            continue;
        return;
    }

    // OK, we've checked everything, now unroll the loop.
    JitSpew(JitSpew_Unrolling, "Unrolling loop");

    // The old preheader will go before the unrolled loop, and the old loop
    // will need a new empty preheader.
    CompileInfo &info = oldPreheader->info();
    if (header->trackedSite().pc()) {
        unrolledHeader =
            MBasicBlock::New(graph, nullptr, info,
                             oldPreheader, header->trackedSite(), MBasicBlock::LOOP_HEADER);
        unrolledBackedge =
            MBasicBlock::New(graph, nullptr, info,
                             unrolledHeader, backedge->trackedSite(), MBasicBlock::NORMAL);
        newPreheader =
            MBasicBlock::New(graph, nullptr, info,
                             unrolledHeader, oldPreheader->trackedSite(), MBasicBlock::NORMAL);
    } else {
        unrolledHeader = MBasicBlock::NewAsmJS(graph, info, oldPreheader, MBasicBlock::LOOP_HEADER);
        unrolledBackedge = MBasicBlock::NewAsmJS(graph, info, unrolledHeader, MBasicBlock::NORMAL);
        newPreheader = MBasicBlock::NewAsmJS(graph, info, unrolledHeader, MBasicBlock::NORMAL);
    }
    unrolledHeader->discardAllResumePoints();
//......... the rest of this code is omitted .........
Example 14: JitSpew
// Visit |def|.
bool
ValueNumberer::visitDefinition(MDefinition* def)
{
    // Nop does not fit in any of the previous optimizations, as its only
    // purpose is to reduce the register pressure by keeping an additional
    // resume point. Still, there is no need for a consecutive list of MNop
    // instructions, and they would slow down every other iteration over the
    // graph.
    if (def->isNop()) {
        MNop* nop = def->toNop();
        MBasicBlock* block = nop->block();

        // We look backward to know if we can remove the previous Nop, we do not
        // look forward as we would not benefit from the folding made by GVN.
        MInstructionReverseIterator iter = ++block->rbegin(nop);

        // This nop is at the beginning of the basic block, just replace the
        // resume point of the basic block by the one attached to the Nop.
        if (iter == block->rend()) {
            JitSpew(JitSpew_GVN, " Removing Nop%u", nop->id());
            nop->moveResumePointAsEntry();
            block->discard(nop);
            return true;
        }

        // The previous instruction is also a Nop, no need to keep it anymore.
        MInstruction* prev = *iter;
        if (prev->isNop()) {
            JitSpew(JitSpew_GVN, " Removing Nop%u", prev->id());
            block->discard(prev);
            return true;
        }

        // The Nop is introduced to capture the result and make sure the operands
        // are not live anymore when there are no further uses. However, when
        // all operands are still needed, the Nop doesn't decrease the liveness
        // and can be removed.
        MResumePoint* rp = nop->resumePoint();
        if (rp && rp->numOperands() > 0 &&
            rp->getOperand(rp->numOperands() - 1) == prev &&
            !nop->block()->lastIns()->isThrow() &&
            !prev->isAssertRecoveredOnBailout())
        {
            size_t numOperandsLive = 0;
            for (size_t j = 0; j < prev->numOperands(); j++) {
                for (size_t i = 0; i < rp->numOperands(); i++) {
                    if (prev->getOperand(j) == rp->getOperand(i)) {
                        numOperandsLive++;
                        break;
                    }
                }
            }

            if (numOperandsLive == prev->numOperands()) {
                JitSpew(JitSpew_GVN, " Removing Nop%u", nop->id());
                block->discard(nop);
            }
        }

        return true;
    }

    // Skip optimizations on instructions which are recovered on bailout, to
    // avoid mixing instructions which are recovered on bailouts with
    // instructions which are not.
    if (def->isRecoveredOnBailout())
        return true;

    // If this instruction has a dependency() into an unreachable block, we'll
    // need to update AliasAnalysis.
    MDefinition* dep = def->dependency();
    if (dep != nullptr && (dep->isDiscarded() || dep->block()->isDead())) {
        JitSpew(JitSpew_GVN, " AliasAnalysis invalidated");
        if (updateAliasAnalysis_ && !dependenciesBroken_) {
            // TODO: Recomputing alias-analysis could theoretically expose more
            // GVN opportunities.
            JitSpew(JitSpew_GVN, " Will recompute!");
            dependenciesBroken_ = true;
        }

        // Temporarily clear its dependency, to protect foldsTo, which may
        // wish to use the dependency to do store-to-load forwarding.
        def->setDependency(def->toInstruction());
    } else {
        dep = nullptr;
    }

    // Look for a simplified form of |def|.
    MDefinition* sim = simplified(def);
    if (sim != def) {
        if (sim == nullptr)
            return false;

        bool isNewInstruction = sim->block() == nullptr;

        // If |sim| doesn't belong to a block, insert it next to |def|.
        if (isNewInstruction)
            def->block()->insertAfter(def->toInstruction(), sim->toInstruction());

#ifdef JS_JITSPEW
        JitSpew(JitSpew_GVN, " Folded %s%u to %s%u",
//......... the rest of this code is omitted .........