This page collects typical usage examples of the C++ MDefinition class. If you are wondering what MDefinition is for or how to use it, the concrete examples below should help.
The following shows 15 code examples of the MDefinition class, sorted by popularity by default.
Example 1: JitSpew
// Visit |def|.
bool
ValueNumberer::visitDefinition(MDefinition *def)
{
// Nop does not fit any of the previous optimizations, as its only purpose
// is to reduce register pressure by keeping an additional resume
// point. Still, there is no need for a consecutive run of MNop instructions,
// and such a run would slow down every other iteration over the graph.
if (def->isNop()) {
MNop *nop = def->toNop();
MBasicBlock *block = nop->block();
// We look backward to know whether we can remove the previous Nop; we do
// not look forward, as we would not benefit from the folding done by GVN.
MInstructionReverseIterator iter = ++block->rbegin(nop);
// This Nop is at the beginning of the basic block: just replace the
// entry resume point of the basic block with the Nop's resume point.
if (iter == block->rend()) {
JitSpew(JitSpew_GVN, " Removing Nop%u", nop->id());
nop->moveResumePointAsEntry();
block->discard(nop);
return true;
}
// If the previous instruction is also a Nop, there is no need to keep it anymore.
MInstruction *prev = *iter;
if (prev->isNop()) {
JitSpew(JitSpew_GVN, " Removing Nop%u", prev->id());
block->discard(prev);
return true;
}
return true;
}
// Skip optimizations on instructions which are recovered on bailout, to
// avoid mixing instructions which are recovered on bailouts with
// instructions which are not.
if (def->isRecoveredOnBailout())
return true;
// If this instruction has a dependency() into an unreachable block, we'll
// need to update AliasAnalysis.
MInstruction *dep = def->dependency();
if (dep != nullptr && (dep->isDiscarded() || dep->block()->isDead())) {
JitSpew(JitSpew_GVN, " AliasAnalysis invalidated");
if (updateAliasAnalysis_ && !dependenciesBroken_) {
// TODO: Recomputing alias-analysis could theoretically expose more
// GVN opportunities.
JitSpew(JitSpew_GVN, " Will recompute!");
dependenciesBroken_ = true;
}
// Temporarily clear its dependency, to protect foldsTo, which may
// wish to use the dependency to do store-to-load forwarding.
def->setDependency(def->toInstruction());
} else {
dep = nullptr;
}
// Look for a simplified form of |def|.
MDefinition *sim = simplified(def);
if (sim != def) {
if (sim == nullptr)
return false;
// If |sim| doesn't belong to a block, insert it next to |def|.
if (sim->block() == nullptr)
def->block()->insertAfter(def->toInstruction(), sim->toInstruction());
#ifdef DEBUG
JitSpew(JitSpew_GVN, " Folded %s%u to %s%u",
def->opName(), def->id(), sim->opName(), sim->id());
#endif
MOZ_ASSERT(!sim->isDiscarded());
ReplaceAllUsesWith(def, sim);
// The node's foldsTo said |def| can be replaced by |sim|. If |def| is a
// guard, then either |sim| is also a guard, or a guard isn't actually
// needed, so we can clear |def|'s guard flag and let it be discarded.
def->setNotGuardUnchecked();
if (DeadIfUnused(def)) {
if (!discardDefsRecursively(def))
return false;
// If that ended up discarding |sim|, then we're done here.
if (sim->isDiscarded())
return true;
}
// Otherwise, proceed to optimize with |sim| in place of |def|.
def = sim;
}
// Now that foldsTo is done, re-enable the original dependency. Even though
// it may be pointing into a discarded block, it's still valid for the
// purposes of detecting congruent loads.
if (dep != nullptr)
def->setDependency(dep);
//......... part of the code omitted here .........
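The Nop handling above only ever looks one instruction back, which over repeated GVN visits converges to the same result as a single coalescing pass: at most one Nop per run, and none at the head of a block. A minimal standalone sketch of that policy, using a vector of opcode strings as a toy stand-in for the MIR instruction list (nothing here is the actual MIR API):

#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for a basic block's instruction list; "nop" models MNop.
// Keep at most one nop per run, and none at the head of the block,
// mirroring what visitDefinition() achieves one instruction at a time.
std::vector<std::string> coalesceNops(const std::vector<std::string>& block) {
    std::vector<std::string> out;
    for (const std::string& ins : block) {
        if (ins == "nop") {
            // Drop a nop at the head, and fold consecutive nops into one.
            if (out.empty() || out.back() == "nop")
                continue;
        }
        out.push_back(ins);
    }
    return out;
}

int main() {
    std::vector<std::string> block{"nop", "add", "nop", "nop", "mul", "nop"};
    for (const std::string& ins : coalesceNops(block))
        std::cout << ins << '\n';   // add, nop, mul, nop
}

Note that a Nop sitting between two real instructions survives, just as in the real pass: it still carries a resume point worth keeping.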
Example 2: if
void
LIRGeneratorX64::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
{
MDefinition* ptr = ins->ptr();
MOZ_ASSERT(ptr->type() == MIRType_Int32);
// Case 1: the result of the operation is not used.
//
// We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
// LOCK OR, or LOCK XOR.
if (!ins->hasUses()) {
LAsmJSAtomicBinopHeapForEffect* lir =
new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(ptr),
useRegisterOrConstant(ins->value()));
add(lir, ins);
return;
}
// Case 2: the result of the operation is used.
//
// For ADD and SUB we'll use XADD with word and byte ops as
// appropriate. Any output register can be used and if value is a
// register it's best if it's the same as output:
//
// movl value, output ; if value != output
// lock xaddl output, mem
//
// For AND/OR/XOR we need to use a CMPXCHG loop, and the output is
// always in rax:
//
// movl *mem, rax
// L: mov rax, temp
// andl value, temp
// lock cmpxchg temp, mem ; reads rax also
// jnz L
// ; result in rax
//
// Note the placement of L: cmpxchg will update rax with *mem if
// *mem does not have the expected value, so reloading it at the
// top of the loop would be redundant.
bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
bool reuseInput = false;
LAllocation value;
if (bitOp || ins->value()->isConstant()) {
value = useRegisterOrConstant(ins->value());
} else {
reuseInput = true;
value = useRegisterAtStart(ins->value());
}
LAsmJSAtomicBinopHeap* lir =
new(alloc()) LAsmJSAtomicBinopHeap(useRegister(ptr),
value,
bitOp ? temp() : LDefinition::BogusTemp());
if (reuseInput)
defineReuseInput(lir, ins, LAsmJSAtomicBinopHeap::valueOp);
else if (bitOp)
defineFixed(lir, ins, LAllocation(AnyRegister(rax)));
else
define(lir, ins);
}
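The CMPXCHG loop in the comment has a direct software analogue. Here is a hedged sketch using std::atomic that implements fetch-and via a CAS loop; it illustrates the semantics the lowering targets, not the code the JIT emits. compare_exchange_weak refreshes |old| with the current memory value on failure, exactly the way cmpxchg refreshes rax, which is why no reload is needed at the top of the loop.

#include <atomic>
#include <cstdio>

// fetch_and via a CAS loop: the software shape of
//   L: mov rax, temp / andl value, temp / lock cmpxchg temp, mem / jnz L
// On failure, compare_exchange_weak writes the fresh *mem into |old|,
// just as cmpxchg updates rax, so no reload is needed.
int fetchAnd(std::atomic<int>& mem, int value) {
    int old = mem.load();
    while (!mem.compare_exchange_weak(old, old & value)) {
        // |old| now holds the current value of mem; retry.
    }
    return old;
}

int main() {
    std::atomic<int> cell(0b1110);
    int previous = fetchAnd(cell, 0b0111);
    std::printf("previous=%d now=%d\n", previous, cell.load()); // previous=14 now=6
}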
Example 3: AnalyzeLsh
static void
AnalyzeLsh(TempAllocator& alloc, MLsh* lsh)
{
if (lsh->specialization() != MIRType_Int32)
return;
if (lsh->isRecoveredOnBailout())
return;
MDefinition* index = lsh->lhs();
MOZ_ASSERT(index->type() == MIRType_Int32);
MDefinition* shift = lsh->rhs();
if (!shift->isConstantValue())
return;
Value shiftValue = shift->constantValue();
if (!shiftValue.isInt32() || !IsShiftInScaleRange(shiftValue.toInt32()))
return;
Scale scale = ShiftToScale(shiftValue.toInt32());
int32_t displacement = 0;
MInstruction* last = lsh;
MDefinition* base = nullptr;
while (true) {
if (!last->hasOneUse())
break;
MUseIterator use = last->usesBegin();
if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isAdd())
break;
MAdd* add = use->consumer()->toDefinition()->toAdd();
if (add->specialization() != MIRType_Int32 || !add->isTruncated())
break;
MDefinition* other = add->getOperand(1 - add->indexOf(*use));
if (other->isConstantValue()) {
displacement += other->constantValue().toInt32();
} else {
if (base)
break;
base = other;
}
last = add;
if (last->isRecoveredOnBailout())
return;
}
if (!base) {
uint32_t elemSize = 1 << ScaleToShift(scale);
if (displacement % elemSize != 0)
return;
if (!last->hasOneUse())
return;
MUseIterator use = last->usesBegin();
if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isBitAnd())
return;
MBitAnd* bitAnd = use->consumer()->toDefinition()->toBitAnd();
if (bitAnd->isRecoveredOnBailout())
return;
MDefinition* other = bitAnd->getOperand(1 - bitAnd->indexOf(*use));
if (!other->isConstantValue() || !other->constantValue().isInt32())
return;
uint32_t bitsClearedByShift = elemSize - 1;
uint32_t bitsClearedByMask = ~uint32_t(other->constantValue().toInt32());
if ((bitsClearedByShift & bitsClearedByMask) != bitsClearedByMask)
return;
bitAnd->replaceAllUsesWith(last);
return;
}
if (base->isRecoveredOnBailout())
return;
MEffectiveAddress* eaddr = MEffectiveAddress::New(alloc, base, index, scale, displacement);
last->replaceAllUsesWith(eaddr);
last->block()->insertAfter(last, eaddr);
}
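What AnalyzeLsh recognizes is the arithmetic identity (index << shift) + c0 + c1 + ... == index*scale + displacement, with the scale limited to the x86 addressing scales. A standalone sketch of just that arithmetic, with hypothetical helper names (the real pass works on MIR nodes, not integers):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

// Mirror of ShiftToScale for the x86-style scales {1, 2, 4, 8}.
bool shiftToScale(int32_t shift, int* scale) {
    if (shift < 0 || shift > 3)
        return false;      // the IsShiftInScaleRange() check failed
    *scale = 1 << shift;
    return true;
}

// Fold (index << shift) + c0 + c1 + ... into index*scale + displacement,
// the addressing form MEffectiveAddress represents.
int64_t effectiveOffset(int32_t index, int32_t shift,
                        const std::vector<int32_t>& constants) {
    int scale = 0;
    bool ok = shiftToScale(shift, &scale);
    assert(ok);
    int64_t displacement = 0;
    for (int32_t c : constants)
        displacement += c;  // AnalyzeLsh accumulates these while walking the adds
    return int64_t(index) * scale + displacement;
}

int main() {
    // (i << 2) + 8 + 4  ==  i*4 + 12
    std::printf("%lld\n", (long long)effectiveOffset(10, 2, {8, 4})); // 52
}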
Example 4: getRecoverInfo
LSnapshot*
LIRGeneratorShared::buildSnapshot(LInstruction* ins, MResumePoint* rp, BailoutKind kind)
{
LRecoverInfo* recoverInfo = getRecoverInfo(rp);
if (!recoverInfo)
return nullptr;
LSnapshot* snapshot = LSnapshot::New(gen, recoverInfo, kind);
if (!snapshot)
return nullptr;
size_t index = 0;
for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
// Check that optimized out operands are in eliminable slots.
MOZ_ASSERT(it.canOptimizeOutIfUnused());
MDefinition* ins = *it;
if (ins->isRecoveredOnBailout())
continue;
LAllocation* type = snapshot->typeOfSlot(index);
LAllocation* payload = snapshot->payloadOfSlot(index);
++index;
if (ins->isBox())
ins = ins->toBox()->getOperand(0);
// Guards should never be eliminated.
MOZ_ASSERT_IF(ins->isUnused(), !ins->isGuard());
// Snapshot operands other than constants should never be
// emitted-at-uses. Try-catch support depends on there being no
// code between an instruction and the LOsiPoint that follows it.
MOZ_ASSERT_IF(!ins->isConstant(), !ins->isEmittedAtUses());
// The register allocation will fill these fields in with actual
// register/stack assignments. During code generation, we can restore
// interpreter state with the given information. Note that for
// constants, including known types, we record a dummy placeholder,
// since we can recover the same information, much cleaner, from MIR.
if (ins->isConstant() || ins->isUnused()) {
*type = LAllocation();
*payload = LAllocation();
} else if (ins->type() != MIRType::Value) {
*type = LAllocation();
*payload = use(ins, LUse(LUse::KEEPALIVE));
} else {
*type = useType(ins, LUse::KEEPALIVE);
*payload = usePayload(ins, LUse::KEEPALIVE);
}
}
return snapshot;
}
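The slot-encoding policy of the if/else ladder is worth stating on its own: constants and optimized-out values get dummy placeholders (they are recovered from MIR), typed values fill only the payload slot, and boxed Values fill both halves. A toy sketch of that mapping, with invented Kind/Slot types standing in for MIR and LAllocation:

#include <cstdio>
#include <vector>

enum class Kind { Constant, Unused, TypedValue, BoxedValue };

struct Slot { bool hasType; bool hasPayload; };

// One (type, payload) pair per resume-point operand, mirroring the
// ladder in buildSnapshot: constants are recovered from MIR, so both
// allocations stay as dummy placeholders.
Slot encodeSlot(Kind k) {
    switch (k) {
      case Kind::Constant:
      case Kind::Unused:
        return {false, false};   // dummy LAllocation(), LAllocation()
      case Kind::TypedValue:
        return {false, true};    // payload only, kept alive via KEEPALIVE
      case Kind::BoxedValue:
        return {true, true};     // both halves of the boxed Value
    }
    return {false, false};
}

int main() {
    std::vector<Kind> operands{Kind::Constant, Kind::TypedValue, Kind::BoxedValue};
    for (Kind k : operands) {
        Slot s = encodeSlot(k);
        std::printf("type=%d payload=%d\n", s.hasType, s.hasPayload);
    }
}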
Example 5:
void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
MDefinition* opd = ins->input();
MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd)), ins);
}
Example 6: MOZ_ASSERT
bool
MBasicBlock::linkOsrValues(MStart *start)
{
MOZ_ASSERT(start->startType() == MStart::StartType_Osr);
MResumePoint *res = start->resumePoint();
for (uint32_t i = 0; i < stackDepth(); i++) {
MDefinition *def = slots_[i];
MInstruction *cloneRp = nullptr;
if (i == info().scopeChainSlot()) {
if (def->isOsrScopeChain())
cloneRp = def->toOsrScopeChain();
} else if (i == info().returnValueSlot()) {
if (def->isOsrReturnValue())
cloneRp = def->toOsrReturnValue();
} else if (info().hasArguments() && i == info().argsObjSlot()) {
MOZ_ASSERT(def->isConstant() || def->isOsrArgumentsObject());
MOZ_ASSERT_IF(def->isConstant(), def->toConstant()->value() == UndefinedValue());
if (def->isOsrArgumentsObject())
cloneRp = def->toOsrArgumentsObject();
} else {
MOZ_ASSERT(def->isOsrValue() || def->isGetArgumentsObjectArg() || def->isConstant() ||
def->isParameter());
// A constant Undefined can show up here for an argument slot when the function uses
// a heavyweight argsobj, but the argument in question is stored on the scope chain.
MOZ_ASSERT_IF(def->isConstant(), def->toConstant()->value() == UndefinedValue());
if (def->isOsrValue())
cloneRp = def->toOsrValue();
else if (def->isGetArgumentsObjectArg())
cloneRp = def->toGetArgumentsObjectArg();
else if (def->isParameter())
cloneRp = def->toParameter();
}
if (cloneRp) {
MResumePoint *clone = MResumePoint::Copy(graph().alloc(), res);
if (!clone)
return false;
cloneRp->setResumePoint(clone);
}
}
return true;
}
Example 7: JS_ASSERT
void
MBasicBlock::linkOsrValues(MStart *start)
{
JS_ASSERT(start->startType() == MStart::StartType_Osr);
MResumePoint *res = start->resumePoint();
for (uint32_t i = 0; i < stackDepth(); i++) {
MDefinition *def = slots_[i];
if (i == info().scopeChainSlot()) {
if (def->isOsrScopeChain())
def->toOsrScopeChain()->setResumePoint(res);
} else if (i == info().returnValueSlot()) {
if (def->isOsrReturnValue())
def->toOsrReturnValue()->setResumePoint(res);
} else if (info().hasArguments() && i == info().argsObjSlot()) {
JS_ASSERT(def->isConstant() || def->isOsrArgumentsObject());
JS_ASSERT_IF(def->isConstant(), def->toConstant()->value() == UndefinedValue());
if (def->isOsrArgumentsObject())
def->toOsrArgumentsObject()->setResumePoint(res);
} else {
JS_ASSERT(def->isOsrValue() || def->isGetArgumentsObjectArg() || def->isConstant() ||
def->isParameter());
// A constant Undefined can show up here for an argument slot when the function uses
// a heavyweight argsobj, but the argument in question is stored on the scope chain.
JS_ASSERT_IF(def->isConstant(), def->toConstant()->value() == UndefinedValue());
if (def->isOsrValue())
def->toOsrValue()->setResumePoint(res);
else if (def->isGetArgumentsObjectArg())
def->toGetArgumentsObjectArg()->setResumePoint(res);
else if (def->isParameter())
def->toParameter()->setResumePoint(res);
}
}
}
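Examples 6 and 7 are two revisions of the same function: the newer one (Example 6) copies the entry resume point for each OSR definition via MResumePoint::Copy, where the older one (Example 7) shares a single resume point across all of them. A toy sketch of the cloning variant, with invented types; the point is the ownership, not the MIR details:

#include <memory>
#include <vector>

struct ResumePoint { int snapshotId; };

struct OsrValue {
    // Each OSR value owns its own copy of the entry resume point
    // (the MResumePoint::Copy path in Example 6), so later per-value
    // mutation can't corrupt a sibling's resume point.
    std::unique_ptr<ResumePoint> resumePoint;
};

void linkOsrValues(const ResumePoint& entry, std::vector<OsrValue>& values) {
    for (OsrValue& v : values)
        v.resumePoint = std::make_unique<ResumePoint>(entry); // clone, don't share
}

int main() {
    ResumePoint entry{42};
    std::vector<OsrValue> values(3);
    linkOsrValues(entry, values);
    return values[0].resumePoint->snapshotId == 42 ? 0 : 1;
}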
Example 8: IonSpew
void
ValueNumberer::breakClass(MDefinition *def)
{
if (def->valueNumber() == def->id()) {
IonSpew(IonSpew_GVN, "Breaking congruence with itself: %d", def->id());
ValueNumberData *defdata = def->valueNumberData();
JS_ASSERT(defdata->classPrev == NULL);
// If the def was the only member of the class, then there is nothing to do.
if (defdata->classNext == NULL)
return;
// If, upon closer inspection, we are still equivalent to this class,
// then there isn't anything for us to do.
MDefinition *newRep = findSplit(def);
if (!newRep)
return;
ValueNumberData *newdata = newRep->valueNumberData();
// Right now, |defdata| is at the front of the list, and |newdata| is
// somewhere in the middle.
//
// We want to move |defdata| and everything up to but excluding
// |newdata| to a new list, with |defdata| still as the canonical
// element.
//
// We then want to take |newdata| and everything after, and
// mark them for processing (since |newdata| is now a new canonical
// element).
//
MDefinition *lastOld = newdata->classPrev;
JS_ASSERT(lastOld); // newRep is NOT the first element of the list.
JS_ASSERT(lastOld->valueNumberData()->classNext == newRep);
// lastOld is now the last element of the old list (congruent to |def|).
lastOld->valueNumberData()->classNext = NULL;
#ifdef DEBUG
for (MDefinition *tmp = def; tmp != NULL; tmp = tmp->valueNumberData()->classNext) {
JS_ASSERT(tmp->valueNumber() == def->valueNumber());
JS_ASSERT(tmp->congruentTo(def));
JS_ASSERT(tmp != newRep);
}
#endif
// |newRep| is now the first element of a new list, therefore it is the
// new canonical element. Mark the remaining elements in the list
// (including |newRep|).
newdata->classPrev = NULL;
IonSpew(IonSpew_GVN, "Choosing a new representative: %d", newRep->id());
// Make the value number of every member of the class the id of the new representative.
for (MDefinition *tmp = newRep; tmp != NULL; tmp = tmp->valueNumberData()->classNext) {
// if this instruction is already scheduled to be processed, don't do anything.
if (tmp->isInWorklist())
continue;
IonSpew(IonSpew_GVN, "Moving to a new congruence class: %d", tmp->id());
tmp->setValueNumber(newRep->id());
markConsumers(tmp);
markDefinition(tmp);
}
// Insert the new representative => number mapping into the table
// Logically, there should not be anything in the table currently, but
// old values are never removed, so there's a good chance something will
// already be there.
values.put(newRep, newRep->id());
} else {
// The element that is breaking from the list isn't the representative element;
// just strip it from the list.
ValueNumberData *defdata = def->valueNumberData();
if (defdata->classPrev)
defdata->classPrev->valueNumberData()->classNext = defdata->classNext;
if (defdata->classNext)
defdata->classNext->valueNumberData()->classPrev = defdata->classPrev;
// Make sure there is no nastiness accidentally linking elements into the old list later.
defdata->classPrev = NULL;
defdata->classNext = NULL;
}
}
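The congruence classes being manipulated here are intrusive doubly-linked lists, and the heart of breakClass is splitting one such list in two at |newRep|. A self-contained sketch of that pointer surgery on a toy Node type:

#include <cstdio>

struct Node {
    int id;
    Node* classPrev = nullptr;
    Node* classNext = nullptr;
};

// Split the list so that |newRep| starts a second list, mirroring
// breakClass: everything before |newRep| stays with the old
// representative, and |newRep| becomes the canonical element of the rest.
void splitAt(Node* newRep) {
    Node* lastOld = newRep->classPrev;   // newRep must not be the list head
    lastOld->classNext = nullptr;        // terminate the old list
    newRep->classPrev = nullptr;         // newRep now heads the new list
}

int main() {
    Node a{1}, b{2}, c{3};
    a.classNext = &b; b.classPrev = &a;
    b.classNext = &c; c.classPrev = &b;
    splitAt(&c);                          // old list: a->b, new list: c
    std::printf("%d %d\n", a.classNext->id, c.classPrev == nullptr); // 2 1
}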
Example 9: IonSpew
bool
ValueNumberer::computeValueNumbers()
{
// At the end of this function, we will have the value numbering stored in
// each instruction.
//
// We also need an "optimistic" value number, for temporary use, which is
// stored in a hashtable.
//
// For the instruction x := y op z, we map (op, VN[y], VN[z]) to a value
// number, say v. If it is not in the map, we use the instruction id.
//
// If the instruction in question's value number is not already
// v, we break the congruence and set it to v. We repeat until saturation.
// This will take at worst O(d) time, where d is the loop connectedness
// of the SSA def/use graph.
//
// The algorithm is the simple RPO-based algorithm from
// "SCC-Based Value Numbering" by Cooper and Simpson.
//
// If we are performing a pessimistic pass, then we assume that every
// definition is in its own congruence class, since we know nothing about
// values that enter Phi nodes through back edges. We then make one pass
// through the graph, ignoring back edges. This yields fewer congruences on
// any graph with back edges, but is much faster to perform.
IonSpew(IonSpew_GVN, "Numbering instructions");
if (!values.init())
return false;
// Stick a VN object onto every MDefinition.
for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
for (MDefinitionIterator iter(*block); iter; iter++)
iter->setValueNumberData(new ValueNumberData);
MControlInstruction *jump = block->lastIns();
jump->setValueNumberData(new ValueNumberData);
}
// Assign unique value numbers if pessimistic.
// It might be productive to do this in the MDefinition constructor or
// possibly in a previous pass, if it seems reasonable.
if (pessimisticPass_) {
for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
for (MDefinitionIterator iter(*block); iter; iter++)
iter->setValueNumber(iter->id());
}
} else {
// For each root block, add all of its instructions to the worklist.
markBlock(*(graph_.begin()));
if (graph_.osrBlock())
markBlock(graph_.osrBlock());
}
while (count_ > 0) {
#ifdef DEBUG
if (!pessimisticPass_) {
size_t debugCount = 0;
IonSpew(IonSpew_GVN, "The following instructions require processing:");
for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
for (MDefinitionIterator iter(*block); iter; iter++) {
if (iter->isInWorklist()) {
IonSpew(IonSpew_GVN, "\t%d", iter->id());
debugCount++;
}
}
if (block->lastIns()->isInWorklist()) {
IonSpew(IonSpew_GVN, "\t%d", block->lastIns()->id());
debugCount++;
}
}
if (!debugCount)
IonSpew(IonSpew_GVN, "\tNone");
JS_ASSERT(debugCount == count_);
}
#endif
for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
for (MDefinitionIterator iter(*block); iter; ) {
if (!isMarked(*iter)) {
iter++;
continue;
}
JS_ASSERT_IF(!pessimisticPass_, count_ > 0);
unmarkDefinition(*iter);
MDefinition *ins = simplify(*iter, false);
if (ins != *iter) {
iter = block->discardDefAt(iter);
continue;
}
uint32 value = lookupValue(ins);
if (!value)
return false; // Hashtable insertion failed
if (ins->valueNumber() != value) {
IonSpew(IonSpew_GVN,
//......... part of the code omitted here .........
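The lookup the comment describes, mapping (op, VN[y], VN[z]) to a number and defaulting to the instruction id, fits in a few lines. A standalone sketch with a string opcode and std::map standing in for the MIR instruction and its hashtable:

#include <cstdio>
#include <map>
#include <string>
#include <tuple>

using Key = std::tuple<std::string, int, int>;  // (op, VN[lhs], VN[rhs])

struct Numberer {
    std::map<Key, int> values;

    // Return the value number for |id := lhsVN op rhsVN|: reuse the
    // number of the first congruent instruction seen, else the id itself.
    int lookup(const std::string& op, int lhsVN, int rhsVN, int id) {
        auto [it, fresh] = values.try_emplace(Key{op, lhsVN, rhsVN}, id);
        (void)fresh;
        return it->second;
    }
};

int main() {
    Numberer vn;
    int v1 = vn.lookup("add", 1, 2, /*id=*/10);  // first occurrence -> 10
    int v2 = vn.lookup("add", 1, 2, /*id=*/11);  // congruent -> also 10
    int v3 = vn.lookup("mul", 1, 2, /*id=*/12);  // different op -> 12
    std::printf("%d %d %d\n", v1, v2, v3);        // 10 10 12
}

Iterating this lookup to a fixpoint over the RPO ordering is what the optimistic pass does; the pessimistic pass simply skips the iteration.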
Example 10: JS_ASSERT
void
RangeAnalysis::analyzeLoopPhi(MBasicBlock *header, LoopIterationBound *loopBound, MPhi *phi)
{
// Given a bound on the number of backedges taken, compute an upper and
// lower bound for a phi node that may change by a constant amount each
// iteration. Unlike for the case when computing the iteration bound
// itself, the phi does not need to change by the same amount every iteration,
// but is required to change by at most N and to be either nondecreasing or
// nonincreasing.
if (phi->numOperands() != 2)
return;
MBasicBlock *preLoop = header->loopPredecessor();
JS_ASSERT(!preLoop->isMarked() && preLoop->successorWithPhis() == header);
MBasicBlock *backedge = header->backedge();
JS_ASSERT(backedge->isMarked() && backedge->successorWithPhis() == header);
MDefinition *initial = phi->getOperand(preLoop->positionInPhiSuccessor());
if (initial->block()->isMarked())
return;
SimpleLinearSum modified = ExtractLinearSum(phi->getOperand(backedge->positionInPhiSuccessor()));
if (modified.term != phi || modified.constant == 0)
return;
if (!phi->range())
phi->setRange(new Range());
LinearSum initialSum;
if (!initialSum.add(initial, 1))
return;
// The phi may change by N each iteration, and is either nondecreasing or
// nonincreasing. initial(phi) is either a lower or upper bound for the
// phi, and initial(phi) + loopBound * N is either an upper or lower bound,
// at all points within the loop, provided that loopBound >= 0.
//
// We are more interested, however, in the bound for phi at points
// dominated by the loop bound's test; if the test dominates e.g. a bounds
// check we want to hoist from the loop, using the value of the phi at the
// head of the loop for this will usually be too imprecise to hoist the
// check. These points will execute only if the backedge executes at least
// one more time (as the test passed and the test dominates the backedge),
// so we know both that loopBound >= 1 and that the phi's value has changed
// at most loopBound - 1 times. Thus, another upper or lower bound for the
// phi is initial(phi) + (loopBound - 1) * N, without requiring us to
// ensure that loopBound >= 0.
LinearSum limitSum(loopBound->sum);
if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum))
return;
int32 negativeConstant;
if (!SafeSub(0, modified.constant, &negativeConstant) || !limitSum.add(negativeConstant))
return;
if (modified.constant > 0) {
phi->range()->setSymbolicLower(new SymbolicBound(NULL, initialSum));
phi->range()->setSymbolicUpper(new SymbolicBound(loopBound, limitSum));
} else {
phi->range()->setSymbolicUpper(new SymbolicBound(NULL, initialSum));
phi->range()->setSymbolicLower(new SymbolicBound(loopBound, limitSum));
}
IonSpew(IonSpew_Range, "added symbolic range on %d", phi->id());
SpewRange(phi);
}
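Numerically, the derived bound is initial(phi) on one side and initial(phi) + (loopBound - 1) * N on the other, with the sides swapping for a nonincreasing phi. A small sketch of that case split, with plain integers standing in for LinearSum and SymbolicBound:

#include <cstdio>
#include <utility>

// Lower/upper bounds for a phi that starts at |initial| and changes by
// at most |step| per iteration, given >= 1 and <= loopBound backedges.
// For step > 0 the initial value is the lower bound; for step < 0 the
// roles swap, the same case split as analyzeLoopPhi.
std::pair<long, long> phiBounds(long initial, long step, long loopBound) {
    long moved = initial + (loopBound - 1) * step;
    if (step > 0)
        return {initial, moved};
    return {moved, initial};
}

int main() {
    auto [lo, hi] = phiBounds(/*initial=*/0, /*step=*/2, /*loopBound=*/10);
    std::printf("[%ld, %ld]\n", lo, hi);   // [0, 18]
}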
Example 11: MOZ_ASSERT
void
LIRGeneratorX86::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
{
MOZ_ASSERT(ins->accessType() < Scalar::Float32);
MDefinition* base = ins->base();
MOZ_ASSERT(base->type() == MIRType::Int32);
bool byteArray = byteSize(ins->accessType()) == 1;
// Case 1: the result of the operation is not used.
//
// We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
// LOCK OR, or LOCK XOR. These can all take an immediate.
if (!ins->hasUses()) {
LAllocation value;
if (byteArray && !ins->value()->isConstant())
value = useFixed(ins->value(), ebx);
else
value = useRegisterOrConstant(ins->value());
LAsmJSAtomicBinopHeapForEffect* lir =
new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(base), value);
lir->setAddrTemp(temp());
add(lir, ins);
return;
}
// Case 2: the result of the operation is used.
//
// For ADD and SUB we'll use XADD:
//
// movl value, output
// lock xaddl output, mem
//
// For the 8-bit variants XADD needs a byte register for the
// output only; we can still set up with movl, just pin the output
// to eax (or ebx / ecx / edx).
//
// For AND/OR/XOR we need to use a CMPXCHG loop:
//
// movl *mem, eax
// L: mov eax, temp
// andl value, temp
// lock cmpxchg temp, mem ; reads eax also
// jnz L
// ; result in eax
//
// Note the placement of L: cmpxchg will update eax with *mem if
// *mem does not have the expected value, so reloading it at the
// top of the loop would be redundant.
//
// We want to fix eax as the output. We also need a temp for
// the intermediate value.
//
// For the 8-bit variants the temp must have a byte register.
//
// There are optimization opportunities:
// - better 8-bit register allocation and instruction selection, Bug #1077036.
bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
LDefinition tempDef = LDefinition::BogusTemp();
LAllocation value;
if (byteArray) {
value = useFixed(ins->value(), ebx);
if (bitOp)
tempDef = tempFixed(ecx);
} else if (bitOp || ins->value()->isConstant()) {
value = useRegisterOrConstant(ins->value());
if (bitOp)
tempDef = temp();
} else {
value = useRegisterAtStart(ins->value());
}
LAsmJSAtomicBinopHeap* lir =
new(alloc()) LAsmJSAtomicBinopHeap(useRegister(base), value, tempDef);
lir->setAddrTemp(temp());
if (byteArray || bitOp)
defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
else if (ins->value()->isConstant())
define(lir, ins);
else
defineReuseInput(lir, ins, LAsmJSAtomicBinopHeap::valueOp);
}
Example 12: lhs
LoopIterationBound *
RangeAnalysis::analyzeLoopIterationCount(MBasicBlock *header,
MTest *test, BranchDirection direction)
{
SimpleLinearSum lhs(NULL, 0);
MDefinition *rhs;
bool lessEqual;
if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual))
return NULL;
// Ensure the rhs is a loop invariant term.
if (rhs && rhs->block()->isMarked()) {
if (lhs.term && lhs.term->block()->isMarked())
return NULL;
MDefinition *temp = lhs.term;
lhs.term = rhs;
rhs = temp;
if (!SafeSub(0, lhs.constant, &lhs.constant))
return NULL;
lessEqual = !lessEqual;
}
JS_ASSERT_IF(rhs, !rhs->block()->isMarked());
// Ensure the lhs is a phi node from the start of the loop body.
if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header)
return NULL;
// Check that the value of the lhs changes by a constant amount with each
// loop iteration. This requires that the lhs be written in every loop
// iteration with a value that is a constant difference from its value at
// the start of the iteration.
if (lhs.term->toPhi()->numOperands() != 2)
return NULL;
// The first operand of the phi should be the lhs' value at the start of
// the first executed iteration, and not a value written which could
// replace the second operand below during the middle of execution.
MDefinition *lhsInitial = lhs.term->toPhi()->getOperand(0);
if (lhsInitial->block()->isMarked())
return NULL;
// The second operand of the phi should be a value written by an add/sub
// in every loop iteration, i.e. in a block which dominates the backedge.
MDefinition *lhsWrite = lhs.term->toPhi()->getOperand(1);
if (lhsWrite->isBeta())
lhsWrite = lhsWrite->getOperand(0);
if (!lhsWrite->isAdd() && !lhsWrite->isSub())
return NULL;
if (!lhsWrite->block()->isMarked())
return NULL;
MBasicBlock *bb = header->backedge();
for (; bb != lhsWrite->block() && bb != header; bb = bb->immediateDominator()) {}
if (bb != lhsWrite->block())
return NULL;
SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);
// Check that the value of the lhs at the backedge is of the form
// 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
// of the iteration, and not that written to lhs in a previous iteration,
// as such a previous value could not appear directly in the addition:
// it could not be stored in lhs as the lhs add/sub executes in every
// iteration, and if it were stored in another variable its use here would
// be as an operand to a phi node for that variable.
if (lhsModified.term != lhs.term)
return NULL;
LinearSum bound;
if (lhsModified.constant == 1 && !lessEqual) {
// The value of lhs is 'initial(lhs) + iterCount' and this will end
// execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
// on the number of backedges executed is:
//
// initial(lhs) + iterCount + lhsN == rhs
// iterCount == rhs - initial(lhs) - lhsN
if (rhs) {
if (!bound.add(rhs, 1))
return NULL;
}
if (!bound.add(lhsInitial, -1))
return NULL;
int32 lhsConstant;
if (!SafeSub(0, lhs.constant, &lhsConstant))
return NULL;
if (!bound.add(lhsConstant))
return NULL;
} else if (lhsModified.constant == -1 && lessEqual) {
// The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
// case, an upper bound on the number of backedges executed is:
//
// initial(lhs) - iterCount + lhsN == rhs
// iterCount == initial(lhs) - rhs + lhsN
if (!bound.add(lhsInitial, 1))
return NULL;
//......... part of the code omitted here .........
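For the common counted-loop shape (lhsModified.constant == 1 with a strict comparison), the formula in the comment reduces to iterCount == rhs - initial(lhs) - lhs.constant. A sketch of that arithmetic with plain integers, eliding the LinearSum and overflow bookkeeping:

#include <algorithm>
#include <cstdio>

// Upper bound on backedges for the lhsModified.constant == 1, !lessEqual
// case above, i.e. a loop that runs while (i + lhsConstant < limit):
//   iterCount == limit - init - lhsConstant
long backedgeBound(long init, long lhsConstant, long limit) {
    return std::max(0L, limit - init - lhsConstant);
}

int main() {
    // for (i = 0; i < 10; i++) takes the backedge at most 10 times.
    std::printf("%ld\n", backedgeBound(0, 0, 10));   // 10
}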
Example 13: markBlocksInLoopBody
void
RangeAnalysis::analyzeLoop(MBasicBlock *header)
{
// Try to compute an upper bound on the number of times the loop backedge
// will be taken. Look for tests that dominate the backedge and which have
// an edge leaving the loop body.
MBasicBlock *backedge = header->backedge();
// Ignore trivial infinite loops.
if (backedge == header)
return;
markBlocksInLoopBody(header, backedge);
LoopIterationBound *iterationBound = NULL;
MBasicBlock *block = backedge;
do {
BranchDirection direction;
MTest *branch = block->immediateDominatorBranch(&direction);
if (block == block->immediateDominator())
break;
block = block->immediateDominator();
if (branch) {
direction = NegateBranchDirection(direction);
MBasicBlock *otherBlock = branch->branchSuccessor(direction);
if (!otherBlock->isMarked()) {
iterationBound =
analyzeLoopIterationCount(header, branch, direction);
if (iterationBound)
break;
}
}
} while (block != header);
if (!iterationBound) {
graph_.unmarkBlocks();
return;
}
#ifdef DEBUG
if (IonSpewEnabled(IonSpew_Range)) {
Sprinter sp(GetIonContext()->cx);
sp.init();
iterationBound->sum.print(sp);
IonSpew(IonSpew_Range, "computed symbolic bound on backedges: %s",
sp.string());
}
#endif
// Try to compute symbolic bounds for the phi nodes at the head of this
// loop, expressed in terms of the iteration bound just computed.
for (MDefinitionIterator iter(header); iter; iter++) {
MDefinition *def = *iter;
if (def->isPhi())
analyzeLoopPhi(header, iterationBound, def->toPhi());
}
// Try to hoist any bounds checks from the loop using symbolic bounds.
Vector<MBoundsCheck *, 0, IonAllocPolicy> hoistedChecks;
for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
MBasicBlock *block = *iter;
if (!block->isMarked())
continue;
for (MDefinitionIterator iter(block); iter; iter++) {
MDefinition *def = *iter;
if (def->isBoundsCheck() && def->isMovable()) {
if (tryHoistBoundsCheck(header, def->toBoundsCheck()))
hoistedChecks.append(def->toBoundsCheck());
}
}
}
// Note: replace all uses of the original bounds check with the
// actual index. This is usually done during bounds check elimination,
// but in this case it's safe to do it here since the load/store is
// definitely not loop-invariant, so we will never move it before
// one of the bounds checks we just added.
for (size_t i = 0; i < hoistedChecks.length(); i++) {
MBoundsCheck *ins = hoistedChecks[i];
ins->replaceAllUsesWith(ins->index());
ins->block()->discard(ins);
}
graph_.unmarkBlocks();
}
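The payoff of this analysis is classic bounds-check hoisting: when the index provably stays inside a symbolic range, one check against the range's extreme outside the loop replaces a check on every iteration. A hand-written sketch of the before/after shape in ordinary C++ (no MIR involved):

#include <stdexcept>
#include <vector>

// Before: a bounds check on every iteration.
long sumChecked(const std::vector<int>& a, size_t n) {
    long sum = 0;
    for (size_t i = 0; i < n; i++)
        sum += a.at(i);              // per-iteration check
    return sum;
}

// After: the symbolic range of |i| is [0, n-1], so one hoisted check
// against the upper extreme makes every access in the body safe.
long sumHoisted(const std::vector<int>& a, size_t n) {
    if (n > a.size())
        throw std::out_of_range("hoisted bounds check failed");
    long sum = 0;
    for (size_t i = 0; i < n; i++)
        sum += a[i];                 // unchecked, like the replaced MBoundsCheck
    return sum;
}

int main() {
    std::vector<int> v{1, 2, 3, 4};
    return sumChecked(v, 4) == sumHoisted(v, 4) ? 0 : 1;
}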
Example 14: IonSpew
bool
RangeAnalysis::addBetaNobes()
{
IonSpew(IonSpew_Range, "Adding beta nobes");
for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
MBasicBlock *block = *i;
IonSpew(IonSpew_Range, "Looking at block %d", block->id());
BranchDirection branch_dir;
MTest *test = block->immediateDominatorBranch(&branch_dir);
if (!test || !test->getOperand(0)->isCompare())
continue;
MCompare *compare = test->getOperand(0)->toCompare();
MDefinition *left = compare->getOperand(0);
MDefinition *right = compare->getOperand(1);
int32 bound;
MDefinition *val = NULL;
JSOp jsop = compare->jsop();
if (branch_dir == FALSE_BRANCH)
jsop = analyze::NegateCompareOp(jsop);
if (left->isConstant() && left->toConstant()->value().isInt32()) {
bound = left->toConstant()->value().toInt32();
val = right;
jsop = analyze::ReverseCompareOp(jsop);
} else if (right->isConstant() && right->toConstant()->value().isInt32()) {
bound = right->toConstant()->value().toInt32();
val = left;
} else {
MDefinition *smaller = NULL;
MDefinition *greater = NULL;
if (jsop == JSOP_LT) {
smaller = left;
greater = right;
} else if (jsop == JSOP_GT) {
smaller = right;
greater = left;
}
if (smaller && greater) {
MBeta *beta;
beta = MBeta::New(smaller, new Range(JSVAL_INT_MIN, JSVAL_INT_MAX-1));
block->insertBefore(*block->begin(), beta);
replaceDominatedUsesWith(smaller, beta, block);
beta = MBeta::New(greater, new Range(JSVAL_INT_MIN+1, JSVAL_INT_MAX));
block->insertBefore(*block->begin(), beta);
replaceDominatedUsesWith(greater, beta, block);
}
continue;
}
JS_ASSERT(val);
Range comp;
switch (jsop) {
case JSOP_LE:
comp.setUpper(bound);
break;
case JSOP_LT:
if (!SafeSub(bound, 1, &bound))
break;
comp.setUpper(bound);
break;
case JSOP_GE:
comp.setLower(bound);
break;
case JSOP_GT:
if (!SafeAdd(bound, 1, &bound))
break;
comp.setLower(bound);
break;
case JSOP_EQ:
comp.setLower(bound);
comp.setUpper(bound);
default:
break; // well, for neq we could have
// [-\inf, bound-1] U [bound+1, \inf] but we only use contiguous ranges.
}
IonSpew(IonSpew_Range, "Adding beta node for %d", val->id());
MBeta *beta = MBeta::New(val, new Range(comp));
block->insertBefore(*block->begin(), beta);
replaceDominatedUsesWith(val, beta, block);
}
return true;
}
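The switch at the end condenses to a simple rule: comparing a value against a constant clips one end of its range, with LT and GT tightened by one and guarded against overflow. A standalone sketch of that mapping, with a toy Range and an enum standing in for JSOp:

#include <climits>
#include <cstdio>

struct Range { long lower = LONG_MIN; long upper = LONG_MAX; };

enum class Op { LE, LT, GE, GT, EQ };

// Refine a range from |val OP bound|, the same case analysis as the
// switch in addBetaNobes; the guards play the role of SafeSub/SafeAdd.
Range refine(Op op, int bound) {
    Range r;
    switch (op) {
      case Op::LE: r.upper = bound; break;
      case Op::LT: if (bound > INT_MIN) r.upper = bound - 1; break;
      case Op::GE: r.lower = bound; break;
      case Op::GT: if (bound < INT_MAX) r.lower = bound + 1; break;
      case Op::EQ: r.lower = bound; r.upper = bound; break;
    }
    return r;
}

int main() {
    Range r = refine(Op::LT, 10);
    std::printf("[%ld, %ld]\n", r.lower, r.upper);  // [LONG_MIN, 9]
}

As the original comment notes, NEQ gets no refinement because its result would be a non-contiguous range.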
Example 15: while
bool
ion::EliminatePhis(MIRGenerator *mir, MIRGraph &graph,
Observability observe)
{
// Eliminates redundant or unobservable phis from the graph. A
// redundant phi is something like b = phi(a, a) or b = phi(a, b),
// both of which can be replaced with a. An unobservable phi is
// one whose value is never used in the program.
//
// Note that we must be careful not to eliminate phis representing
// values that the interpreter will require later. When the graph
// is first constructed, we can be more aggressive, because there
// is a greater correspondence between the CFG and the bytecode.
// After optimizations such as GVN have been performed, however,
// the bytecode and CFG may not correspond as closely to one
// another. In that case, we must be more conservative. The flag
// |conservativeObservability| is used to indicate that phi
// elimination is being run after some optimizations have been performed,
// and thus we should use more conservative rules about
// observability. The particular danger is that we can optimize
// away uses of a phi because we think they are not executable,
// but the foundation for that assumption is false TI information
// that will eventually be invalidated. Therefore, if
// |conservativeObservability| is set, we will consider any use
// from a resume point to be observable. Otherwise, we demand a
// use from an actual instruction.
Vector<MPhi *, 16, SystemAllocPolicy> worklist;
// Add all observable phis to a worklist. We use the "in worklist" bit to
// mean "this phi is live".
for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
if (mir->shouldCancel("Eliminate Phis (populate loop)"))
return false;
MPhiIterator iter = block->phisBegin();
while (iter != block->phisEnd()) {
// Flag all as unused; only observable phis will be marked as used
// when processed via the worklist.
iter->setUnused();
// If the phi is redundant, remove it here.
if (MDefinition *redundant = IsPhiRedundant(*iter)) {
iter->replaceAllUsesWith(redundant);
iter = block->discardPhiAt(iter);
continue;
}
// Enqueue observable Phis.
if (IsPhiObservable(*iter, observe)) {
iter->setInWorklist();
if (!worklist.append(*iter))
return false;
}
iter++;
}
}
// Iteratively mark all phis reachable from live phis.
while (!worklist.empty()) {
if (mir->shouldCancel("Eliminate Phis (worklist)"))
return false;
MPhi *phi = worklist.popCopy();
JS_ASSERT(phi->isUnused());
phi->setNotInWorklist();
// The removal of Phis can produce newly redundant phis.
if (MDefinition *redundant = IsPhiRedundant(phi)) {
// Add to the worklist the used phis which are impacted.
for (MUseDefIterator it(phi); it; it++) {
if (it.def()->isPhi()) {
MPhi *use = it.def()->toPhi();
if (!use->isUnused()) {
use->setUnusedUnchecked();
use->setInWorklist();
if (!worklist.append(use))
return false;
}
}
}
phi->replaceAllUsesWith(redundant);
} else {
// Otherwise flag them as used.
phi->setNotUnused();
}
// The current phi is/was used, so all its operands are used.
for (size_t i = 0; i < phi->numOperands(); i++) {
MDefinition *in = phi->getOperand(i);
if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
continue;
in->setInWorklist();
if (!worklist.append(in->toPhi()))
return false;
}
}
// Sweep dead phis.
for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
//......... part of the code omitted here .........
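The mark phase of this pass is a plain worklist fixpoint: seed with the observable phis, then mark every phi that transitively feeds one; anything left unmarked is swept. A toy sketch over an adjacency list that records only phi-to-phi operands:

#include <cstdio>
#include <vector>

// phiOperands[p] lists the phis that are operands of phi |p|; a phi not
// reachable from an observable one is dead and can be swept, exactly
// the mark phase of EliminatePhis.
std::vector<bool> markLivePhis(const std::vector<std::vector<int>>& phiOperands,
                               const std::vector<int>& observable) {
    std::vector<bool> live(phiOperands.size(), false);
    std::vector<int> worklist(observable);
    for (int p : observable)
        live[p] = true;
    while (!worklist.empty()) {
        int phi = worklist.back();
        worklist.pop_back();
        for (int in : phiOperands[phi]) {
            if (!live[in]) {              // plays the role of isInWorklist()
                live[in] = true;
                worklist.push_back(in);
            }
        }
    }
    return live;
}

int main() {
    // phi0 uses phi1; phi1 has no phi operands; phi2 is never observed.
    std::vector<std::vector<int>> ops{{1}, {}, {1}};
    std::vector<bool> live = markLivePhis(ops, /*observable=*/{0});
    for (size_t i = 0; i < live.size(); i++)
        std::printf("phi%zu %s\n", i, live[i] ? "live" : "dead"); // live live dead
}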