本文整理汇总了C++中LAllocation类的典型用法代码示例。如果您正苦于以下问题:C++ LAllocation类的具体用法?C++ LAllocation怎么用?C++ LAllocation使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了LAllocation类的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: toFloatReg
bool
LAllocation::aliases(const LAllocation& other) const
{
    // Only float registers can overlap without being equal (a double
    // register may alias the single registers it contains); for every
    // other kind of allocation, aliasing means exact equality.
    if (!isFloatReg() || !other.isFloatReg())
        return *this == other;
    return toFloatReg()->reg().aliases(other.toFloatReg()->reg());
}
示例2: ToRegister
bool
CodeGeneratorX86Shared::visitOutOfLineUndoALUOperation(OutOfLineUndoALUOperation *ool)
{
    LInstruction *ins = ool->ins();
    Register reg = ToRegister(ins->getDef(0));
    mozilla::DebugOnly<LAllocation *> lhs = ins->getOperand(0);
    LAllocation *rhs = ins->getOperand(1);

    // The output was allocated on top of the left-hand input, and the
    // right-hand input must not have been clobbered by the write.
    JS_ASSERT(reg == ToRegister(lhs));
    JS_ASSERT_IF(rhs->isGeneralReg(), reg != ToRegister(rhs));

    // Undo the effect of the ALU operation, which was performed on the output
    // register and overflowed. Writing to the output register clobbered an
    // input reg, and the original value of the input needs to be recovered
    // to satisfy the constraint imposed by any RECOVERED_INPUT operands to
    // the bailout snapshot.
    bool undoBySubtracting = ins->isAddI();
    if (rhs->isConstant()) {
        Imm32 imm(ToInt32(rhs));
        if (undoBySubtracting)
            masm.subl(imm, reg);
        else
            masm.addl(imm, reg);
    } else {
        if (undoBySubtracting)
            masm.subl(ToOperand(rhs), reg);
        else
            masm.addl(ToOperand(rhs), reg);
    }

    return bailout(ool->ins()->snapshot());
}
示例3: ToValue
void
CodeGeneratorMIPS64::visitUnbox(LUnbox* unbox)
{
    MUnbox* mir = unbox->mir();

    // For a fallible unbox, compare the boxed value's tag with the tag of
    // the expected MIRType and bail out to the snapshot on mismatch.
    if (mir->fallible()) {
        const ValueOperand boxed = ToValue(unbox, LUnbox::Input);
        masm.splitTag(boxed, SecondScratchReg);
        bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(MIRTypeToTag(mir->type())),
                     unbox->snapshot());
    }

    LAllocation* operand = unbox->getOperand(LUnbox::Input);
    Register dest = ToRegister(unbox->output());

    if (!operand->isRegister()) {
        // The boxed value lives in memory; unbox straight from its address.
        Address src = ToAddress(operand);
        switch (mir->type()) {
          case MIRType::Int32:
            masm.unboxInt32(src, dest);
            break;
          case MIRType::Boolean:
            masm.unboxBoolean(src, dest);
            break;
          case MIRType::Object:
            masm.unboxObject(src, dest);
            break;
          case MIRType::String:
            masm.unboxString(src, dest);
            break;
          case MIRType::Symbol:
            masm.unboxSymbol(src, dest);
            break;
          default:
            MOZ_CRASH("Given MIRType cannot be unboxed.");
        }
        return;
    }

    // The boxed value is already in a register.
    Register src = ToRegister(operand);
    switch (mir->type()) {
      case MIRType::Int32:
        masm.unboxInt32(src, dest);
        break;
      case MIRType::Boolean:
        masm.unboxBoolean(src, dest);
        break;
      case MIRType::Object:
        masm.unboxObject(src, dest);
        break;
      case MIRType::String:
        masm.unboxString(src, dest);
        break;
      case MIRType::Symbol:
        masm.unboxSymbol(src, dest);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
}
示例4: getVirtualRegister
void
GreedyAllocator::informSnapshot(LInstruction *ins)
{
    // Every definition in a snapshot gets a stack slot. This
    // simplification means we can treat normal snapshots and LOsiPoint
    // snapshots (which follow calls) the same, without adding a special
    // exception to note that registers are spilled at the LOsiPoint.
    LSnapshot *snapshot = ins->snapshot();
    size_t numEntries = snapshot->numEntries();
    for (size_t entry = 0; entry < numEntries; entry++) {
        LAllocation *slot = snapshot->getEntry(entry);
        if (slot->isUse()) {
            VirtualRegister *vreg = getVirtualRegister(slot->toUse());
            allocateStack(vreg);
            *slot = vreg->backingStack();
        }
    }
}
示例5: IonSpew
bool
GreedyAllocator::buildPhiMoves(LBlock *block)
{
    IonSpew(IonSpew_RegAlloc, " Merging phi state.");

    phiMoves = Mover();

    MBasicBlock *mblock = block->mir();
    MBasicBlock *phiBlock = mblock->successorWithPhis();
    if (!phiBlock)
        return true;

    // Insert moves from our state into our successor's phi.
    uint32 phiPos = mblock->positionInPhiSuccessor();
    LBlock *successor = phiBlock->lir();
    for (size_t phiIndex = 0; phiIndex < successor->numPhis(); phiIndex++) {
        LPhi *phi = successor->getPhi(phiIndex);
        JS_ASSERT(phi->numDefs() == 1);

        VirtualRegister *target = getVirtualRegister(phi->getDef(0));
        allocateStack(target);

        LAllocation *operand = phi->getOperand(phiPos);
        VirtualRegister *source = getVirtualRegister(operand->toUse());
        allocateStack(source);

        // Try to get a register for the input.
        if (!source->hasRegister() && !allocatableRegs().empty(source->isDouble())) {
            if (!allocateReg(source))
                return false;
        }

        // Add a move from the input to the phi, reading from the input's
        // register if it got one, otherwise from its stack backing.
        bool added = source->hasRegister()
                     ? phiMoves.move(source->reg(), target->backingStack())
                     : phiMoves.move(source->backingStack(), target->backingStack());
        if (!added)
            return false;
    }
    return true;
}
示例6: MOZ_ASSERT
// Verify that register allocation preserved the semantics of the virtual
// registers: every allocation is assigned, reuse constraints are honored,
// and every use traces back to its vreg's definition along all paths.
// With |populateSafepoints| set, missing safepoint entries are recorded
// instead of asserted. Returns false when a check or recording step fails.
bool
AllocationIntegrityState::check(bool populateSafepoints)
{
MOZ_ASSERT(!instructions.empty());
#ifdef DEBUG
if (JitSpewEnabled(JitSpew_RegAlloc))
dump();
for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
LBlock* block = graph.getBlock(blockIndex);
// Check that all instruction inputs and outputs have been assigned an allocation.
for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
LInstruction* ins = *iter;
for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next())
MOZ_ASSERT(!alloc->isUse());
for (size_t i = 0; i < ins->numDefs(); i++) {
LDefinition* def = ins->getDef(i);
MOZ_ASSERT(!def->output()->isUse());
// A MUST_REUSE_INPUT definition must have been assigned the same
// allocation as the operand it is required to reuse.
LDefinition oldDef = instructions[ins->id()].outputs[i];
MOZ_ASSERT_IF(oldDef.policy() == LDefinition::MUST_REUSE_INPUT,
*def->output() == *ins->getOperand(oldDef.getReusedInput()));
}
for (size_t i = 0; i < ins->numTemps(); i++) {
LDefinition* temp = ins->getTemp(i);
// Non-bogus temps must live in registers.
MOZ_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isRegister());
LDefinition oldTemp = instructions[ins->id()].temps[i];
MOZ_ASSERT_IF(oldTemp.policy() == LDefinition::MUST_REUSE_INPUT,
*temp->output() == *ins->getOperand(oldTemp.getReusedInput()));
}
}
}
#endif
// Check that the register assignment and move groups preserve the original
// semantics of the virtual registers. Each virtual register has a single
// write (owing to the SSA representation), but the allocation may move the
// written value around between registers and memory locations along
// different paths through the script.
//
// For each use of an allocation, follow the physical value which is read
// backward through the script, along all paths to the value's virtual
// register's definition.
for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
LBlock* block = graph.getBlock(blockIndex);
for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
LInstruction* ins = *iter;
const InstructionInfo& info = instructions[ins->id()];
LSafepoint* safepoint = ins->safepoint();
if (safepoint) {
// Each live temp must be recorded in the safepoint.
for (size_t i = 0; i < ins->numTemps(); i++) {
if (ins->getTemp(i)->isBogusTemp())
continue;
uint32_t vreg = info.temps[i].virtualRegister();
LAllocation* alloc = ins->getTemp(i)->output();
if (!checkSafepointAllocation(ins, vreg, *alloc, populateSafepoints))
return false;
}
// At calls no registers should be recorded as live across the
// safepoint (unless we are still populating the safepoints).
MOZ_ASSERT_IF(ins->isCall() && !populateSafepoints,
safepoint->liveRegs().emptyFloat() &&
safepoint->liveRegs().emptyGeneral());
}
size_t inputIndex = 0;
for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
LAllocation oldInput = info.inputs[inputIndex++];
if (!oldInput.isUse())
continue;
uint32_t vreg = oldInput.toUse()->virtualRegister();
// Inputs not used at start must also be covered by the safepoint.
if (safepoint && !oldInput.toUse()->usedAtStart()) {
if (!checkSafepointAllocation(ins, vreg, **alloc, populateSafepoints))
return false;
}
// Start checking at the previous instruction, in case this
// instruction reuses its input register for an output.
LInstructionReverseIterator riter = block->rbegin(ins);
riter++;
checkIntegrity(block, *riter, vreg, **alloc, populateSafepoints);
// Drain the predecessor-block work items queued by checkIntegrity,
// continuing the backward walk from each block's last instruction.
while (!worklist.empty()) {
IntegrityItem item = worklist.popCopy();
checkIntegrity(item.block, *item.block->rbegin(), item.vreg, item.alloc, populateSafepoints);
}
}
}
}
return true;
}
示例7: switch
// Check that |alloc|, which holds the value of |vreg| at |ins|, is reflected
// in |ins|'s safepoint in the manner appropriate for the vreg's type. With
// |populateSafepoints| set, the entry is added to the safepoint instead of
// being asserted. Returns false when adding a safepoint entry fails.
bool
AllocationIntegrityState::checkSafepointAllocation(LInstruction* ins,
uint32_t vreg, LAllocation alloc,
bool populateSafepoints)
{
LSafepoint* safepoint = ins->safepoint();
MOZ_ASSERT(safepoint);
// Registers are not preserved across calls (asserted empty in check()),
// so a register allocation at a call needs no safepoint entry.
if (ins->isCall() && alloc.isRegister())
return true;
if (alloc.isRegister()) {
AnyRegister reg = alloc.toRegister();
if (populateSafepoints)
safepoint->addLiveRegister(reg);
MOZ_ASSERT(safepoint->liveRegs().has(reg));
}
// The |this| argument slot is implicitly included in all safepoints.
if (alloc.isArgument() && alloc.toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value))
return true;
// Vregs with no recorded definition are treated as plain GENERAL values,
// which need no safepoint tracking below.
LDefinition::Type type = virtualRegisters[vreg]
? virtualRegisters[vreg]->type()
: LDefinition::GENERAL;
switch (type) {
case LDefinition::OBJECT:
if (populateSafepoints) {
JitSpew(JitSpew_RegAlloc, "Safepoint object v%u i%u %s",
vreg, ins->id(), alloc.toString());
if (!safepoint->addGcPointer(alloc))
return false;
}
MOZ_ASSERT(safepoint->hasGcPointer(alloc));
break;
case LDefinition::SLOTS:
if (populateSafepoints) {
JitSpew(JitSpew_RegAlloc, "Safepoint slots v%u i%u %s",
vreg, ins->id(), alloc.toString());
if (!safepoint->addSlotsOrElementsPointer(alloc))
return false;
}
MOZ_ASSERT(safepoint->hasSlotsOrElementsPointer(alloc));
break;
#ifdef JS_NUNBOX32
// Do not assert that safepoint information for nunbox types is complete,
// as if a vreg for a value's components are copied in multiple places
// then the safepoint information may not reflect all copies. All copies
// of payloads must be reflected, however, for generational GC.
case LDefinition::TYPE:
if (populateSafepoints) {
JitSpew(JitSpew_RegAlloc, "Safepoint type v%u i%u %s",
vreg, ins->id(), alloc.toString());
if (!safepoint->addNunboxType(vreg, alloc))
return false;
}
break;
case LDefinition::PAYLOAD:
if (populateSafepoints) {
JitSpew(JitSpew_RegAlloc, "Safepoint payload v%u i%u %s",
vreg, ins->id(), alloc.toString());
if (!safepoint->addNunboxPayload(vreg, alloc))
return false;
}
MOZ_ASSERT(safepoint->hasNunboxPayload(alloc));
break;
#else
case LDefinition::BOX:
if (populateSafepoints) {
JitSpew(JitSpew_RegAlloc, "Safepoint boxed value v%u i%u %s",
vreg, ins->id(), alloc.toString());
if (!safepoint->addBoxedValue(alloc))
return false;
}
MOZ_ASSERT(safepoint->hasBoxedValue(alloc));
break;
#endif
default:
break;
}
return true;
}
示例8: outputOf
bool
LiveRangeAllocator<VREG>::buildLivenessInfo()
{
if (!init())
return false;
Vector<MBasicBlock *, 1, SystemAllocPolicy> loopWorkList;
BitSet *loopDone = BitSet::New(alloc(), graph.numBlockIds());
if (!loopDone)
return false;
for (size_t i = graph.numBlocks(); i > 0; i--) {
if (mir->shouldCancel("Build Liveness Info (main loop)"))
return false;
LBlock *block = graph.getBlock(i - 1);
MBasicBlock *mblock = block->mir();
BitSet *live = BitSet::New(alloc(), graph.numVirtualRegisters());
if (!live)
return false;
liveIn[mblock->id()] = live;
// Propagate liveIn from our successors to us
for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
MBasicBlock *successor = mblock->lastIns()->getSuccessor(i);
// Skip backedges, as we fix them up at the loop header.
if (mblock->id() < successor->id())
live->insertAll(liveIn[successor->id()]);
}
// Add successor phis
if (mblock->successorWithPhis()) {
LBlock *phiSuccessor = mblock->successorWithPhis()->lir();
for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
LPhi *phi = phiSuccessor->getPhi(j);
LAllocation *use = phi->getOperand(mblock->positionInPhiSuccessor());
uint32_t reg = use->toUse()->virtualRegister();
live->insert(reg);
}
}
// Variables are assumed alive for the entire block, a define shortens
// the interval to the point of definition.
for (BitSet::Iterator liveRegId(*live); liveRegId; liveRegId++) {
if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(inputOf(block->firstId()),
outputOf(block->lastId()).next()))
{
return false;
}
}
// Shorten the front end of live intervals for live variables to their
// point of definition, if found.
for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
// Calls may clobber registers, so force a spill and reload around the callsite.
if (ins->isCall()) {
for (AnyRegisterIterator iter(allRegisters_); iter.more(); iter++) {
if (forLSRA) {
if (!addFixedRangeAtHead(*iter, inputOf(*ins), outputOf(*ins)))
return false;
} else {
bool found = false;
for (size_t i = 0; i < ins->numDefs(); i++) {
if (ins->getDef(i)->isPreset() &&
*ins->getDef(i)->output() == LAllocation(*iter)) {
found = true;
break;
}
}
if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
return false;
}
}
}
for (size_t i = 0; i < ins->numDefs(); i++) {
if (ins->getDef(i)->policy() != LDefinition::PASSTHROUGH) {
LDefinition *def = ins->getDef(i);
CodePosition from;
if (def->policy() == LDefinition::PRESET && def->output()->isRegister() && forLSRA) {
// The fixed range covers the current instruction so the
// interval for the virtual register starts at the next
// instruction. If the next instruction has a fixed use,
// this can lead to unnecessary register moves. To avoid
// special handling for this, assert the next instruction
// has no fixed uses. defineFixed guarantees this by inserting
// an LNop.
JS_ASSERT(!NextInstructionHasFixedUses(block, *ins));
AnyRegister reg = def->output()->toRegister();
if (!addFixedRangeAtHead(reg, inputOf(*ins), outputOf(*ins).next()))
return false;
from = outputOf(*ins).next();
} else {
from = forLSRA ? inputOf(*ins) : outputOf(*ins);
}
if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
// MUST_REUSE_INPUT is implemented by allocating an output
//.........这里部分代码省略.........
示例9:
bool
LMoveGroup::add(LAllocation from, LAllocation to, LDefinition::Type type)
{
#ifdef DEBUG
    // A move onto itself is meaningless, and each destination may be
    // written at most once within a group.
    MOZ_ASSERT(from != to);
    for (size_t i = 0; i < moves_.length(); i++)
        MOZ_ASSERT(to != moves_[i].to());

    // Check that SIMD moves are aligned according to ABI requirements.
    if (LDefinition(type).isSimdType()) {
        auto assertSimdAligned = [](const LAllocation& a) {
            MOZ_ASSERT(a.isMemory() || a.isFloatReg());
            if (a.isMemory()) {
                if (a.isArgument())
                    MOZ_ASSERT(a.toArgument()->index() % SimdMemoryAlignment == 0);
                else
                    MOZ_ASSERT(a.toStackSlot()->slot() % SimdMemoryAlignment == 0);
            }
        };
        assertSimdAligned(from);
        assertSimdAligned(to);
    }
#endif
    return moves_.append(LMove(from, to, type));
}
示例10: switch
// Older variant of checkSafepointAllocation (JS_ASSERT / IonSpew era).
// Check that |alloc|, holding the value of |vreg| at |ins|, is reflected in
// |ins|'s safepoint as required by the vreg's type; with |populateSafepoints|
// set, record the entry instead of asserting it. Returns false when adding a
// safepoint entry fails.
bool
AllocationIntegrityState::checkSafepointAllocation(LInstruction *ins,
uint32_t vreg, LAllocation alloc,
bool populateSafepoints)
{
LSafepoint *safepoint = ins->safepoint();
JS_ASSERT(safepoint);
// Registers are not preserved across calls, so a register allocation at a
// call needs no safepoint entry.
if (ins->isCall() && alloc.isRegister())
return true;
if (alloc.isRegister()) {
AnyRegister reg = alloc.toRegister();
if (populateSafepoints)
safepoint->addLiveRegister(reg);
JS_ASSERT(safepoint->liveRegs().has(reg));
}
// Vregs with no recorded definition are treated as plain GENERAL values,
// which need no safepoint tracking below.
LDefinition::Type type = virtualRegisters[vreg]
? virtualRegisters[vreg]->type()
: LDefinition::GENERAL;
switch (type) {
case LDefinition::OBJECT:
if (populateSafepoints) {
IonSpew(IonSpew_RegAlloc, "Safepoint object v%u i%u %s",
vreg, ins->id(), alloc.toString());
if (!safepoint->addGcPointer(alloc))
return false;
}
JS_ASSERT(safepoint->hasGcPointer(alloc));
break;
#ifdef JS_NUNBOX32
// Do not assert that safepoint information for nunboxes is complete,
// as if a vreg for a value's components are copied in multiple places
// then the safepoint information may not reflect all copies.
// See SafepointWriter::writeNunboxParts.
case LDefinition::TYPE:
if (populateSafepoints) {
IonSpew(IonSpew_RegAlloc, "Safepoint type v%u i%u %s",
vreg, ins->id(), alloc.toString());
if (!safepoint->addNunboxType(vreg, alloc))
return false;
}
break;
case LDefinition::PAYLOAD:
if (populateSafepoints) {
IonSpew(IonSpew_RegAlloc, "Safepoint payload v%u i%u %s",
vreg, ins->id(), alloc.toString());
if (!safepoint->addNunboxPayload(vreg, alloc))
return false;
}
break;
#else
case LDefinition::BOX:
if (populateSafepoints) {
IonSpew(IonSpew_RegAlloc, "Safepoint boxed value v%u i%u %s",
vreg, ins->id(), alloc.toString());
if (!safepoint->addBoxedValue(alloc))
return false;
}
JS_ASSERT(safepoint->hasBoxedValue(alloc));
break;
#endif
default:
break;
}
return true;
}