This page collects typical usage examples of the C++ method Code::frameSize. If you have been wondering what Code::frameSize does, or how to use it in practice, the hand-picked code samples below should help. You can also explore further usage examples of the enclosing class, Code.
Five code examples of Code::frameSize are shown below, sorted by popularity by default. Feel free to upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
Example 1: repForArg
ValueRep StackmapSpecial::repForArg(Code& code, const Arg& arg)
{
    switch (arg.kind()) {
    case Arg::Tmp:
        return ValueRep::reg(arg.reg());
    case Arg::Imm:
    case Arg::Imm64:
        return ValueRep::constant(arg.value());
    case Arg::Addr:
        if (arg.base() == Tmp(GPRInfo::callFrameRegister))
            return ValueRep::stack(arg.offset());
        ASSERT(arg.base() == Tmp(MacroAssembler::stackPointerRegister));
        return ValueRep::stack(arg.offset() - code.frameSize());
    default:
        ASSERT_NOT_REACHED();
        return ValueRep();
    }
}
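As a sanity check of the Arg::Addr arithmetic above: after the prologue, SP == FP - frameSize, so an SP-relative offset maps into FP-relative space by subtracting the frame size, which is exactly what the last return statement does. A minimal standalone sketch of that conversion (the function name and main() harness are illustrative, not part of the WebKit API):

#include <cassert>
#include <cstdint>

// Rebase an SP-relative offset to be FP-relative. Since SP == FP - frameSize
// after the prologue, SP + offset == FP + (offset - frameSize).
int32_t rebaseToFP(int32_t offsetFromSP, int32_t frameSize)
{
    return offsetFromSP - frameSize;
}

int main()
{
    // With a 64-byte frame, SP+8 names the same slot as FP-56.
    assert(rebaseToFP(8, 64) == -56);
    return 0;
}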
Example 2: lowerStackArgs
void lowerStackArgs(Code& code)
{
    PhaseScope phaseScope(code, "lowerStackArgs");

    // Now we need to deduce how much argument area we need.
    for (BasicBlock* block : code) {
        for (Inst& inst : *block) {
            for (Arg& arg : inst.args) {
                if (arg.isCallArg()) {
                    // For now, we assume that we use 8 bytes of the call arg. But that's not
                    // such an awesome assumption.
                    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=150454
                    ASSERT(arg.offset() >= 0);
                    code.requestCallArgAreaSizeInBytes(arg.offset() + 8);
                }
            }
        }
    }

    code.setFrameSize(code.frameSize() + code.callArgAreaSizeInBytes());

    // Finally, transform the code to use Addr's instead of StackSlot's. This is a lossless
    // transformation since we can search the StackSlots array to figure out which StackSlot any
    // offset-from-FP refers to.

    InsertionSet insertionSet(code);
    for (BasicBlock* block : code) {
        // FIXME: We can keep track of the last large offset which was materialized in this block,
        // and reuse the register if it hasn't been clobbered, instead of regenerating
        // imm+add+addr every time. https://bugs.webkit.org/show_bug.cgi?id=171387
        for (unsigned instIndex = 0; instIndex < block->size(); ++instIndex) {
            Inst& inst = block->at(instIndex);
            inst.forEachArg(
                [&] (Arg& arg, Arg::Role role, Bank, Width width) {
                    auto stackAddr = [&] (Value::OffsetType offsetFromFP) -> Arg {
                        int32_t offsetFromSP = offsetFromFP + code.frameSize();

                        if (inst.admitsExtendedOffsetAddr(arg)) {
                            // Stackmaps and patchpoints expect addr inputs relative to SP or FP only. We might as well
                            // not even bother generating an addr with valid form for these opcodes since extended offset
                            // addr is always valid.
                            return Arg::extendedOffsetAddr(offsetFromFP);
                        }

                        Arg result = Arg::addr(Air::Tmp(GPRInfo::callFrameRegister), offsetFromFP);
                        if (result.isValidForm(width))
                            return result;

                        result = Arg::addr(Air::Tmp(MacroAssembler::stackPointerRegister), offsetFromSP);
                        if (result.isValidForm(width))
                            return result;

#if CPU(ARM64)
                        ASSERT(pinnedExtendedOffsetAddrRegister());
                        Air::Tmp tmp = Air::Tmp(*pinnedExtendedOffsetAddrRegister());

                        Arg largeOffset = Arg::isValidImmForm(offsetFromSP) ? Arg::imm(offsetFromSP) : Arg::bigImm(offsetFromSP);
                        insertionSet.insert(instIndex, Move, inst.origin, largeOffset, tmp);
                        insertionSet.insert(instIndex, Add64, inst.origin, Air::Tmp(MacroAssembler::stackPointerRegister), tmp);
                        result = Arg::addr(tmp, 0);
                        return result;
#elif CPU(X86_64)
                        // Can't happen on x86: immediates are always big enough for frame size.
                        RELEASE_ASSERT_NOT_REACHED();
#else
#error Unhandled architecture.
#endif
                    };

                    switch (arg.kind()) {
                    case Arg::Stack: {
                        StackSlot* slot = arg.stackSlot();
                        if (Arg::isZDef(role)
                            && slot->kind() == StackSlotKind::Spill
                            && slot->byteSize() > bytes(width)) {
                            // Currently we only handle this simple case because it's the only one
                            // that arises: ZDef's are only 32-bit right now. So, when we hit these
                            // assertions it means that we need to implement those other kinds of
                            // zero fills.
                            RELEASE_ASSERT(slot->byteSize() == 8);
                            RELEASE_ASSERT(width == Width32);

                            RELEASE_ASSERT(isValidForm(StoreZero32, Arg::Stack));
                            insertionSet.insert(
                                instIndex + 1, StoreZero32, inst.origin,
                                stackAddr(arg.offset() + 4 + slot->offsetFromFP()));
                        }
                        arg = stackAddr(arg.offset() + slot->offsetFromFP());
                        break;
                    }
                    case Arg::CallArg:
                        arg = stackAddr(arg.offset() - code.frameSize());
                        break;
                    default:
                        break;
                    }
                });
        }
        insertionSet.execute(block);
//......... (the rest of this example is omitted) .........
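The stackAddr lambda above tries three addressing strategies in order: an FP-relative Addr, an SP-relative Addr, and finally (on ARM64) materializing the address into a pinned scratch register. A simplified standalone sketch of that fallback chain, with a placeholder displacement check standing in for Arg::isValidForm's width-dependent test:

#include <cstdint>

struct Address {
    int base;        // 0 = FP-relative, 1 = SP-relative, 2 = scratch register
    int32_t offset;
};

// Placeholder for Arg::isValidForm: pretend the ISA only encodes signed
// 9-bit displacements, roughly like an ARM64 unscaled load/store.
static bool isValidDisplacement(int32_t offset)
{
    return offset >= -256 && offset <= 255;
}

Address stackAddr(int32_t offsetFromFP, int32_t frameSize)
{
    int32_t offsetFromSP = offsetFromFP + frameSize;
    if (isValidDisplacement(offsetFromFP))
        return { 0, offsetFromFP };  // FP-relative form fits the encoding.
    if (isValidDisplacement(offsetFromSP))
        return { 1, offsetFromSP };  // SP-relative form fits the encoding.
    // Otherwise compute SP + offsetFromSP into a scratch register (the
    // ARM64 path above) and address through it with displacement 0.
    return { 2, 0 };
}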
Example 3: generate
void generate(Code& code, CCallHelpers& jit)
{
    TimingScope timingScope("Air::generate");

    DisallowMacroScratchRegisterUsage disallowScratch(jit);

    // And now, we generate code.
    jit.emitFunctionPrologue();
    if (code.frameSize())
        jit.addPtr(CCallHelpers::TrustedImm32(-code.frameSize()), MacroAssembler::stackPointerRegister);

    GenerationContext context;
    context.code = &code;
    IndexMap<BasicBlock, CCallHelpers::Label> blockLabels(code.size());
    IndexMap<BasicBlock, CCallHelpers::JumpList> blockJumps(code.size());

    auto link = [&] (CCallHelpers::Jump jump, BasicBlock* target) {
        if (blockLabels[target].isSet()) {
            jump.linkTo(blockLabels[target], &jit);
            return;
        }

        blockJumps[target].append(jump);
    };

    for (BasicBlock* block : code) {
        blockJumps[block].link(&jit);
        blockLabels[block] = jit.label();
        ASSERT(block->size() >= 1);
        for (unsigned i = 0; i < block->size() - 1; ++i) {
            CCallHelpers::Jump jump = block->at(i).generate(jit, context);
            ASSERT_UNUSED(jump, !jump.isSet());
        }

        if (block->last().opcode == Jump
            && block->successorBlock(0) == code.findNextBlock(block))
            continue;

        if (block->last().opcode == Ret) {
            // We currently don't represent the full prologue/epilogue in Air, so we need to
            // have this override.
            if (code.frameSize())
                jit.emitFunctionEpilogue();
            else
                jit.emitFunctionEpilogueWithEmptyFrame();
            jit.ret();
            continue;
        }

        CCallHelpers::Jump jump = block->last().generate(jit, context);
        switch (block->numSuccessors()) {
        case 0:
            ASSERT(!jump.isSet());
            break;
        case 1:
            link(jump, block->successorBlock(0));
            break;
        case 2:
            link(jump, block->successorBlock(0));
            if (block->successorBlock(1) != code.findNextBlock(block))
                link(jit.jump(), block->successorBlock(1));
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    for (auto& latePath : context.latePaths)
        latePath->run(jit, context);
}
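The link lambda above is the standard one-pass backpatching pattern: a jump to an already-emitted block is linked immediately, while a forward jump is queued on its target and patched once the target's label is bound. A minimal sketch of the same pattern, with hypothetical stand-ins for the CCallHelpers types:

#include <cstdio>
#include <map>
#include <vector>

// Hypothetical stand-in for CCallHelpers::Label; positions are byte
// offsets into the emitted code.
struct Label {
    int position = -1;
    bool isSet() const { return position >= 0; }
};

int main()
{
    std::map<int, Label> blockLabels;           // filled in as blocks are emitted
    std::map<int, std::vector<int>> blockJumps; // jump sites waiting on a label

    auto link = [&] (int jumpSite, int targetBlock) {
        if (blockLabels[targetBlock].isSet()) {
            // Backward jump: the target is already emitted, link immediately.
            std::printf("jump@%d -> %d\n", jumpSite, blockLabels[targetBlock].position);
            return;
        }
        // Forward jump: remember the site; patch it when the target is emitted.
        blockJumps[targetBlock].push_back(jumpSite);
    };

    link(4, 2);                    // forward jump to block 2: deferred
    blockLabels[2].position = 10;  // block 2 gets emitted at offset 10
    for (int site : blockJumps[2]) // patch all jumps pending on block 2
        std::printf("jump@%d -> %d (backpatched)\n", site, blockLabels[2].position);
    return 0;
}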
Example 4: allocateStack
void allocateStack(Code& code)
{
    PhaseScope phaseScope(code, "allocateStack");

    // Perform an escape analysis over stack slots. An escaping stack slot is one that is locked
    // or is explicitly escaped in the code.
    IndexSet<StackSlot> escapingStackSlots;
    for (StackSlot* slot : code.stackSlots()) {
        if (slot->isLocked())
            escapingStackSlots.add(slot);
    }
    for (BasicBlock* block : code) {
        for (Inst& inst : *block) {
            inst.forEachArg(
                [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
                    if (role == Arg::UseAddr && arg.isStack())
                        escapingStackSlots.add(arg.stackSlot());
                });
        }
    }

    // Allocate all of the escaped slots in order. This is kind of a crazy algorithm to allow for
    // the possibility of stack slots being assigned frame offsets before we even get here.
    ASSERT(!code.frameSize());
    Vector<StackSlot*> assignedEscapedStackSlots;
    Vector<StackSlot*> escapedStackSlotsWorklist;
    for (StackSlot* slot : code.stackSlots()) {
        if (escapingStackSlots.contains(slot)) {
            if (slot->offsetFromFP())
                assignedEscapedStackSlots.append(slot);
            else
                escapedStackSlotsWorklist.append(slot);
        } else {
            // It would be super strange to have an unlocked stack slot that has an offset already.
            ASSERT(!slot->offsetFromFP());
        }
    }

    // This is a fairly expensive loop, but it's OK because we'll usually only have a handful of
    // escaped stack slots.
    while (!escapedStackSlotsWorklist.isEmpty()) {
        StackSlot* slot = escapedStackSlotsWorklist.takeLast();
        assign(slot, assignedEscapedStackSlots);
        assignedEscapedStackSlots.append(slot);
    }

    // Now we handle the anonymous slots.
    StackSlotLiveness liveness(code);
    IndexMap<StackSlot, HashSet<StackSlot*>> interference(code.stackSlots().size());
    Vector<StackSlot*> slots;

    for (BasicBlock* block : code) {
        StackSlotLiveness::LocalCalc localCalc(liveness, block);

        auto interfere = [&] (Inst& inst) {
            if (verbose)
                dataLog("Interfering: ", WTF::pointerListDump(localCalc.live()), "\n");

            inst.forEachArg(
                [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
                    if (!Arg::isDef(role))
                        return;
                    if (!arg.isStack())
                        return;
                    StackSlot* slot = arg.stackSlot();
                    if (slot->kind() != StackSlotKind::Anonymous)
                        return;

                    for (StackSlot* otherSlot : localCalc.live()) {
                        interference[slot].add(otherSlot);
                        interference[otherSlot].add(slot);
                    }
                });
        };

        for (unsigned instIndex = block->size(); instIndex--;) {
            if (verbose)
                dataLog("Analyzing: ", block->at(instIndex), "\n");
            Inst& inst = block->at(instIndex);
            interfere(inst);
            localCalc.execute(instIndex);
        }
        Inst nop;
        interfere(nop);
    }

    if (verbose) {
        for (StackSlot* slot : code.stackSlots())
            dataLog("Interference of ", pointerDump(slot), ": ", pointerListDump(interference[slot]), "\n");
    }

    // Now we assign stack locations. At its heart this algorithm is just first-fit. For each
    // StackSlot we just want to find the offsetFromFP that is closest to zero while ensuring no
    // overlap with other StackSlots that this overlaps with.
    Vector<StackSlot*> otherSlots = assignedEscapedStackSlots;
    for (StackSlot* slot : code.stackSlots()) {
        if (slot->offsetFromFP()) {
            // Already assigned an offset.
            continue;
        }
//......... (the rest of this example is omitted) .........
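The assignment loop elided above is, as its comment says, essentially first-fit: each anonymous slot gets an offsetFromFP close to zero that doesn't overlap any interfering slot. A simplified sketch of that idea follows; the names are hypothetical, it slides past conflicts rather than searching gaps, and it ignores the alignment handling the real assign() helper performs:

#include <cstdint>
#include <vector>

struct Slot {
    int32_t offsetFromFP; // negative: slots live below the frame pointer
    uint32_t byteSize;
};

// Two slots conflict when their byte ranges [offset, offset + size) overlap.
static bool overlaps(const Slot& a, const Slot& b)
{
    return a.offsetFromFP < b.offsetFromFP + static_cast<int32_t>(b.byteSize)
        && b.offsetFromFP < a.offsetFromFP + static_cast<int32_t>(a.byteSize);
}

// Place `slot` at an offsetFromFP near zero that avoids every slot in `others`.
void assignFirstFit(Slot& slot, const std::vector<Slot>& others)
{
    slot.offsetFromFP = -static_cast<int32_t>(slot.byteSize); // closest-to-zero candidate
    for (bool moved = true; moved;) {
        moved = false;
        for (const Slot& other : others) {
            if (overlaps(slot, other)) {
                // Slide just below the conflicting slot and rescan from scratch.
                slot.offsetFromFP = other.offsetFromFP - static_cast<int32_t>(slot.byteSize);
                moved = true;
            }
        }
    }
}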
Example 5: allocateStack
void allocateStack(Code& code)
{
    PhaseScope phaseScope(code, "allocateStack");

    // Perform an escape analysis over stack slots. An escaping stack slot is one that is locked
    // or is explicitly escaped in the code.
    IndexSet<StackSlot> escapingStackSlots;
    for (StackSlot* slot : code.stackSlots()) {
        if (slot->isLocked())
            escapingStackSlots.add(slot);
    }
    for (BasicBlock* block : code) {
        for (Inst& inst : *block) {
            inst.forEachArg(
                [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
                    if (role == Arg::UseAddr && arg.isStack())
                        escapingStackSlots.add(arg.stackSlot());
                });
        }
    }

    // Allocate all of the escaped slots in order. This is kind of a crazy algorithm to allow for
    // the possibility of stack slots being assigned frame offsets before we even get here.
    ASSERT(!code.frameSize());
    Vector<StackSlot*> assignedEscapedStackSlots;
    Vector<StackSlot*> escapedStackSlotsWorklist;
    for (StackSlot* slot : code.stackSlots()) {
        if (escapingStackSlots.contains(slot)) {
            if (slot->offsetFromFP())
                assignedEscapedStackSlots.append(slot);
            else
                escapedStackSlotsWorklist.append(slot);
        } else {
            // It would be super strange to have an unlocked stack slot that has an offset already.
            ASSERT(!slot->offsetFromFP());
        }
    }

    // This is a fairly expensive loop, but it's OK because we'll usually only have a handful of
    // escaped stack slots.
    while (!escapedStackSlotsWorklist.isEmpty()) {
        StackSlot* slot = escapedStackSlotsWorklist.takeLast();
        assign(slot, assignedEscapedStackSlots);
        assignedEscapedStackSlots.append(slot);
    }

    // Now we handle the anonymous slots.
    StackSlotLiveness liveness(code);
    IndexMap<StackSlot, HashSet<StackSlot*>> interference(code.stackSlots().size());
    Vector<StackSlot*> slots;

    for (BasicBlock* block : code) {
        StackSlotLiveness::LocalCalc localCalc(liveness, block);

        auto interfere = [&] (unsigned instIndex) {
            if (verbose)
                dataLog("Interfering: ", WTF::pointerListDump(localCalc.live()), "\n");

            Inst::forEachDef<Arg>(
                block->get(instIndex), block->get(instIndex + 1),
                [&] (Arg& arg, Arg::Role, Arg::Type, Arg::Width) {
                    if (!arg.isStack())
                        return;
                    StackSlot* slot = arg.stackSlot();
                    if (slot->kind() != StackSlotKind::Anonymous)
                        return;

                    for (StackSlot* otherSlot : localCalc.live()) {
                        interference[slot].add(otherSlot);
                        interference[otherSlot].add(slot);
                    }
                });
        };

        for (unsigned instIndex = block->size(); instIndex--;) {
            if (verbose)
                dataLog("Analyzing: ", block->at(instIndex), "\n");

            // Kill dead stores. For simplicity we say that a store is killable if it has only
            // late defs and those late defs are to things that are dead right now. We only do
            // that because that's the only kind of dead stack store we will see here.
            Inst& inst = block->at(instIndex);
            if (!inst.hasNonArgEffects()) {
                bool ok = true;
                inst.forEachArg(
                    [&] (Arg& arg, Arg::Role role, Arg::Type, Arg::Width) {
                        if (Arg::isEarlyDef(role)) {
                            ok = false;
                            return;
                        }
                        if (!Arg::isLateDef(role))
                            return;
                        if (!arg.isStack()) {
                            ok = false;
                            return;
                        }
                        StackSlot* slot = arg.stackSlot();
                        if (slot->kind() != StackSlotKind::Anonymous) {
                            ok = false;
                            return;
                        }
//......... (the rest of this example is omitted) .........
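The dead-store test described in the comment above can be read as a small predicate: an instruction with no non-argument effects is killable exactly when every def it performs is a late def of an anonymous stack slot that liveness reports dead at this point. A standalone sketch of that predicate, with illustrative types that are not the Air API:

#include <set>
#include <vector>

// One def performed by an instruction: which slot it writes and how.
struct SlotDef {
    int slotIndex;
    bool isLateDef;
    bool isAnonymous;
};

// Returns true when the store is unobservable and can be deleted: only late
// defs, only anonymous slots, and none of the written slots are live.
bool isKillableStore(const std::vector<SlotDef>& defs, const std::set<int>& liveSlots)
{
    if (defs.empty())
        return false;
    for (const SlotDef& def : defs) {
        if (!def.isLateDef || !def.isAnonymous)
            return false;
        if (liveSlots.count(def.slotIndex))
            return false; // the slot is still live, so the store is observable
    }
    return true;
}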