This article collects typical usage examples of the C++ method MachineBasicBlock::getFirstTerminator. If you are wondering what MachineBasicBlock::getFirstTerminator does, how to call it, or want to see it used in real code, the hand-picked examples below should help. You can also explore further usage examples of its containing class, MachineBasicBlock.
The following shows 15 code examples of the MachineBasicBlock::getFirstTerminator method, sorted by popularity by default.
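Before diving into the examples, here is a minimal sketch of the idiom nearly all of them share: getFirstTerminator() returns an iterator to the first terminator instruction (branch, return, etc.) at the end of the block, or end() if the block has none, which makes it the natural insertion point for code that must execute before control leaves the block. This sketch is illustrative only and assumes the usual LLVM CodeGen headers; MyTarget::NOP is a placeholder opcode, not a real LLVM enum.
// Minimal sketch of the common pattern. MyTarget::NOP is a hypothetical
// placeholder opcode; substitute a real target instruction.
static void insertBeforeTerminators(MachineBasicBlock &MBB,
                                    const TargetInstrInfo *TII) {
  MachineBasicBlock::iterator InsertPt = MBB.getFirstTerminator();
  DebugLoc DL;
  if (InsertPt != MBB.end()) // A block may have no terminator at all.
    DL = InsertPt->getDebugLoc();
  // BuildMI inserts *before* InsertPt; when InsertPt == MBB.end() the new
  // instruction is simply appended, so the idiom is safe in both cases.
  BuildMI(MBB, InsertPt, DL, TII->get(MyTarget::NOP));
}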
Example 1: emitEpilogue
void WebAssemblyFrameLowering::emitEpilogue(MachineFunction &MF,
                                            MachineBasicBlock &MBB) const {
  uint64_t StackSize = MF.getFrameInfo()->getStackSize();
  if (!StackSize)
    return;

  const auto *TII = MF.getSubtarget().getInstrInfo();
  auto &MRI = MF.getRegInfo();
  unsigned OffsetReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  auto InsertPt = MBB.getFirstTerminator();
  DebugLoc DL;
  if (InsertPt != MBB.end()) {
    DL = InsertPt->getDebugLoc();
  }

  // Restore the stack pointer. Without FP its value is just SP32 - stacksize.
  BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
      .addImm(StackSize);
  auto *SPSymbol = MF.createExternalSymbolName("__stack_pointer");
  BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::ADD_I32), WebAssembly::SP32)
      .addReg(WebAssembly::SP32)
      .addReg(OffsetReg);
  // Re-use OffsetReg to hold the address of the stack top.
  BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
      .addExternalSymbol(SPSymbol);
  auto *MMO = new MachineMemOperand(MachinePointerInfo(),
                                    MachineMemOperand::MOStore, 4, 4);
  BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::STORE_I32),
          WebAssembly::SP32)
      .addImm(0)
      .addReg(OffsetReg)
      .addReg(WebAssembly::SP32)
      .addMemOperand(MMO);
}
Example 2: emitEpilogue
void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = std::prev(MBB.end());
  DebugLoc dl = MBBI->getDebugLoc();
  //
  // Only insert deallocframe if we need to. Also at -O0. See comment
  // in emitPrologue above.
  //
  if (hasFP(MF) || MF.getTarget().getOptLevel() == CodeGenOpt::None) {
    MachineBasicBlock::iterator MBBI = std::prev(MBB.end());
    MachineBasicBlock::iterator MBBI_end = MBB.end();
    const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();

    // Handle EH_RETURN.
    if (MBBI->getOpcode() == Hexagon::EH_RETURN_JMPR) {
      assert(MBBI->getOperand(0).isReg() && "Offset should be in register!");
      BuildMI(MBB, MBBI, dl, TII.get(Hexagon::L2_deallocframe));
      BuildMI(MBB, MBBI, dl, TII.get(Hexagon::A2_add),
              Hexagon::R29).addReg(Hexagon::R29).addReg(Hexagon::R28);
      return;
    }
    // Replace 'jumpr r31' instruction with dealloc_return for V4 and higher
    // versions.
    if (MF.getSubtarget<HexagonSubtarget>().hasV4TOps() &&
        MBBI->getOpcode() == Hexagon::JMPret && !DisableDeallocRet) {
      // Check for RESTORE_DEALLOC_RET_JMP_V4 call. Don't emit an extra DEALLOC
      // instruction if we encounter it.
      MachineBasicBlock::iterator BeforeJMPR =
          MBB.begin() == MBBI ? MBBI : std::prev(MBBI);
      if (BeforeJMPR != MBBI &&
          BeforeJMPR->getOpcode() == Hexagon::RESTORE_DEALLOC_RET_JMP_V4) {
        // Remove the JMPR node.
        MBB.erase(MBBI);
        return;
      }

      // Add dealloc_return.
      MachineInstrBuilder MIB =
          BuildMI(MBB, MBBI_end, dl, TII.get(Hexagon::L4_return));
      // Transfer the function live-out registers.
      MIB->copyImplicitOps(*MBB.getParent(), &*MBBI);
      // Remove the JUMPR node.
      MBB.erase(MBBI);
    } else { // Add deallocframe for V2 and V3, and V4 tail calls.
      // Check for RESTORE_DEALLOC_BEFORE_TAILCALL_V4. We don't need an extra
      // DEALLOCFRAME instruction after it.
      MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
      MachineBasicBlock::iterator I =
          Term == MBB.begin() ? MBB.end() : std::prev(Term);
      if (I != MBB.end() &&
          I->getOpcode() == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4)
        return;

      BuildMI(MBB, MBBI, dl, TII.get(Hexagon::L2_deallocframe));
    }
  }
}
Example 3: hasTerminatorThatModifiesExec
static bool hasTerminatorThatModifiesExec(const MachineBasicBlock &MBB,
                                          const TargetRegisterInfo &TRI) {
  for (MachineBasicBlock::const_iterator I = MBB.getFirstTerminator(),
       E = MBB.end(); I != E; ++I) {
    if (I->modifiesRegister(AMDGPU::EXEC, &TRI))
      return true;
  }
  return false;
}
Example 4: enterIntvAtEnd
/// enterIntvAtEnd - Enter openli at the end of MBB.
/// PhiMBB is a successor inside openli where a PHI value is created.
/// Currently, all entries must share the same PhiMBB.
void SplitEditor::enterIntvAtEnd(MachineBasicBlock &A, MachineBasicBlock &B) {
  assert(openli_ && "openIntv not called before enterIntvAtEnd");

  SlotIndex EndA = lis_.getMBBEndIdx(&A);
  VNInfo *CurVNIA = curli_->getVNInfoAt(EndA.getPrevIndex());
  if (!CurVNIA) {
    DEBUG(dbgs() << "    enterIntvAtEnd, curli not live out of BB#"
                 << A.getNumber() << ".\n");
    return;
  }

  // Add a phi kill value and live range out of A.
  VNInfo *VNIA = insertCopy(*openli_, A, A.getFirstTerminator());
  openli_->addRange(LiveRange(VNIA->def, EndA, VNIA));

  // FIXME: If this is the only entry edge, we don't need the extra PHI value.
  // FIXME: If there are multiple entry blocks (so not a loop), we need proper
  // SSA update.

  // Now look at the start of B.
  SlotIndex StartB = lis_.getMBBStartIdx(&B);
  SlotIndex EndB = lis_.getMBBEndIdx(&B);
  const LiveRange *CurB = curli_->getLiveRangeContaining(StartB);
  if (!CurB) {
    DEBUG(dbgs() << "    enterIntvAtEnd: curli not live in to BB#"
                 << B.getNumber() << ".\n");
    return;
  }

  VNInfo *VNIB = openli_->getVNInfoAt(StartB);
  if (!VNIB) {
    // Create a phi value.
    VNIB = openli_->getNextValue(SlotIndex(StartB, true), 0, false,
                                 lis_.getVNInfoAllocator());
    VNIB->setIsPHIDef(true);
    VNInfo *&mapVNI = valueMap_[CurB->valno];
    if (mapVNI) {
      // Multiple copies - must create PHI value.
      abort();
    } else {
      // This is the first copy of dupLR. Mark the mapping.
      mapVNI = VNIB;
    }
  }

  DEBUG(dbgs() << "    enterIntvAtEnd: " << *openli_ << '\n');
}
Example 5: allocateBasicBlock
void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
  this->MBB = &MBB;
  LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);

  PhysRegState.assign(TRI->getNumRegs(), regDisabled);
  assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");

  MachineBasicBlock::iterator MII = MBB.begin();

  // Add live-in registers as live.
  for (const MachineBasicBlock::RegisterMaskPair LI : MBB.liveins())
    if (MRI->isAllocatable(LI.PhysReg))
      definePhysReg(MII, LI.PhysReg, regReserved);

  VirtDead.clear();
  Coalesced.clear();

  // Otherwise, sequentially allocate each instruction in the MBB.
  for (MachineInstr &MI : MBB) {
    LLVM_DEBUG(
      dbgs() << "\n>> " << MI << "Regs:";
      dumpState()
    );

    // Special handling for debug values. Note that they are not allowed to
    // affect codegen of the other instructions in any way.
    if (MI.isDebugValue()) {
      handleDebugValue(MI);
      continue;
    }

    allocateInstruction(MI);
  }

  // Spill all physical registers holding virtual registers now.
  LLVM_DEBUG(dbgs() << "Spilling live registers at end of block.\n");
  spillAll(MBB.getFirstTerminator());

  // Erase all the coalesced copies. We are delaying it until now because
  // LiveVirtRegs might refer to the instrs.
  for (MachineInstr *MI : Coalesced)
    MBB.erase(MI);
  NumCoalesced += Coalesced.size();

  LLVM_DEBUG(MBB.dump());
}
Example 6: HoistPostRA
/// When an instruction is found to use only loop invariant operands that are
/// safe to hoist, this instruction is called to do the dirty work.
void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
  MachineBasicBlock *Preheader = getCurPreheader();

  // Now move the instructions to the predecessor, inserting it before any
  // terminator instructions.
  DEBUG(dbgs() << "Hoisting to BB#" << Preheader->getNumber() << " from BB#"
               << MI->getParent()->getNumber() << ": " << *MI);

  // Splice the instruction to the preheader.
  MachineBasicBlock *MBB = MI->getParent();
  Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);

  // Add register to livein list to all the BBs in the current loop since a
  // loop invariant must be kept live throughout the whole loop. This is
  // important to ensure later passes do not scavenge the def register.
  AddToLiveIns(Def);

  ++NumPostRAHoisted;
  Changed = true;
}
Example 7: SkipPHIsAndLabels
// FindCopyInsertPoint - Find a safe place in MBB to insert a copy from SrcReg
// when following the CFG edge to SuccMBB. This needs to be after any def of
// SrcReg, but before any subsequent point where control flow might jump out of
// the basic block.
MachineBasicBlock::iterator
llvm::PHIElimination::FindCopyInsertPoint(MachineBasicBlock &MBB,
                                          MachineBasicBlock &SuccMBB,
                                          unsigned SrcReg) {
  // Handle the trivial case trivially.
  if (MBB.empty())
    return MBB.begin();

  // Usually, we just want to insert the copy before the first terminator
  // instruction. However, for the edge going to a landing pad, we must insert
  // the copy before the call/invoke instruction.
  if (!SuccMBB.isLandingPad())
    return MBB.getFirstTerminator();

  // Discover any defs/uses in this basic block.
  SmallPtrSet<MachineInstr*, 8> DefUsesInMBB;
  for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(SrcReg),
       RE = MRI->reg_end(); RI != RE; ++RI) {
    MachineInstr *DefUseMI = &*RI;
    if (DefUseMI->getParent() == &MBB)
      DefUsesInMBB.insert(DefUseMI);
  }

  MachineBasicBlock::iterator InsertPoint;
  if (DefUsesInMBB.empty()) {
    // No defs. Insert the copy at the start of the basic block.
    InsertPoint = MBB.begin();
  } else if (DefUsesInMBB.size() == 1) {
    // Insert the copy immediately after the def/use.
    InsertPoint = *DefUsesInMBB.begin();
    ++InsertPoint;
  } else {
    // Insert the copy immediately after the last def/use.
    InsertPoint = MBB.end();
    while (!DefUsesInMBB.count(&*--InsertPoint)) {}
    ++InsertPoint;
  }

  // Make sure the copy goes after any phi nodes however.
  return SkipPHIsAndLabels(MBB, InsertPoint);
}
Example 8: SkipPHIsAndLabels
// FindCopyInsertPoint - Find a safe place in MBB to insert a copy from SrcReg.
// This needs to be after any def or uses of SrcReg, but before any subsequent
// point where control flow might jump out of the basic block.
MachineBasicBlock::iterator
llvm::PHIElimination::FindCopyInsertPoint(MachineBasicBlock &MBB,
                                          unsigned SrcReg) {
  // Handle the trivial case trivially.
  if (MBB.empty())
    return MBB.begin();

  // If this basic block does not contain an invoke, then control flow always
  // reaches the end of it, so place the copy there. The logic below works in
  // this case too, but is more expensive.
  if (!isa<InvokeInst>(MBB.getBasicBlock()->getTerminator()))
    return MBB.getFirstTerminator();

  // Discover any definition/uses in this basic block.
  SmallPtrSet<MachineInstr*, 8> DefUsesInMBB;
  for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(SrcReg),
       RE = MRI->reg_end(); RI != RE; ++RI) {
    MachineInstr *DefUseMI = &*RI;
    if (DefUseMI->getParent() == &MBB)
      DefUsesInMBB.insert(DefUseMI);
  }

  MachineBasicBlock::iterator InsertPoint;
  if (DefUsesInMBB.empty()) {
    // No def/uses. Insert the copy at the start of the basic block.
    InsertPoint = MBB.begin();
  } else if (DefUsesInMBB.size() == 1) {
    // Insert the copy immediately after the definition/use.
    InsertPoint = *DefUsesInMBB.begin();
    ++InsertPoint;
  } else {
    // Insert the copy immediately after the last definition/use.
    InsertPoint = MBB.end();
    while (!DefUsesInMBB.count(&*--InsertPoint)) {}
    ++InsertPoint;
  }

  // Make sure the copy goes after any phi nodes however.
  return SkipPHIsAndLabels(MBB, InsertPoint);
}
Example 9: emitEpilogue
void WebAssemblyFrameLowering::emitEpilogue(MachineFunction &MF,
                                            MachineBasicBlock &MBB) const {
  auto &MFI = MF.getFrameInfo();
  uint64_t StackSize = MFI.getStackSize();
  if (!needsSP(MF, MFI) || !needsSPWriteback(MF, MFI)) return;
  const auto *TII = MF.getSubtarget<WebAssemblySubtarget>().getInstrInfo();
  auto &MRI = MF.getRegInfo();
  auto InsertPt = MBB.getFirstTerminator();
  DebugLoc DL;
  if (InsertPt != MBB.end())
    DL = InsertPt->getDebugLoc();

  // Restore the stack pointer. If we had fixed-size locals, add the offset
  // subtracted in the prolog.
  unsigned SPReg = 0;
  MachineBasicBlock::iterator InsertAddr = InsertPt;
  if (hasBP(MF)) {
    auto FI = MF.getInfo<WebAssemblyFunctionInfo>();
    SPReg = FI->getBasePointerVreg();
  } else if (StackSize) {
    const TargetRegisterClass *PtrRC =
        MRI.getTargetRegisterInfo()->getPointerRegClass(MF);
    unsigned OffsetReg = MRI.createVirtualRegister(PtrRC);
    InsertAddr =
        BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::CONST_I32), OffsetReg)
            .addImm(StackSize);
    // In the epilog we don't need to write the result back to the SP32
    // physreg because it won't be used again. We can use a stackified register
    // instead.
    SPReg = MRI.createVirtualRegister(PtrRC);
    BuildMI(MBB, InsertPt, DL, TII->get(WebAssembly::ADD_I32), SPReg)
        .addReg(hasFP(MF) ? WebAssembly::FP32 : WebAssembly::SP32)
        .addReg(OffsetReg);
  } else {
    SPReg = hasFP(MF) ? WebAssembly::FP32 : WebAssembly::SP32;
  }

  writeSPToMemory(SPReg, MF, MBB, InsertAddr, InsertPt, DL);
}
Example 10: insertCSRRestores
/// Insert restore code for the callee-saved registers used in the function.
static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
                              std::vector<CalleeSavedInfo> &CSI) {
  MachineFunction &MF = *RestoreBlock.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  // Restore all registers immediately before the return and any
  // terminators that precede it.
  MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();

  if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
    for (const CalleeSavedInfo &CI : reverse(CSI)) {
      unsigned Reg = CI.getReg();
      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
      TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC, TRI);
      assert(I != RestoreBlock.begin() &&
             "loadRegFromStackSlot didn't insert any code!");
      // Insert in reverse order. loadRegFromStackSlot can insert
      // multiple instructions.
    }
  }
}
Example 11: runOnMachineFunction
bool PPCCTRLoopsVerify::runOnMachineFunction(MachineFunction &MF) {
  MDT = &getAnalysis<MachineDominatorTree>();

  // Verify that all bdnz/bdz instructions are dominated by a loop mtctr before
  // any other instructions that might clobber the ctr register.
  for (MachineFunction::iterator I = MF.begin(), IE = MF.end();
       I != IE; ++I) {
    MachineBasicBlock *MBB = I;
    if (!MDT->isReachableFromEntry(MBB))
      continue;

    for (MachineBasicBlock::iterator MII = MBB->getFirstTerminator(),
         MIIE = MBB->end(); MII != MIIE; ++MII) {
      unsigned Opc = MII->getOpcode();
      if (Opc == PPC::BDNZ8 || Opc == PPC::BDNZ ||
          Opc == PPC::BDZ8  || Opc == PPC::BDZ)
        if (!verifyCTRBranch(MBB, MII))
          llvm_unreachable("Invalid PPC CTR loop!");
    }
  }

  return false;
}
Example 12: splitMBB
/// Splits a MachineBasicBlock to branch before \p SplitBefore. The original
/// branch is \p OrigBranch. The target of the new branch can either be the
/// same as the target of the original branch or the fallthrough successor of
/// the original block as determined by \p BranchToFallThrough. The branch
/// conditions will be inverted according to \p InvertNewBranch and
/// \p InvertOrigBranch. If an instruction that previously fed the branch is to
/// be deleted, it is provided in \p MIToDelete and \p NewCond will be used as
/// the branch condition. The branch probabilities will be set if the
/// MachineBranchProbabilityInfo isn't null.
static bool splitMBB(BlockSplitInfo &BSI) {
  assert(BSI.allInstrsInSameMBB() &&
         "All instructions must be in the same block.");

  MachineBasicBlock *ThisMBB = BSI.OrigBranch->getParent();
  MachineFunction *MF = ThisMBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  assert(MRI->isSSA() && "Can only do this while the function is in SSA form.");
  if (ThisMBB->succ_size() != 2) {
    LLVM_DEBUG(
        dbgs() << "Don't know how to handle blocks that don't have exactly"
               << " two successors.\n");
    return false;
  }

  const PPCInstrInfo *TII = MF->getSubtarget<PPCSubtarget>().getInstrInfo();
  unsigned OrigBROpcode = BSI.OrigBranch->getOpcode();
  unsigned InvertedOpcode =
      OrigBROpcode == PPC::BC
          ? PPC::BCn
          : OrigBROpcode == PPC::BCn
                ? PPC::BC
                : OrigBROpcode == PPC::BCLR ? PPC::BCLRn : PPC::BCLR;
  unsigned NewBROpcode = BSI.InvertNewBranch ? InvertedOpcode : OrigBROpcode;
  MachineBasicBlock *OrigTarget = BSI.OrigBranch->getOperand(1).getMBB();
  MachineBasicBlock *OrigFallThrough = OrigTarget == *ThisMBB->succ_begin()
                                           ? *ThisMBB->succ_rbegin()
                                           : *ThisMBB->succ_begin();
  MachineBasicBlock *NewBRTarget =
      BSI.BranchToFallThrough ? OrigFallThrough : OrigTarget;
  BranchProbability ProbToNewTarget =
      !BSI.MBPI ? BranchProbability::getUnknown()
                : BSI.MBPI->getEdgeProbability(ThisMBB, NewBRTarget);

  // Create a new basic block.
  MachineBasicBlock::iterator InsertPoint = BSI.SplitBefore;
  const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
  MachineFunction::iterator It = ThisMBB->getIterator();
  MachineBasicBlock *NewMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(++It, NewMBB);

  // Move everything after SplitBefore into the new block.
  NewMBB->splice(NewMBB->end(), ThisMBB, InsertPoint, ThisMBB->end());
  NewMBB->transferSuccessors(ThisMBB);

  // Add the two successors to ThisMBB. The probabilities come from the
  // existing blocks if available.
  ThisMBB->addSuccessor(NewBRTarget, ProbToNewTarget);
  ThisMBB->addSuccessor(NewMBB, ProbToNewTarget.getCompl());

  // Add the branches to ThisMBB.
  BuildMI(*ThisMBB, ThisMBB->end(), BSI.SplitBefore->getDebugLoc(),
          TII->get(NewBROpcode))
      .addReg(BSI.SplitCond->getOperand(0).getReg())
      .addMBB(NewBRTarget);
  BuildMI(*ThisMBB, ThisMBB->end(), BSI.SplitBefore->getDebugLoc(),
          TII->get(PPC::B))
      .addMBB(NewMBB);
  if (BSI.MIToDelete)
    BSI.MIToDelete->eraseFromParent();

  // Change the condition on the original branch and invert it if requested.
  auto FirstTerminator = NewMBB->getFirstTerminator();
  if (BSI.NewCond) {
    assert(FirstTerminator->getOperand(0).isReg() &&
           "Can't update condition of unconditional branch.");
    FirstTerminator->getOperand(0).setReg(BSI.NewCond->getOperand(0).getReg());
  }
  if (BSI.InvertOrigBranch)
    FirstTerminator->setDesc(TII->get(InvertedOpcode));

  // If any of the PHIs in the successors of NewMBB reference values that
  // now come from NewMBB, they need to be updated.
  for (auto *Succ : NewMBB->successors()) {
    updatePHIs(Succ, ThisMBB, NewMBB, MRI);
  }
  addIncomingValuesToPHIs(NewBRTarget, ThisMBB, NewMBB, MRI);

  LLVM_DEBUG(dbgs() << "After splitting, ThisMBB:\n"; ThisMBB->dump());
  LLVM_DEBUG(dbgs() << "NewMBB:\n"; NewMBB->dump());
  LLVM_DEBUG(dbgs() << "New branch-to block:\n"; NewBRTarget->dump());
  return true;
}
Example 13: emitEpilogue
/// Insert epilog code into the function.
/// For ARC, this inserts a call to a function that restores callee saved
/// registers onto the stack, when enough callee saved registers are required.
void ARCFrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  LLVM_DEBUG(dbgs() << "Emit Epilogue: " << MF.getName() << "\n");
  auto *AFI = MF.getInfo<ARCFunctionInfo>();
  const ARCInstrInfo *TII = MF.getSubtarget<ARCSubtarget>().getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  uint64_t StackSize = MF.getFrameInfo().getStackSize();
  bool SavedBlink = false;
  unsigned AmountAboveFunclet = 0;

  // If we have variable sized frame objects, then we have to move
  // the stack pointer to a known spot (fp - StackSize).
  // Then, replace the frame pointer by (new) [sp,StackSize-4].
  // Then, move the stack pointer the rest of the way (sp = sp + StackSize).
  if (hasFP(MF)) {
    BuildMI(MBB, MBBI, DebugLoc(), TII->get(ARC::SUB_rru6), ARC::SP)
        .addReg(ARC::FP)
        .addImm(StackSize);
    AmountAboveFunclet += 4;
  }

  // Now, move the stack pointer to the bottom of the save area for the
  // funclet.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  unsigned Last = determineLastCalleeSave(CSI);
  unsigned StackSlotsUsedByFunclet = 0;
  // Now, restore the callee save registers.
  if (UseSaveRestoreFunclet && Last > ARC::R14) {
    // BL to __ld_r13_to_<TRI->getRegAsmName()>
    StackSlotsUsedByFunclet = Last - ARC::R12;
    AmountAboveFunclet += 4 * (StackSlotsUsedByFunclet + 1);
    SavedBlink = true;
  }
  if (MFI.hasCalls() && !SavedBlink) {
    AmountAboveFunclet += 4;
    SavedBlink = true;
  }

  // Move the stack pointer up to the point of the funclet.
  if (StackSize - AmountAboveFunclet) {
    BuildMI(MBB, MBBI, MBB.findDebugLoc(MBBI), TII->get(ARC::ADD_rru6))
        .addReg(ARC::SP)
        .addReg(ARC::SP)
        .addImm(StackSize - AmountAboveFunclet);
  }

  if (StackSlotsUsedByFunclet) {
    BuildMI(MBB, MBBI, MBB.findDebugLoc(MBBI), TII->get(ARC::BL))
        .addExternalSymbol(load_funclet_name[Last - ARC::R15])
        .addReg(ARC::BLINK, RegState::Implicit | RegState::Kill);
    BuildMI(MBB, MBBI, MBB.findDebugLoc(MBBI), TII->get(ARC::ADD_rru6))
        .addReg(ARC::SP)
        .addReg(ARC::SP)
        .addImm(4 * (StackSlotsUsedByFunclet));
  }
  // Now, pop blink if necessary.
  if (SavedBlink) {
    BuildMI(MBB, MBBI, MBB.findDebugLoc(MBBI), TII->get(ARC::POP_S_BLINK));
  }
  // Now, pop fp if necessary.
  if (hasFP(MF)) {
    BuildMI(MBB, MBBI, MBB.findDebugLoc(MBBI), TII->get(ARC::LD_AB_rs9))
        .addReg(ARC::SP, RegState::Define)
        .addReg(ARC::FP, RegState::Define)
        .addReg(ARC::SP)
        .addImm(4);
  }
  // Relieve the varargs area if necessary.
  if (MF.getFunction().isVarArg()) {
    // Add in the varargs area here first.
    LLVM_DEBUG(dbgs() << "Varargs\n");
    unsigned VarArgsBytes = MFI.getObjectSize(AFI->getVarArgsFrameIndex());
    BuildMI(MBB, MBBI, MBB.findDebugLoc(MBBI), TII->get(ARC::ADD_rru6))
        .addReg(ARC::SP)
        .addReg(ARC::SP)
        .addImm(VarArgsBytes);
  }
}
Example 14: PlaceBlockMarker
/// Insert a BLOCK marker for branches to MBB (if needed).
static void PlaceBlockMarker(MachineBasicBlock &MBB, MachineFunction &MF,
                             SmallVectorImpl<MachineBasicBlock *> &ScopeTops,
                             const WebAssemblyInstrInfo &TII,
                             const MachineLoopInfo &MLI,
                             MachineDominatorTree &MDT,
                             WebAssemblyFunctionInfo &MFI) {
  // First compute the nearest common dominator of all forward non-fallthrough
  // predecessors so that we minimize the time that the BLOCK is on the stack,
  // which reduces overall stack height.
  MachineBasicBlock *Header = nullptr;
  bool IsBranchedTo = false;
  int MBBNumber = MBB.getNumber();
  for (MachineBasicBlock *Pred : MBB.predecessors())
    if (Pred->getNumber() < MBBNumber) {
      Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred;
      if (ExplicitlyBranchesTo(Pred, &MBB))
        IsBranchedTo = true;
    }
  if (!Header)
    return;
  if (!IsBranchedTo)
    return;

  assert(&MBB != &MF.front() && "Header blocks shouldn't have predecessors");
  MachineBasicBlock *LayoutPred = &*prev(MachineFunction::iterator(&MBB));

  // If the nearest common dominator is inside a more deeply nested context,
  // walk out to the nearest scope which isn't more deeply nested.
  for (MachineFunction::iterator I(LayoutPred), E(Header); I != E; --I) {
    if (MachineBasicBlock *ScopeTop = ScopeTops[I->getNumber()]) {
      if (ScopeTop->getNumber() > Header->getNumber()) {
        // Skip over an intervening scope.
        I = next(MachineFunction::iterator(ScopeTop));
      } else {
        // We found a scope level at an appropriate depth.
        Header = ScopeTop;
        break;
      }
    }
  }

  // If there's a loop which ends just before MBB which contains Header, we can
  // reuse its label instead of inserting a new BLOCK.
  for (MachineLoop *Loop = MLI.getLoopFor(LayoutPred);
       Loop && Loop->contains(LayoutPred); Loop = Loop->getParentLoop())
    if (Loop && LoopBottom(Loop) == LayoutPred && Loop->contains(Header))
      return;

  // Decide where in Header to put the BLOCK.
  MachineBasicBlock::iterator InsertPos;
  MachineLoop *HeaderLoop = MLI.getLoopFor(Header);
  if (HeaderLoop && MBB.getNumber() > LoopBottom(HeaderLoop)->getNumber()) {
    // Header is the header of a loop that does not lexically contain MBB, so
    // the BLOCK needs to be above the LOOP, after any END constructs.
    InsertPos = Header->begin();
    while (InsertPos->getOpcode() != WebAssembly::LOOP)
      ++InsertPos;
  } else {
    // Otherwise, insert the BLOCK as late in Header as we can, but before the
    // beginning of the local expression tree and any nested BLOCKs.
    InsertPos = Header->getFirstTerminator();
    while (InsertPos != Header->begin() && IsChild(prev(InsertPos), MFI) &&
           prev(InsertPos)->getOpcode() != WebAssembly::LOOP &&
           prev(InsertPos)->getOpcode() != WebAssembly::END_BLOCK &&
           prev(InsertPos)->getOpcode() != WebAssembly::END_LOOP)
      --InsertPos;
  }

  // Add the BLOCK.
  BuildMI(*Header, InsertPos, DebugLoc(), TII.get(WebAssembly::BLOCK));

  // Mark the end of the block.
  InsertPos = MBB.begin();
  while (InsertPos != MBB.end() &&
         InsertPos->getOpcode() == WebAssembly::END_LOOP)
    ++InsertPos;
  BuildMI(MBB, InsertPos, DebugLoc(), TII.get(WebAssembly::END_BLOCK));

  // Track the farthest-spanning scope that ends at this point.
  int Number = MBB.getNumber();
  if (!ScopeTops[Number] ||
      ScopeTops[Number]->getNumber() > Header->getNumber())
    ScopeTops[Number] = Header;
}
Example 15: emitEpilogue
void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL;
  bool IsTailCallReturn = false;
  if (MBB.end() != MBBI) {
    DL = MBBI->getDebugLoc();
    unsigned RetOpcode = MBBI->getOpcode();
    IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
                       RetOpcode == AArch64::TCRETURNri;
  }
  int NumBytes = MFI->getStackSize();
  const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();

  // All calls are tail calls in GHC calling conv, and functions have no
  // prologue/epilogue.
  if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
    return;

  // Initial and residual are named for consistency with the prologue. Note
  // that in the epilogue, the residual adjustment is executed first.
  uint64_t ArgumentPopSize = 0;
  if (IsTailCallReturn) {
    MachineOperand &StackAdjust = MBBI->getOperand(1);

    // For a tail-call in a callee-pops-arguments environment, some or all of
    // the stack may actually be in use for the call's arguments, this is
    // calculated during LowerCall and consumed here...
    ArgumentPopSize = StackAdjust.getImm();
  } else {
    // ... otherwise the amount to pop is *all* of the argument space,
    // conveniently stored in the MachineFunctionInfo by
    // LowerFormalArguments. This will, of course, be zero for the C calling
    // convention.
    ArgumentPopSize = AFI->getArgumentStackToRestore();
  }

  // The stack frame should be like below,
  //
  //      ----------------------                     ---
  //      |                    |                      |
  //      | BytesInStackArgArea|              CalleeArgStackSize
  //      | (NumReusableBytes) |                (of tail call)
  //      |                    |                      |
  //      ---------------------|                      ---
  //      |                    |                       |
  //      |   CalleeSavedReg   |                       |
  //      | (NumRestores * 8)  |                       |
  //      |                    |                       |
  //      ---------------------|                       |        NumBytes
  //      |                    |       StackSize  (StackAdjustUp)
  //      |   LocalStackSize   |                       |          |
  //      |  (covering callee  |                       |          |
  //      |       args)        |                       |          |
  //      |                    |                       |          |
  //      ----------------------                      ---        ---
  //
  // So NumBytes = StackSize + BytesInStackArgArea - CalleeArgStackSize
  //             = StackSize + ArgumentPopSize
  //
  // AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
  // it as the 2nd argument of AArch64ISD::TC_RETURN.
  NumBytes += ArgumentPopSize;

  unsigned NumRestores = 0;
  // Move past the restores of the callee-saved registers.
  MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastPopI != Begin) {
    --LastPopI;
    unsigned Restores = getNumCSRestores(*LastPopI, CSRegs);
    NumRestores += Restores;
    if (Restores == 0) {
      ++LastPopI;
      break;
    }
  }
  NumBytes -= NumRestores * 8;
  assert(NumBytes >= 0 && "Negative stack allocation size!?");

  if (!hasFP(MF)) {
    // If this was a redzone leaf function, we don't need to restore the
    // stack pointer.
    if (!canUseRedZone(MF))
      emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, NumBytes,
                      TII);
    return;
  }

  // Restore the original stack pointer.
  // FIXME: Rather than doing the math here, we should instead just use
  // non-post-indexed loads for the restores if we aren't actually going to
  // be able to save any instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    // ......... part of the code is omitted here .........