This page collects typical usage examples of the C++ method MachineBasicBlock::begin. If you have been wondering what MachineBasicBlock::begin does in practice, or how to use it, the hand-picked examples below should help. You can also explore other uses of its containing class, MachineBasicBlock.
The following presents 15 code examples of MachineBasicBlock::begin, sorted by popularity by default.
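Before the examples themselves, here is a minimal, self-contained sketch of the most basic pattern they all build on: iterating a block from begin() to end(). The helper function and its name are invented for illustration only; the MachineBasicBlock/MachineInstr calls are the real LLVM API.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Count the non-debug instructions in a block by walking from MBB.begin()
// to MBB.end(). (Illustrative helper, not part of LLVM.)
static unsigned countRealInstructions(const MachineBasicBlock &MBB) {
  unsigned Count = 0;
  for (MachineBasicBlock::const_iterator I = MBB.begin(), E = MBB.end();
       I != E; ++I)
    if (!I->isDebugValue())
      ++Count;
  return Count;
}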
Example 1: hasFP
void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.begin();
const MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *Fn = MF.getFunction();
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineModuleInfo &MMI = MF.getMMI();
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
bool needsFrameMoves = MMI.hasDebugInfo() || Fn->needsUnwindTableEntry();
bool HasFP = hasFP(MF);
// Debug location must be unknown since the first debug location is used
// to determine the end of the prologue.
DebugLoc DL;
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
return;
int NumBytes = (int)MFI->getStackSize();
if (!AFI->hasStackFrame()) {
assert(!HasFP && "unexpected function without stack frame but with FP");
// All of the stack allocation is for locals.
AFI->setLocalStackSize(NumBytes);
// Label used to tie together the PROLOG_LABEL and the MachineMoves.
MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
// REDZONE: If the stack size is less than 128 bytes, we don't need
// to actually allocate.
if (NumBytes && !canUseRedZone(MF)) {
emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
MachineInstr::FrameSetup);
// Encode the stack size of the leaf function.
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex)
.setMIFlags(MachineInstr::FrameSetup);
} else if (NumBytes) {
++NumRedZoneFunctions;
}
return;
}
// Only set up FP if we actually need to.
int FPOffset = 0;
if (HasFP)
// Frame pointer is fp = sp - 16.
FPOffset = AFI->getCalleeSavedStackSize() - 16;
// Move past the saves of the callee-saved registers.
MachineBasicBlock::iterator End = MBB.end();
while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup))
++MBBI;
NumBytes -= AFI->getCalleeSavedStackSize();
assert(NumBytes >= 0 && "Negative stack allocation size!?");
if (HasFP) {
// Issue sub fp, sp, FPOffset or
// mov fp,sp when FPOffset is zero.
// Note: All stores of callee-saved registers are marked as "FrameSetup".
// This code marks the instruction(s) that set the FP also.
emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP, FPOffset, TII,
MachineInstr::FrameSetup);
}
// All of the remaining stack allocations are for locals.
AFI->setLocalStackSize(NumBytes);
// Allocate space for the rest of the frame.
const unsigned Alignment = MFI->getMaxAlignment();
const bool NeedsRealignment = RegInfo->needsStackRealignment(MF);
unsigned scratchSPReg = AArch64::SP;
if (NumBytes && NeedsRealignment) {
scratchSPReg = findScratchNonCalleeSaveRegister(&MBB);
assert(scratchSPReg != AArch64::NoRegister);
}
// If we're a leaf function, try using the red zone.
if (NumBytes && !canUseRedZone(MF))
// FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
// the correct value here, as NumBytes also includes padding bytes,
// which shouldn't be counted here.
emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP, -NumBytes, TII,
MachineInstr::FrameSetup);
if (NumBytes && NeedsRealignment) {
const unsigned NrBitsToZero = countTrailingZeros(Alignment);
assert(NrBitsToZero > 1);
assert(scratchSPReg != AArch64::SP);
// SUB X9, SP, NumBytes
// -- X9 is temporary register, so shouldn't contain any live data here,
//......... (the rest of this code is omitted) .........
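The begin() call in Example 1 establishes the insertion point at the top of the prologue block, and the later loop steps past every instruction tagged MachineInstr::FrameSetup. A stripped-down sketch of just that idiom follows; the helper name is invented, but the flag and iterator APIs are the ones used above.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Return the first instruction after the FrameSetup-tagged prologue code,
// i.e. where the local-stack adjustment should be inserted.
static MachineBasicBlock::iterator skipFrameSetup(MachineBasicBlock &MBB) {
  MachineBasicBlock::iterator MBBI = MBB.begin(), End = MBB.end();
  while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup))
    ++MBBI;
  return MBBI;
}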
Example 2: LowerAtomicPHINode
/// LowerAtomicPHINode - Lower the PHI node at the top of the specified block,
/// under the assumption that it needs to be lowered in a way that supports
/// atomic execution of PHIs. This lowering method is always correct all of the
/// time.
///
void PHIElimination::LowerAtomicPHINode(
MachineBasicBlock &MBB,
MachineBasicBlock::iterator AfterPHIsIt) {
++NumAtomic;
// Unlink the PHI node from the basic block, but don't delete the PHI yet.
MachineInstr *MPhi = MBB.remove(MBB.begin());
unsigned NumSrcs = (MPhi->getNumOperands() - 1) / 2;
unsigned DestReg = MPhi->getOperand(0).getReg();
assert(MPhi->getOperand(0).getSubReg() == 0 && "Can't handle sub-reg PHIs");
bool isDead = MPhi->getOperand(0).isDead();
// Create a new register for the incoming PHI arguments.
MachineFunction &MF = *MBB.getParent();
unsigned IncomingReg = 0;
bool reusedIncoming = false; // Is IncomingReg reused from an earlier PHI?
// Insert a register to register copy at the top of the current block (but
// after any remaining phi nodes) which copies the new incoming register
// into the phi node destination.
const TargetInstrInfo *TII = MF.getTarget().getInstrInfo();
if (isSourceDefinedByImplicitDef(MPhi, MRI))
// If all sources of a PHI node are implicit_def, just emit an
// implicit_def instead of a copy.
BuildMI(MBB, AfterPHIsIt, MPhi->getDebugLoc(),
TII->get(TargetOpcode::IMPLICIT_DEF), DestReg);
else {
// Can we reuse an earlier PHI node? This only happens for critical edges,
// typically those created by tail duplication.
unsigned &entry = LoweredPHIs[MPhi];
if (entry) {
// An identical PHI node was already lowered. Reuse the incoming register.
IncomingReg = entry;
reusedIncoming = true;
++NumReused;
DEBUG(dbgs() << "Reusing " << PrintReg(IncomingReg) << " for " << *MPhi);
} else {
const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(DestReg);
entry = IncomingReg = MF.getRegInfo().createVirtualRegister(RC);
}
BuildMI(MBB, AfterPHIsIt, MPhi->getDebugLoc(),
TII->get(TargetOpcode::COPY), DestReg)
.addReg(IncomingReg);
}
// Update live variable information if there is any.
LiveVariables *LV = getAnalysisIfAvailable<LiveVariables>();
if (LV) {
MachineInstr *PHICopy = prior(AfterPHIsIt);
if (IncomingReg) {
LiveVariables::VarInfo &VI = LV->getVarInfo(IncomingReg);
// Increment use count of the newly created virtual register.
VI.NumUses++;
LV->setPHIJoin(IncomingReg);
// When we are reusing the incoming register, it may already have been
// killed in this block. The old kill will also have been inserted at
// AfterPHIsIt, so it appears before the current PHICopy.
if (reusedIncoming)
if (MachineInstr *OldKill = VI.findKill(&MBB)) {
DEBUG(dbgs() << "Remove old kill from " << *OldKill);
LV->removeVirtualRegisterKilled(IncomingReg, OldKill);
DEBUG(MBB.dump());
}
// Add information to LiveVariables to know that the incoming value is
// killed. Note that because the value is defined in several places (once
// each for each incoming block), the "def" block and instruction fields
// for the VarInfo is not filled in.
LV->addVirtualRegisterKilled(IncomingReg, PHICopy);
}
// Since we are going to be deleting the PHI node, if it is the last use of
// any registers, or if the value itself is dead, we need to move this
// information over to the new copy we just inserted.
LV->removeVirtualRegistersKilled(MPhi);
// If the result is dead, update LV.
if (isDead) {
LV->addVirtualRegisterDead(DestReg, PHICopy);
LV->removeVirtualRegisterDead(DestReg, MPhi);
}
}
// Adjust the VRegPHIUseCount map to account for the removal of this PHI node.
for (unsigned i = 1; i != MPhi->getNumOperands(); i += 2)
--VRegPHIUseCount[BBVRegPair(MPhi->getOperand(i+1).getMBB()->getNumber(),
MPhi->getOperand(i).getReg())];
// Now loop over all of the incoming arguments, changing them to copy into the
// IncomingReg register in the corresponding predecessor basic block.
SmallPtrSet<MachineBasicBlock*, 8> MBBsInsertedInto;
for (int i = NumSrcs - 1; i >= 0; --i) {
//......... (the rest of this code is omitted) .........
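Example 2 leans on the invariant that PHI nodes are always grouped at the top of a block, so MBB.begin() is the PHI to lower as long as the caller verified the block starts with one. The sketch below shows the same detach-without-delete idea in isolation; the helper is hypothetical, and the caller is responsible for re-inserting or deleting the returned instructions.

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Unlink every PHI node at the top of MBB and hand it back to the caller.
// remove() detaches the instruction from the block but does not delete it.
static void detachLeadingPHIs(MachineBasicBlock &MBB,
                              SmallVectorImpl<MachineInstr *> &PHIs) {
  while (!MBB.empty() && MBB.front().isPHI())
    PHIs.push_back(MBB.remove(&MBB.front()));
}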
Example 3: insertCSRSpillsAndRestores
/// insertCSRSpillsAndRestores - Insert spill and restore code for
/// callee saved registers used in the function, handling shrink wrapping.
///
void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
// Get callee saved register information.
MachineFrameInfo *MFI = Fn.getFrameInfo();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
MFI->setCalleeSavedInfoValid(true);
// Early exit if no callee saved registers are modified!
if (CSI.empty())
return;
const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering();
const TargetRegisterInfo *TRI = Fn.getTarget().getRegisterInfo();
MachineBasicBlock::iterator I;
if (!ShrinkWrapThisFunction) {
// Spill using target interface.
I = EntryBlock->begin();
if (!TFI->spillCalleeSavedRegisters(*EntryBlock, I, CSI, TRI)) {
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
// Add the callee-saved register as live-in.
// It's killed at the spill.
EntryBlock->addLiveIn(CSI[i].getReg());
// Insert the spill to the stack frame.
unsigned Reg = CSI[i].getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.storeRegToStackSlot(*EntryBlock, I, Reg, true,
CSI[i].getFrameIdx(), RC, TRI);
}
}
// Restore using target interface.
for (unsigned ri = 0, re = ReturnBlocks.size(); ri != re; ++ri) {
MachineBasicBlock* MBB = ReturnBlocks[ri];
I = MBB->end(); --I;
// Skip over all terminator instructions, which are part of the return
// sequence.
MachineBasicBlock::iterator I2 = I;
while (I2 != MBB->begin() && (--I2)->isTerminator())
I = I2;
bool AtStart = I == MBB->begin();
MachineBasicBlock::iterator BeforeI = I;
if (!AtStart)
--BeforeI;
// Restore all registers immediately before the return and any
// terminators that precede it.
if (!TFI->restoreCalleeSavedRegisters(*MBB, I, CSI, TRI)) {
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.loadRegFromStackSlot(*MBB, I, Reg,
CSI[i].getFrameIdx(),
RC, TRI);
assert(I != MBB->begin() &&
"loadRegFromStackSlot didn't insert any code!");
// Insert in reverse order. loadRegFromStackSlot can insert
// multiple instructions.
if (AtStart)
I = MBB->begin();
else {
I = BeforeI;
++I;
}
}
}
}
return;
}
// Insert spills.
std::vector<CalleeSavedInfo> blockCSI;
for (CSRegBlockMap::iterator BI = CSRSave.begin(),
BE = CSRSave.end(); BI != BE; ++BI) {
MachineBasicBlock* MBB = BI->first;
CSRegSet save = BI->second;
if (save.empty())
continue;
blockCSI.clear();
for (CSRegSet::iterator RI = save.begin(),
RE = save.end(); RI != RE; ++RI) {
blockCSI.push_back(CSI[*RI]);
}
assert(blockCSI.size() > 0 &&
"Could not collect callee saved register info");
I = MBB->begin();
// When shrink wrapping, use stack slot stores/loads.
for (unsigned i = 0, e = blockCSI.size(); i != e; ++i) {
// Add the callee-saved register as live-in.
//......... (the rest of this code is omitted) .........
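In Example 3 (and its newer twin, Example 9), begin() mostly serves as the lower bound of a backward walk: the restore code has to land in front of the whole terminator sequence, but the iterator must never move before MBB->begin(). A condensed sketch of that positioning logic, with an invented name:

#include "llvm/CodeGen/MachineBasicBlock.h"

using namespace llvm;

// Find the point just before the return/terminator sequence of MBB,
// clamping at MBB.begin() for blocks that contain only terminators.
static MachineBasicBlock::iterator
findRestoreInsertionPoint(MachineBasicBlock &MBB) {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return I;                      // Empty block: begin() == end().
  --I;                             // Last instruction (assumed a return).
  MachineBasicBlock::iterator I2 = I;
  while (I2 != MBB.begin() && (--I2)->isTerminator())
    I = I2;                        // Step in front of each extra terminator.
  return I;
}

Current LLVM code usually reaches roughly the same point by calling MBB.getFirstTerminator() instead of open-coding the scan.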
Example 4: AnalyzeBranch
// Branch analysis.
// Note: If the condition register is set to CTR or CTR8 then this is a
// BDNZ (imm == 1) or BDZ (imm == 0) branch.
bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin())
return false;
--I;
while (I->isDebugValue()) {
if (I == MBB.begin())
return false;
--I;
}
if (!isUnpredicatedTerminator(I))
return false;
// Get the last instruction in the block.
MachineInstr *LastInst = I;
// If there is only one terminator instruction, process it.
if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
if (LastInst->getOpcode() == PPC::B) {
if (!LastInst->getOperand(0).isMBB())
return true;
TBB = LastInst->getOperand(0).getMBB();
return false;
} else if (LastInst->getOpcode() == PPC::BCC) {
if (!LastInst->getOperand(2).isMBB())
return true;
// Block ends with fall-through condbranch.
TBB = LastInst->getOperand(2).getMBB();
Cond.push_back(LastInst->getOperand(0));
Cond.push_back(LastInst->getOperand(1));
return false;
} else if (LastInst->getOpcode() == PPC::BDNZ8 ||
LastInst->getOpcode() == PPC::BDNZ) {
if (!LastInst->getOperand(0).isMBB())
return true;
if (DisableCTRLoopAnal)
return true;
TBB = LastInst->getOperand(0).getMBB();
Cond.push_back(MachineOperand::CreateImm(1));
Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
true));
return false;
} else if (LastInst->getOpcode() == PPC::BDZ8 ||
LastInst->getOpcode() == PPC::BDZ) {
if (!LastInst->getOperand(0).isMBB())
return true;
if (DisableCTRLoopAnal)
return true;
TBB = LastInst->getOperand(0).getMBB();
Cond.push_back(MachineOperand::CreateImm(0));
Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
true));
return false;
}
// Otherwise, don't know what this is.
return true;
}
// Get the instruction before it if it's a terminator.
MachineInstr *SecondLastInst = I;
// If there are three terminators, we don't know what sort of block this is.
if (SecondLastInst && I != MBB.begin() &&
isUnpredicatedTerminator(--I))
return true;
// If the block ends with PPC::B and PPC:BCC, handle it.
if (SecondLastInst->getOpcode() == PPC::BCC &&
LastInst->getOpcode() == PPC::B) {
if (!SecondLastInst->getOperand(2).isMBB() ||
!LastInst->getOperand(0).isMBB())
return true;
TBB = SecondLastInst->getOperand(2).getMBB();
Cond.push_back(SecondLastInst->getOperand(0));
Cond.push_back(SecondLastInst->getOperand(1));
FBB = LastInst->getOperand(0).getMBB();
return false;
} else if ((SecondLastInst->getOpcode() == PPC::BDNZ8 ||
SecondLastInst->getOpcode() == PPC::BDNZ) &&
LastInst->getOpcode() == PPC::B) {
if (!SecondLastInst->getOperand(0).isMBB() ||
!LastInst->getOperand(0).isMBB())
return true;
if (DisableCTRLoopAnal)
return true;
TBB = SecondLastInst->getOperand(0).getMBB();
Cond.push_back(MachineOperand::CreateImm(1));
Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
true));
FBB = LastInst->getOperand(0).getMBB();
//......... (the rest of this code is omitted) .........
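The recurring begin()-related idiom in AnalyzeBranch is the guarded backward scan: start at MBB.end() and, before every step back, check that the iterator has not already reached MBB.begin(). Here it is reduced to fetching the last non-debug instruction; the helper is illustrative, and MachineBasicBlock::getLastNonDebugInstr() performs essentially this scan in LLVM itself.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Return the last non-debug instruction of MBB, or nullptr if there is none.
// Every backwards step is guarded by a comparison against MBB.begin().
static MachineInstr *getLastRealInstr(MachineBasicBlock &MBB) {
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (!I->isDebugValue())
      return &*I;
  }
  return nullptr;
}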
Example 5: ExpandMBB
bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
bool Modified = false;
MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
while (MBBI != E) {
MachineInstr &MI = *MBBI;
MachineBasicBlock::iterator NMBBI = llvm::next(MBBI);
unsigned Opcode = MI.getOpcode();
switch (Opcode) {
default: break;
case ARM::tLDRpci_pic:
case ARM::t2LDRpci_pic: {
unsigned NewLdOpc = (Opcode == ARM::tLDRpci_pic)
? ARM::tLDRpci : ARM::t2LDRpci;
unsigned DstReg = MI.getOperand(0).getReg();
if (!MI.getOperand(0).isDead()) {
MachineInstr *NewMI =
AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(),
TII->get(NewLdOpc), DstReg)
.addOperand(MI.getOperand(1)));
NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPICADD))
.addReg(DstReg, getDefRegState(true))
.addReg(DstReg)
.addOperand(MI.getOperand(2));
}
MI.eraseFromParent();
Modified = true;
break;
}
case ARM::t2MOVi32imm: {
unsigned DstReg = MI.getOperand(0).getReg();
if (!MI.getOperand(0).isDead()) {
const MachineOperand &MO = MI.getOperand(1);
MachineInstrBuilder LO16, HI16;
LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::t2MOVi16),
DstReg);
HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::t2MOVTi16))
.addReg(DstReg, getDefRegState(true)).addReg(DstReg);
if (MO.isImm()) {
unsigned Imm = MO.getImm();
unsigned Lo16 = Imm & 0xffff;
unsigned Hi16 = (Imm >> 16) & 0xffff;
LO16 = LO16.addImm(Lo16);
HI16 = HI16.addImm(Hi16);
} else {
GlobalValue *GV = MO.getGlobal();
unsigned TF = MO.getTargetFlags();
LO16 = LO16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_LO16);
HI16 = HI16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_HI16);
// FIXME: What about memoperands?
}
AddDefaultPred(LO16);
AddDefaultPred(HI16);
}
MI.eraseFromParent();
Modified = true;
}
// FIXME: expand t2MOVi32imm
}
MBBI = NMBBI;
}
return Modified;
}
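Example 5 also demonstrates the standard way to erase or rewrite instructions while walking a block: compute the iterator of the next instruction before the current one may be erased, so the loop never dereferences an invalidated iterator. A hedged sketch of that pattern with a caller-supplied predicate (both the helper and the predicate are placeholders):

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include <iterator>

using namespace llvm;

// Erase every instruction matching Pred, returning true if anything changed.
// Note how NextI is computed before MI may be erased.
template <typename PredT>
static bool eraseMatchingInstrs(MachineBasicBlock &MBB, PredT Pred) {
  bool Modified = false;
  for (MachineBasicBlock::iterator MI = MBB.begin(), E = MBB.end(); MI != E;) {
    MachineBasicBlock::iterator NextI = std::next(MI);
    if (Pred(*MI)) {
      MI->eraseFromParent();
      Modified = true;
    }
    MI = NextI;
  }
  return Modified;
}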
Example 6: while
bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
bool Modified = false;
SmallSet<unsigned, 4> Defs;
SmallSet<unsigned, 4> Uses;
MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
while (MBBI != E) {
MachineInstr *MI = &*MBBI;
DebugLoc dl = MI->getDebugLoc();
unsigned PredReg = 0;
ARMCC::CondCodes CC = llvm::getITInstrPredicate(MI, PredReg);
if (CC == ARMCC::AL) {
++MBBI;
continue;
}
Defs.clear();
Uses.clear();
TrackDefUses(MI, Defs, Uses, TRI);
// Insert an IT instruction.
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(ARM::t2IT))
.addImm(CC);
// Add implicit use of ITSTATE to IT block instructions.
MI->addOperand(MachineOperand::CreateReg(ARM::ITSTATE, false/*isDef*/,
true/*isImp*/, false/*isKill*/));
MachineInstr *LastITMI = MI;
MachineBasicBlock::iterator InsertPos = MIB;
++MBBI;
// Form IT block.
ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC);
unsigned Mask = 0, Pos = 3;
// Branches, including tricky ones like LDM_RET, need to end an IT
// block so check the instruction we just put in the block.
for (; MBBI != E && Pos &&
(!MI->getDesc().isBranch() && !MI->getDesc().isReturn()) ; ++MBBI) {
if (MBBI->isDebugValue())
continue;
MachineInstr *NMI = &*MBBI;
MI = NMI;
unsigned NPredReg = 0;
ARMCC::CondCodes NCC = llvm::getITInstrPredicate(NMI, NPredReg);
if (NCC == CC || NCC == OCC) {
Mask |= (NCC & 1) << Pos;
// Add implicit use of ITSTATE.
NMI->addOperand(MachineOperand::CreateReg(ARM::ITSTATE, false/*isDef*/,
true/*isImp*/, false/*isKill*/));
LastITMI = NMI;
} else {
if (NCC == ARMCC::AL &&
MoveCopyOutOfITBlock(NMI, CC, OCC, Defs, Uses)) {
--MBBI;
MBB.remove(NMI);
MBB.insert(InsertPos, NMI);
++NumMovedInsts;
continue;
}
break;
}
TrackDefUses(NMI, Defs, Uses, TRI);
--Pos;
}
// Finalize IT mask.
Mask |= (1 << Pos);
// Tag along (firstcond[0] << 4) with the mask.
Mask |= (CC & 1) << 4;
MIB.addImm(Mask);
// Last instruction in IT block kills ITSTATE.
LastITMI->findRegisterUseOperand(ARM::ITSTATE)->setIsKill();
Modified = true;
++NumITs;
}
return Modified;
}
Example 7: switch
void MCS51FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
MCS51MachineFunctionInfo *MCS51FI = MF.getInfo<MCS51MachineFunctionInfo>();
const MCS51InstrInfo &TII =
*static_cast<const MCS51InstrInfo*>(MF.getTarget().getInstrInfo());
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
unsigned RetOpcode = MBBI->getOpcode();
DebugLoc DL = MBBI->getDebugLoc();
switch (RetOpcode) {
case MCS51::RET:
case MCS51::RETI: break; // These are ok
default:
llvm_unreachable("Can only insert epilog into returning blocks");
}
// Get the number of bytes to allocate from the FrameInfo
uint64_t StackSize = MFI->getStackSize();
unsigned CSSize = MCS51FI->getCalleeSavedFrameSize();
uint64_t NumBytes = 0;
if (hasFP(MF)) {
// Calculate required stack adjustment
uint64_t FrameSize = StackSize - 2;
NumBytes = FrameSize - CSSize;
// pop FPW.
BuildMI(MBB, MBBI, DL, TII.get(MCS51::POP16r), MCS51::FPW);
} else
NumBytes = StackSize - CSSize;
// Skip the callee-saved pop instructions.
while (MBBI != MBB.begin()) {
MachineBasicBlock::iterator PI = prior(MBBI);
unsigned Opc = PI->getOpcode();
if (Opc != MCS51::POP16r && !PI->isTerminator())
break;
--MBBI;
}
DL = MBBI->getDebugLoc();
// If there is an ADD16ri or SUB16ri of SPW immediately before this
// instruction, merge the two instructions.
//if (NumBytes || MFI->hasVarSizedObjects())
// mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);
if (MFI->hasVarSizedObjects()) {
BuildMI(MBB, MBBI, DL,
TII.get(MCS51::MOV16rr), MCS51::SPW).addReg(MCS51::FPW);
if (CSSize) {
MachineInstr *MI =
BuildMI(MBB, MBBI, DL,
TII.get(MCS51::SUB16ri), MCS51::SPW)
.addReg(MCS51::SPW).addImm(CSSize);
// The SRW implicit def is dead.
MI->getOperand(3).setIsDead();
}
} else {
// adjust stack pointer back: SPW += numbytes
if (NumBytes) {
MachineInstr *MI =
BuildMI(MBB, MBBI, DL, TII.get(MCS51::ADD16ri), MCS51::SPW)
.addReg(MCS51::SPW).addImm(NumBytes);
// The SRW implicit def is dead.
MI->getOperand(3).setIsDead();
}
}
}
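Example 7 walks backwards from the return with prior() (std::prev in current LLVM), hopping over the callee-saved POP instructions and stopping at MBB.begin(), so that the stack adjustment is emitted before the pops. A sketch of the same scan, parameterized over a hypothetical POP opcode:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include <iterator>

using namespace llvm;

// Starting from the return instruction RetI, move the iterator up past any
// immediately preceding POPs or terminators, but never past MBB.begin().
// PopOpc would be the target's POP opcode (e.g. MSP430::POP16r).
static MachineBasicBlock::iterator
skipCalleeSavedPops(MachineBasicBlock &MBB, MachineBasicBlock::iterator RetI,
                    unsigned PopOpc) {
  while (RetI != MBB.begin()) {
    MachineBasicBlock::iterator PI = std::prev(RetI);
    if (PI->getOpcode() != PopOpc && !PI->isTerminator())
      break;
    --RetI;
  }
  return RetI;
}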
Example 8: runOnMachineFunction
bool PPCBSel::runOnMachineFunction(MachineFunction &Fn) {
const PPCInstrInfo *TII =
static_cast<const PPCInstrInfo*>(Fn.getTarget().getInstrInfo());
// Give the blocks of the function a dense, in-order, numbering.
Fn.RenumberBlocks();
BlockSizes.resize(Fn.getNumBlockIDs());
// Measure each MBB and compute a size for the entire function.
unsigned FuncSize = 0;
for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
++MFI) {
MachineBasicBlock *MBB = MFI;
unsigned BlockSize = 0;
for (MachineBasicBlock::iterator MBBI = MBB->begin(), EE = MBB->end();
MBBI != EE; ++MBBI)
BlockSize += TII->GetInstSizeInBytes(MBBI);
BlockSizes[MBB->getNumber()] = BlockSize;
FuncSize += BlockSize;
}
// If the entire function is smaller than the displacement of a branch field,
// we know we don't need to shrink any branches in this function. This is a
// common case.
if (FuncSize < (1 << 15)) {
BlockSizes.clear();
return false;
}
// For each conditional branch, if the offset to its destination is larger
// than the offset field allows, transform it into a long branch sequence
// like this:
// short branch:
// bCC MBB
// long branch:
// b!CC $PC+8
// b MBB
//
bool MadeChange = true;
bool EverMadeChange = false;
while (MadeChange) {
// Iteratively expand branches until we reach a fixed point.
MadeChange = false;
for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
++MFI) {
MachineBasicBlock &MBB = *MFI;
unsigned MBBStartOffset = 0;
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E; ++I) {
MachineBasicBlock *Dest = nullptr;
if (I->getOpcode() == PPC::BCC && !I->getOperand(2).isImm())
Dest = I->getOperand(2).getMBB();
else if ((I->getOpcode() == PPC::BC || I->getOpcode() == PPC::BCn) &&
!I->getOperand(1).isImm())
Dest = I->getOperand(1).getMBB();
else if ((I->getOpcode() == PPC::BDNZ8 || I->getOpcode() == PPC::BDNZ ||
I->getOpcode() == PPC::BDZ8 || I->getOpcode() == PPC::BDZ) &&
!I->getOperand(0).isImm())
Dest = I->getOperand(0).getMBB();
if (!Dest) {
MBBStartOffset += TII->GetInstSizeInBytes(I);
continue;
}
// Determine the offset from the current branch to the destination
// block.
int BranchSize;
if (Dest->getNumber() <= MBB.getNumber()) {
// If this is a backwards branch, the delta is the offset from the
// start of this block to this branch, plus the sizes of all blocks
// from this block to the dest.
BranchSize = MBBStartOffset;
for (unsigned i = Dest->getNumber(), e = MBB.getNumber(); i != e; ++i)
BranchSize += BlockSizes[i];
} else {
// Otherwise, add the size of the blocks between this block and the
// dest to the number of bytes left in this block.
BranchSize = -MBBStartOffset;
for (unsigned i = MBB.getNumber(), e = Dest->getNumber(); i != e; ++i)
BranchSize += BlockSizes[i];
}
// If this branch is in range, ignore it.
if (isInt<16>(BranchSize)) {
MBBStartOffset += 4;
continue;
}
// Otherwise, we have to expand it to a long branch.
MachineInstr *OldBranch = I;
DebugLoc dl = OldBranch->getDebugLoc();
if (I->getOpcode() == PPC::BCC) {
// The BCC operands are:
// 0. PPC branch predicate
//......... (the rest of this code is omitted) .........
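Example 8 uses begin()/end() purely as a measuring loop: it adds up the byte size of every instruction so it can decide which branches are out of range. A generic sketch of that measurement is below; the header path and the getInstSizeInBytes() hook are as in current LLVM, while the tree shown above still spells it GetInstSizeInBytes().

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Sum the encoded size of every instruction in MBB, in bytes.
static unsigned computeBlockSizeInBytes(const MachineBasicBlock &MBB,
                                        const TargetInstrInfo &TII) {
  unsigned Size = 0;
  for (const MachineInstr &MI : MBB)   // Equivalent to a begin()/end() loop.
    Size += TII.getInstSizeInBytes(MI);
  return Size;
}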
Example 9: insertCSRSpillsAndRestores
/// insertCSRSpillsAndRestores - Insert spill and restore code for
/// callee saved registers used in the function.
///
void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
// Get callee saved register information.
MachineFrameInfo *MFI = Fn.getFrameInfo();
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
MFI->setCalleeSavedInfoValid(true);
// Early exit if no callee saved registers are modified!
if (CSI.empty())
return;
const TargetInstrInfo &TII = *Fn.getSubtarget().getInstrInfo();
const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
MachineBasicBlock::iterator I;
// Spill using target interface.
I = EntryBlock->begin();
if (!TFI->spillCalleeSavedRegisters(*EntryBlock, I, CSI, TRI)) {
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
// Add the callee-saved register as live-in.
// It's killed at the spill.
EntryBlock->addLiveIn(CSI[i].getReg());
// Insert the spill to the stack frame.
unsigned Reg = CSI[i].getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.storeRegToStackSlot(*EntryBlock, I, Reg, true, CSI[i].getFrameIdx(),
RC, TRI);
}
}
// Restore using target interface.
for (unsigned ri = 0, re = ReturnBlocks.size(); ri != re; ++ri) {
MachineBasicBlock *MBB = ReturnBlocks[ri];
I = MBB->end();
--I;
// Skip over all terminator instructions, which are part of the return
// sequence.
MachineBasicBlock::iterator I2 = I;
while (I2 != MBB->begin() && (--I2)->isTerminator())
I = I2;
bool AtStart = I == MBB->begin();
MachineBasicBlock::iterator BeforeI = I;
if (!AtStart)
--BeforeI;
// Restore all registers immediately before the return and any
// terminators that precede it.
if (!TFI->restoreCalleeSavedRegisters(*MBB, I, CSI, TRI)) {
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
TII.loadRegFromStackSlot(*MBB, I, Reg, CSI[i].getFrameIdx(), RC, TRI);
assert(I != MBB->begin() &&
"loadRegFromStackSlot didn't insert any code!");
// Insert in reverse order. loadRegFromStackSlot can insert
// multiple instructions.
if (AtStart)
I = MBB->begin();
else {
I = BeforeI;
++I;
}
}
}
}
}
Example 10: HoistOutOfLoop
/// Walk the specified loop in the CFG (defined by all blocks dominated by the
/// specified header block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
void MachineLICM::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
MachineBasicBlock *Preheader = getCurPreheader();
if (!Preheader)
return;
SmallVector<MachineDomTreeNode*, 32> Scopes;
SmallVector<MachineDomTreeNode*, 8> WorkList;
DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
// Perform a DFS walk to determine the order of visit.
WorkList.push_back(HeaderN);
while (!WorkList.empty()) {
MachineDomTreeNode *Node = WorkList.pop_back_val();
assert(Node && "Null dominator tree node?");
MachineBasicBlock *BB = Node->getBlock();
// If the header of the loop containing this basic block is a landing pad,
// then don't try to hoist instructions out of this loop.
const MachineLoop *ML = MLI->getLoopFor(BB);
if (ML && ML->getHeader()->isEHPad())
continue;
// If this subregion is not in the top level loop at all, exit.
if (!CurLoop->contains(BB))
continue;
Scopes.push_back(Node);
const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
unsigned NumChildren = Children.size();
// Don't hoist things out of a large switch statement. This often causes
// code to be hoisted that wasn't going to be executed, and increases
// register pressure in a situation where it's likely to matter.
if (BB->succ_size() >= 25)
NumChildren = 0;
OpenChildren[Node] = NumChildren;
// Add children in reverse order as then the next popped worklist node is
// the first child of this node. This means we ultimately traverse the
// DOM tree in exactly the same order as if we'd recursed.
for (int i = (int)NumChildren-1; i >= 0; --i) {
MachineDomTreeNode *Child = Children[i];
ParentMap[Child] = Node;
WorkList.push_back(Child);
}
}
if (Scopes.size() == 0)
return;
// Compute registers which are livein into the loop headers.
RegSeen.clear();
BackTrace.clear();
InitRegPressure(Preheader);
// Now perform LICM.
for (MachineDomTreeNode *Node : Scopes) {
MachineBasicBlock *MBB = Node->getBlock();
EnterScope(MBB);
// Process the block
SpeculationState = SpeculateUnknown;
for (MachineBasicBlock::iterator
MII = MBB->begin(), E = MBB->end(); MII != E; ) {
MachineBasicBlock::iterator NextMII = MII; ++NextMII;
MachineInstr *MI = &*MII;
if (!Hoist(MI, Preheader))
UpdateRegPressure(MI);
MII = NextMII;
}
// If it's a leaf node, it's done. Traverse upwards to pop ancestors.
ExitScopeIfDone(Node, OpenChildren, ParentMap);
}
}
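The per-block loop in Example 10 uses the saved NextMII iterator because Hoist() may remove MI from the block. The mechanical part of such a hoist is a single splice into the preheader, sketched below; the real MachineLICM::Hoist additionally performs CSE, updates register pressure, and re-checks safety, so treat this as an illustration only.

#include "llvm/CodeGen/MachineBasicBlock.h"

using namespace llvm;

// Move the instruction at MI from its current block into Preheader, right
// before the preheader's terminator, without rebuilding it. This is the
// mechanical part of hoisting once an instruction has been proven
// loop-invariant and safe to speculate.
static void hoistToPreheader(MachineBasicBlock &Preheader,
                             MachineBasicBlock &FromMBB,
                             MachineBasicBlock::iterator MI) {
  Preheader.splice(Preheader.getFirstTerminator(), &FromMBB, MI);
}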
Example 11: AnalyzeBranch
bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
// Most of the code and comments here are boilerplate.
// Start from the bottom of the block and work up, examining the
// terminator instructions.
MachineBasicBlock::iterator I = MBB.end();
while (I != MBB.begin()) {
--I;
if (I->isDebugValue())
continue;
// Working from the bottom, when we see a non-terminator instruction, we're
// done.
if (!isUnpredicatedTerminator(I))
break;
// A terminator that isn't a branch can't easily be handled by this
// analysis.
unsigned ThisCond;
const MachineOperand *ThisTarget;
if (!isBranch(I, ThisCond, ThisTarget))
return true;
// Can't handle indirect branches.
if (!ThisTarget->isMBB())
return true;
if (ThisCond == SystemZ::CCMASK_ANY) {
// Handle unconditional branches.
if (!AllowModify) {
TBB = ThisTarget->getMBB();
continue;
}
// If the block has any instructions after a JMP, delete them.
while (llvm::next(I) != MBB.end())
llvm::next(I)->eraseFromParent();
Cond.clear();
FBB = 0;
// Delete the JMP if it's equivalent to a fall-through.
if (MBB.isLayoutSuccessor(ThisTarget->getMBB())) {
TBB = 0;
I->eraseFromParent();
I = MBB.end();
continue;
}
// TBB is used to indicate the unconditional destination.
TBB = ThisTarget->getMBB();
continue;
}
// Working from the bottom, handle the first conditional branch.
if (Cond.empty()) {
// FIXME: add X86-style branch swap
FBB = TBB;
TBB = ThisTarget->getMBB();
Cond.push_back(MachineOperand::CreateImm(ThisCond));
continue;
}
// Handle subsequent conditional branches.
assert(Cond.size() == 1);
assert(TBB);
// Only handle the case where all conditional branches branch to the same
// destination.
if (TBB != ThisTarget->getMBB())
return true;
// If the conditions are the same, we can leave them alone.
unsigned OldCond = Cond[0].getImm();
if (OldCond == ThisCond)
continue;
// FIXME: Try combining conditions like X86 does. Should be easy on Z!
}
return false;
}
Example 12: while
void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
MachineFrameInfo *MFI = MF.getFrameInfo();
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL;
bool IsTailCallReturn = false;
if (MBB.end() != MBBI) {
DL = MBBI->getDebugLoc();
unsigned RetOpcode = MBBI->getOpcode();
IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
RetOpcode == AArch64::TCRETURNri;
}
int NumBytes = MFI->getStackSize();
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
return;
// Initial and residual are named for consistency with the prologue. Note that
// in the epilogue, the residual adjustment is executed first.
uint64_t ArgumentPopSize = 0;
if (IsTailCallReturn) {
MachineOperand &StackAdjust = MBBI->getOperand(1);
// For a tail-call in a callee-pops-arguments environment, some or all of
// the stack may actually be in use for the call's arguments, this is
// calculated during LowerCall and consumed here...
ArgumentPopSize = StackAdjust.getImm();
} else {
// ... otherwise the amount to pop is *all* of the argument space,
// conveniently stored in the MachineFunctionInfo by
// LowerFormalArguments. This will, of course, be zero for the C calling
// convention.
ArgumentPopSize = AFI->getArgumentStackToRestore();
}
// The stack frame should be like below,
//
//      ----------------------                     ---
//      |                    |                      |
//      | BytesInStackArgArea|              CalleeArgStackSize
//      | (NumReusableBytes) |                (of tail call)
//      |                    |                     ---
//      |                    |                      |
//      ---------------------|        ---           |
//      |                    |         |            |
//      |   CalleeSavedReg   |         |            |
//      | (CalleeSavedStackSize)|      |            |
//      |                    |         |            |
//      ---------------------|         |         NumBytes
//      |                    |     StackSize  (StackAdjustUp)
//      |   LocalStackSize   |         |            |
//      | (covering callee   |         |            |
//      |       args)        |         |            |
//      |                    |         |            |
//      ----------------------        ---          ---
//
// So NumBytes = StackSize + BytesInStackArgArea - CalleeArgStackSize
//             = StackSize + ArgumentPopSize
//
// AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
// it as the 2nd argument of AArch64ISD::TC_RETURN.
// Move past the restores of the callee-saved registers.
MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
MachineBasicBlock::iterator Begin = MBB.begin();
while (LastPopI != Begin) {
--LastPopI;
if (!LastPopI->getFlag(MachineInstr::FrameDestroy)) {
++LastPopI;
break;
}
}
NumBytes -= AFI->getCalleeSavedStackSize();
assert(NumBytes >= 0 && "Negative stack allocation size!?");
if (!hasFP(MF)) {
bool RedZone = canUseRedZone(MF);
// If this was a redzone leaf function, we don't need to restore the
// stack pointer (but we may need to pop stack args for fastcc).
if (RedZone && ArgumentPopSize == 0)
return;
bool NoCalleeSaveRestore = AFI->getCalleeSavedStackSize() == 0;
int StackRestoreBytes = RedZone ? 0 : NumBytes;
if (NoCalleeSaveRestore)
StackRestoreBytes += ArgumentPopSize;
emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
StackRestoreBytes, TII, MachineInstr::FrameDestroy);
// If we were able to combine the local stack pop with the argument pop,
// then we're done.
if (NoCalleeSaveRestore || ArgumentPopSize == 0)
return;
NumBytes = 0;
}
//......... (the rest of this code is omitted) .........
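Example 12 is the epilogue-side mirror of Example 1: instead of skipping forward over FrameSetup code from begin(), it walks backwards from the terminator sequence over instructions tagged MachineInstr::FrameDestroy, with MBB.begin() as the hard stop. The same loop as a stand-alone (illustrative) helper:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Walk backwards from the terminator sequence over the callee-save restores
// (tagged FrameDestroy) and return the point where the epilogue's
// stack-pointer adjustment should be inserted.
static MachineBasicBlock::iterator
findEpilogueSPRestorePoint(MachineBasicBlock &MBB) {
  MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
  MachineBasicBlock::iterator Begin = MBB.begin();
  while (LastPopI != Begin) {
    --LastPopI;
    if (!LastPopI->getFlag(MachineInstr::FrameDestroy)) {
      ++LastPopI;            // Went one instruction too far; step back down.
      break;
    }
  }
  return LastPopI;
}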
Example 13: expandAtomicBinOp
//......... (the beginning of this code is omitted) .........
unsigned Scratch = I->getOperand(3).getReg();
unsigned Opcode = 0;
unsigned OR = 0;
unsigned AND = 0;
unsigned NOR = 0;
bool IsNand = false;
switch (I->getOpcode()) {
case Mips::ATOMIC_LOAD_ADD_I32_POSTRA:
Opcode = Mips::ADDu;
break;
case Mips::ATOMIC_LOAD_SUB_I32_POSTRA:
Opcode = Mips::SUBu;
break;
case Mips::ATOMIC_LOAD_AND_I32_POSTRA:
Opcode = Mips::AND;
break;
case Mips::ATOMIC_LOAD_OR_I32_POSTRA:
Opcode = Mips::OR;
break;
case Mips::ATOMIC_LOAD_XOR_I32_POSTRA:
Opcode = Mips::XOR;
break;
case Mips::ATOMIC_LOAD_NAND_I32_POSTRA:
IsNand = true;
AND = Mips::AND;
NOR = Mips::NOR;
break;
case Mips::ATOMIC_SWAP_I32_POSTRA:
OR = Mips::OR;
break;
case Mips::ATOMIC_LOAD_ADD_I64_POSTRA:
Opcode = Mips::DADDu;
break;
case Mips::ATOMIC_LOAD_SUB_I64_POSTRA:
Opcode = Mips::DSUBu;
break;
case Mips::ATOMIC_LOAD_AND_I64_POSTRA:
Opcode = Mips::AND64;
break;
case Mips::ATOMIC_LOAD_OR_I64_POSTRA:
Opcode = Mips::OR64;
break;
case Mips::ATOMIC_LOAD_XOR_I64_POSTRA:
Opcode = Mips::XOR64;
break;
case Mips::ATOMIC_LOAD_NAND_I64_POSTRA:
IsNand = true;
AND = Mips::AND64;
NOR = Mips::NOR64;
break;
case Mips::ATOMIC_SWAP_I64_POSTRA:
OR = Mips::OR64;
break;
default:
llvm_unreachable("Unknown pseudo atomic!");
}
const BasicBlock *LLVM_BB = BB.getBasicBlock();
MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++BB.getIterator();
MF->insert(It, loopMBB);
MF->insert(It, exitMBB);
exitMBB->splice(exitMBB->begin(), &BB, std::next(I), BB.end());
exitMBB->transferSuccessorsAndUpdatePHIs(&BB);
BB.addSuccessor(loopMBB, BranchProbability::getOne());
loopMBB->addSuccessor(exitMBB);
loopMBB->addSuccessor(loopMBB);
loopMBB->normalizeSuccProbs();
BuildMI(loopMBB, DL, TII->get(LL), OldVal).addReg(Ptr).addImm(0);
assert((OldVal != Ptr) && "Clobbered the wrong ptr reg!");
assert((OldVal != Incr) && "Clobbered the wrong reg!");
if (Opcode) {
BuildMI(loopMBB, DL, TII->get(Opcode), Scratch).addReg(OldVal).addReg(Incr);
} else if (IsNand) {
assert(AND && NOR &&
"Unknown nand instruction for atomic pseudo expansion");
BuildMI(loopMBB, DL, TII->get(AND), Scratch).addReg(OldVal).addReg(Incr);
BuildMI(loopMBB, DL, TII->get(NOR), Scratch).addReg(ZERO).addReg(Scratch);
} else {
assert(OR && "Unknown instruction for atomic pseudo expansion!");
BuildMI(loopMBB, DL, TII->get(OR), Scratch).addReg(Incr).addReg(ZERO);
}
BuildMI(loopMBB, DL, TII->get(SC), Scratch).addReg(Scratch).addReg(Ptr).addImm(0);
BuildMI(loopMBB, DL, TII->get(BEQ)).addReg(Scratch).addReg(ZERO).addMBB(loopMBB);
NMBBI = BB.end();
I->eraseFromParent();
LivePhysRegs LiveRegs;
computeAndAddLiveIns(LiveRegs, *loopMBB);
computeAndAddLiveIns(LiveRegs, *exitMBB);
return true;
}
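The begin() in Example 13 belongs to the freshly created exit block: everything after the atomic pseudo is spliced in front of exitMBB->begin(), which is how a machine basic block gets split around an instruction. A hedged sketch of just that splitting step, with successor-probability bookkeeping left out and an invented function name:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include <iterator>

using namespace llvm;

// Create a new block right after MBB and move every instruction that follows
// SplitPoint into it. Successor edges and PHIs are transferred as well.
static MachineBasicBlock *
splitBlockAfter(MachineBasicBlock &MBB, MachineBasicBlock::iterator SplitPoint) {
  MachineFunction *MF = MBB.getParent();
  MachineBasicBlock *NewMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  MF->insert(std::next(MBB.getIterator()), NewMBB);

  // Move the tail [SplitPoint+1, end) into the new block, in front of its
  // (currently empty) begin().
  NewMBB->splice(NewMBB->begin(), &MBB, std::next(SplitPoint), MBB.end());
  NewMBB->transferSuccessorsAndUpdatePHIs(&MBB);
  MBB.addSuccessor(NewMBB);
  return NewMBB;
}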
Example 14: SinkInstruction
//......... (the beginning of this code is omitted) .........
// "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
const MachineOperand &MO = MI->getOperand(I);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
if (SuccToSinkTo->isLiveIn(Reg))
return false;
}
DEBUG(dbgs() << "Sink instr " << *MI << "\tinto block " << *SuccToSinkTo);
// If the block has multiple predecessors, this is a critical edge.
// Decide if we can sink along it or need to break the edge.
if (SuccToSinkTo->pred_size() > 1) {
// We cannot sink a load across a critical edge - there may be stores in
// other code paths.
bool TryBreak = false;
bool store = true;
if (!MI->isSafeToMove(AA, store)) {
DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
TryBreak = true;
}
// We don't want to sink across a critical edge if we don't dominate the
// successor. We could be introducing calculations to new code paths.
if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
TryBreak = true;
}
// Don't sink instructions into a loop.
if (!TryBreak && LI->isLoopHeader(SuccToSinkTo)) {
DEBUG(dbgs() << " *** NOTE: Loop header found\n");
TryBreak = true;
}
// Otherwise we are OK with sinking along a critical edge.
if (!TryBreak)
DEBUG(dbgs() << "Sinking along critical edge.\n");
else {
// Mark this edge as to be split.
// If the edge can actually be split, the next iteration of the main loop
// will sink MI in the newly created block.
bool Status =
PostponeSplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
if (!Status)
DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
"break critical edge\n");
// The instruction will not be sunk this time.
return false;
}
}
if (BreakPHIEdge) {
// BreakPHIEdge is true if all the uses are in the successor MBB being
// sunken into and they are all PHI nodes. In this case, machine-sink must
// break the critical edge first.
bool Status = PostponeSplitCriticalEdge(MI, ParentBlock,
SuccToSinkTo, BreakPHIEdge);
if (!Status)
DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
"break critical edge\n");
// The instruction will not be sunk this time.
return false;
}
// Determine where to insert into. Skip phi nodes.
MachineBasicBlock::iterator InsertPos = SuccToSinkTo->begin();
while (InsertPos != SuccToSinkTo->end() && InsertPos->isPHI())
++InsertPos;
// collect matching debug values.
SmallVector<MachineInstr *, 2> DbgValuesToSink;
collectDebugValues(MI, DbgValuesToSink);
// Move the instruction.
SuccToSinkTo->splice(InsertPos, ParentBlock, MI,
++MachineBasicBlock::iterator(MI));
// Move debug values.
for (SmallVectorImpl<MachineInstr *>::iterator DBI = DbgValuesToSink.begin(),
DBE = DbgValuesToSink.end(); DBI != DBE; ++DBI) {
MachineInstr *DbgMI = *DBI;
SuccToSinkTo->splice(InsertPos, ParentBlock, DbgMI,
++MachineBasicBlock::iterator(DbgMI));
}
// Conservatively, clear any kill flags, since it's possible that they are no
// longer correct.
// Note that we have to clear the kill flags for any register this instruction
// uses as we may sink over another instruction which currently kills the
// used registers.
for (MachineOperand &MO : MI->operands()) {
if (MO.isReg() && MO.isUse())
RegsToClearKillFlags.set(MO.getReg()); // Remember to clear kill flags.
}
return true;
}
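Example 14 applies begin() to the destination block: the sunk instruction must go below any PHI nodes, so the insertion point advances from begin() while isPHI() holds. The same idea as a tiny helper; note that MachineBasicBlock::getFirstNonPHI() already provides this in LLVM proper.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// First valid insertion point in MBB that is below all PHI nodes.
static MachineBasicBlock::iterator skipPHIs(MachineBasicBlock &MBB) {
  MachineBasicBlock::iterator InsertPos = MBB.begin();
  while (InsertPos != MBB.end() && InsertPos->isPHI())
    ++InsertPos;
  return InsertPos;
}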
Example 15: expandAtomicCmpSwapSubword
bool MipsExpandPseudo::expandAtomicCmpSwapSubword(
MachineBasicBlock &BB, MachineBasicBlock::iterator I,
MachineBasicBlock::iterator &NMBBI) {
MachineFunction *MF = BB.getParent();
const bool ArePtrs64bit = STI->getABI().ArePtrs64bit();
DebugLoc DL = I->getDebugLoc();
unsigned LL, SC;
unsigned ZERO = Mips::ZERO;
unsigned BNE = Mips::BNE;
unsigned BEQ = Mips::BEQ;
unsigned SEOp =
I->getOpcode() == Mips::ATOMIC_CMP_SWAP_I8_POSTRA ? Mips::SEB : Mips::SEH;
if (STI->inMicroMipsMode()) {
LL = STI->hasMips32r6() ? Mips::LL_MMR6 : Mips::LL_MM;
SC = STI->hasMips32r6() ? Mips::SC_MMR6 : Mips::SC_MM;
BNE = STI->hasMips32r6() ? Mips::BNEC_MMR6 : Mips::BNE_MM;
BEQ = STI->hasMips32r6() ? Mips::BEQC_MMR6 : Mips::BEQ_MM;
} else {
LL = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::LL64_R6 : Mips::LL_R6)
: (ArePtrs64bit ? Mips::LL64 : Mips::LL);
SC = STI->hasMips32r6() ? (ArePtrs64bit ? Mips::SC64_R6 : Mips::SC_R6)
: (ArePtrs64bit ? Mips::SC64 : Mips::SC);
}
unsigned Dest = I->getOperand(0).getReg();
unsigned Ptr = I->getOperand(1).getReg();
unsigned Mask = I->getOperand(2).getReg();
unsigned ShiftCmpVal = I->getOperand(3).getReg();
unsigned Mask2 = I->getOperand(4).getReg();
unsigned ShiftNewVal = I->getOperand(5).getReg();
unsigned ShiftAmnt = I->getOperand(6).getReg();
unsigned Scratch = I->getOperand(7).getReg();
unsigned Scratch2 = I->getOperand(8).getReg();
// insert new blocks after the current block
const BasicBlock *LLVM_BB = BB.getBasicBlock();
MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++BB.getIterator();
MF->insert(It, loop1MBB);
MF->insert(It, loop2MBB);
MF->insert(It, sinkMBB);
MF->insert(It, exitMBB);
// Transfer the remainder of BB and its successor edges to exitMBB.
exitMBB->splice(exitMBB->begin(), &BB,
std::next(MachineBasicBlock::iterator(I)), BB.end());
exitMBB->transferSuccessorsAndUpdatePHIs(&BB);
// thisMBB:
// ...
// fallthrough --> loop1MBB
BB.addSuccessor(loop1MBB, BranchProbability::getOne());
loop1MBB->addSuccessor(sinkMBB);
loop1MBB->addSuccessor(loop2MBB);
loop1MBB->normalizeSuccProbs();
loop2MBB->addSuccessor(loop1MBB);
loop2MBB->addSuccessor(sinkMBB);
loop2MBB->normalizeSuccProbs();
sinkMBB->addSuccessor(exitMBB, BranchProbability::getOne());
// loop1MBB:
// ll dest, 0(ptr)
// and Mask', dest, Mask
// bne Mask', ShiftCmpVal, exitMBB
BuildMI(loop1MBB, DL, TII->get(LL), Scratch).addReg(Ptr).addImm(0);
BuildMI(loop1MBB, DL, TII->get(Mips::AND), Scratch2)
.addReg(Scratch)
.addReg(Mask);
BuildMI(loop1MBB, DL, TII->get(BNE))
.addReg(Scratch2).addReg(ShiftCmpVal).addMBB(sinkMBB);
// loop2MBB:
// and dest, dest, mask2
// or dest, dest, ShiftNewVal
// sc dest, dest, 0(ptr)
// beq dest, $0, loop1MBB
BuildMI(loop2MBB, DL, TII->get(Mips::AND), Scratch)
.addReg(Scratch, RegState::Kill)
.addReg(Mask2);
BuildMI(loop2MBB, DL, TII->get(Mips::OR), Scratch)
.addReg(Scratch, RegState::Kill)
.addReg(ShiftNewVal);
BuildMI(loop2MBB, DL, TII->get(SC), Scratch)
.addReg(Scratch, RegState::Kill)
.addReg(Ptr)
.addImm(0);
BuildMI(loop2MBB, DL, TII->get(BEQ))
.addReg(Scratch, RegState::Kill)
.addReg(ZERO)
.addMBB(loop1MBB);
// sinkMBB:
// srl srlres, Mask', shiftamt
//......... (the rest of this code is omitted) .........