This page collects typical usage examples of the C++ method MachineBasicBlock::iterator::getOperand. If you have been wondering what iterator::getOperand does, how to call it, or where to see it used in practice, the hand-picked code samples below should help. You can also read more about the class the method belongs to, MachineBasicBlock::iterator.
Below are 15 code examples of iterator::getOperand, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ samples.
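Before the examples, a quick orientation: iterator::getOperand is really MachineInstr::getOperand reached through the iterator's operator->, and nearly every example below is some variation of the loop sketched here. This sketch is not taken from any example; it is a minimal illustration assuming only the standard LLVM CodeGen headers.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
using namespace llvm;
// Count the register defs in a block by walking it with a
// MachineBasicBlock::iterator and inspecting each operand in turn.
static unsigned countRegDefs(MachineBasicBlock &MBB) {
unsigned NumDefs = 0;
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E; ++I) {
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = I->getOperand(i);
if (MO.isReg() && MO.isDef())
++NumDefs;
}
}
return NumDefs;
}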
Example 1: BuildMI
bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
const CallContext &Context) {
// Ok, we can in fact do the transformation for this call.
// Do not remove the FrameSetup instruction, but adjust the parameters.
// PEI will end up finalizing the handling of this.
MachineBasicBlock::iterator FrameSetup = Context.FrameSetup;
MachineBasicBlock &MBB = *(FrameSetup->getParent());
FrameSetup->getOperand(1).setImm(Context.ExpectedDist);
DebugLoc DL = FrameSetup->getDebugLoc();
// Now, iterate through the vector in reverse order, and replace the movs
// with pushes. MOVmi/MOVmr doesn't have any defs, so no need to
// replace uses.
for (int Idx = (Context.ExpectedDist / 4) - 1; Idx >= 0; --Idx) {
MachineBasicBlock::iterator MOV = *Context.MovVector[Idx];
MachineOperand PushOp = MOV->getOperand(X86::AddrNumOperands);
MachineBasicBlock::iterator Push = nullptr;
if (MOV->getOpcode() == X86::MOV32mi) {
unsigned PushOpcode = X86::PUSHi32;
// If the operand is a small (8-bit) immediate, we can use a
// PUSH instruction with a shorter encoding.
// Note that isImm() may fail even though this is a MOVmi, because
// the operand can also be a symbol.
if (PushOp.isImm()) {
int64_t Val = PushOp.getImm();
if (isInt<8>(Val))
PushOpcode = X86::PUSH32i8;
}
Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
.addOperand(PushOp);
} else {
unsigned int Reg = PushOp.getReg();
// If PUSHrmm is not slow on this target, try to fold the source of the
// push into the instruction.
bool SlowPUSHrmm = STI->isAtom() || STI->isSLM();
// Check that this is legal to fold. Right now, we're extremely
// conservative about that.
MachineInstr *DefMov = nullptr;
if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) {
Push = BuildMI(MBB, Context.Call, DL, TII->get(X86::PUSH32rmm));
unsigned NumOps = DefMov->getDesc().getNumOperands();
for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
Push->addOperand(DefMov->getOperand(i));
DefMov->eraseFromParent();
} else {
Push = BuildMI(MBB, Context.Call, DL, TII->get(X86::PUSH32r))
.addReg(Reg)
.getInstr();
}
}
// For debugging, when using SP-based CFA, we need to adjust the CFA
// offset after each push.
// TODO: This is needed only if we require precise CFA.
if (!TFL->hasFP(MF))
TFL->BuildCFI(MBB, std::next(Push), DL,
MCCFIInstruction::createAdjustCfaOffset(nullptr, 4));
MBB.erase(MOV);
}
// The stack-pointer copy is no longer used in the call sequences.
// There should not be any other users, but we can't commit to that, so:
if (MRI->use_empty(Context.SPCopy->getOperand(0).getReg()))
Context.SPCopy->eraseFromParent();
// Once we've done this, we need to make sure PEI doesn't assume a reserved
// frame.
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
FuncInfo->setHasPushSequences(true);
return true;
}
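A usage note on Example 1: the key getOperand idiom there is fetching a fixed-index operand and testing its kind before touching any kind-specific accessor. Below is a hedged, condensed sketch of just the opcode-selection step; the X86 opcode constants and the X86::AddrNumOperands index come from the example itself and require the target's private headers.
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
// Choose the push opcode for a MOV32mi-style stack store, preferring the
// short 8-bit-immediate encoding when the value fits. Note isImm() can be
// false even for a MOVmi, because the operand may be a symbol.
static unsigned pickPushOpcode(const MachineInstr &MOV) {
const MachineOperand &PushOp = MOV.getOperand(X86::AddrNumOperands);
if (PushOp.isImm() && isInt<8>(PushOp.getImm()))
return X86::PUSH32i8;
return X86::PUSHi32;
}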
Example 2: findSurvivorReg
/// findSurvivorReg - Return the candidate register that is unused for the
/// longest after StartMI. UseMI is set to the instruction where the search
/// stopped.
///
/// No more than InstrLimit instructions are inspected.
///
unsigned RegScavenger::findSurvivorReg(MachineBasicBlock::iterator StartMI,
BitVector &Candidates,
unsigned InstrLimit,
MachineBasicBlock::iterator &UseMI) {
int Survivor = Candidates.find_first();
assert(Survivor > 0 && "No candidates for scavenging");
MachineBasicBlock::iterator ME = MBB->getFirstTerminator();
assert(StartMI != ME && "MI already at terminator");
MachineBasicBlock::iterator RestorePointMI = StartMI;
MachineBasicBlock::iterator MI = StartMI;
bool inVirtLiveRange = false;
for (++MI; InstrLimit > 0 && MI != ME; ++MI, --InstrLimit) {
if (MI->isDebugValue()) {
++InstrLimit; // Don't count debug instructions
continue;
}
bool isVirtKillInsn = false;
bool isVirtDefInsn = false;
// Remove any candidates touched by instruction.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
if (MO.isRegMask())
Candidates.clearBitsNotInMask(MO.getRegMask());
if (!MO.isReg() || MO.isUndef() || !MO.getReg())
continue;
if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
if (MO.isDef())
isVirtDefInsn = true;
else if (MO.isKill())
isVirtKillInsn = true;
continue;
}
for (MCRegAliasIterator AI(MO.getReg(), TRI, true); AI.isValid(); ++AI)
Candidates.reset(*AI);
}
// If we're not in a virtual reg's live range, this is a valid
// restore point.
if (!inVirtLiveRange) RestorePointMI = MI;
// Update whether we're in the live range of a virtual register
if (isVirtKillInsn) inVirtLiveRange = false;
if (isVirtDefInsn) inVirtLiveRange = true;
// Was our survivor untouched by this instruction?
if (Candidates.test(Survivor))
continue;
// All candidates gone?
if (Candidates.none())
break;
Survivor = Candidates.find_first();
}
// If we ran off the end, that's where we want to restore.
if (MI == ME) RestorePointMI = ME;
assert (RestorePointMI != StartMI &&
"No available scavenger restore location!");
// We ran out of candidates, so stop the search.
UseMI = RestorePointMI;
return Survivor;
}
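The pruning step inside Example 2's scan is worth isolating: a register-mask operand clears every candidate not preserved by the mask, and any physical-register operand clears itself plus every alias. A sketch under those assumptions (TRI is the current TargetRegisterInfo, exactly as in the example):
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/MC/MCRegisterInfo.h"
using namespace llvm;
// Remove every candidate register touched by MI; condensed from the
// per-instruction pruning in Example 2.
static void pruneCandidates(const MachineInstr &MI, BitVector &Candidates,
const TargetRegisterInfo *TRI) {
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI.getOperand(i);
if (MO.isRegMask())
Candidates.clearBitsNotInMask(MO.getRegMask());
if (!MO.isReg() || MO.isUndef() || !MO.getReg())
continue;
for (MCRegAliasIterator AI(MO.getReg(), TRI, /*IncludeSelf=*/true);
AI.isValid(); ++AI)
Candidates.reset(*AI);
}
}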
Example 3: switch
X86CallFrameOptimization::InstClassification
X86CallFrameOptimization::classifyInstruction(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
const X86RegisterInfo &RegInfo, DenseSet<unsigned int> &UsedRegs) {
if (MI == MBB.end())
return Exit;
// The instructions we actually care about are movs onto the stack or special
// cases of constant-stores to stack
switch (MI->getOpcode()) {
case X86::AND16mi8:
case X86::AND32mi8:
case X86::AND64mi8: {
MachineOperand ImmOp = MI->getOperand(X86::AddrNumOperands);
return ImmOp.getImm() == 0 ? Convert : Exit;
}
case X86::OR16mi8:
case X86::OR32mi8:
case X86::OR64mi8: {
MachineOperand ImmOp = MI->getOperand(X86::AddrNumOperands);
return ImmOp.getImm() == -1 ? Convert : Exit;
}
case X86::MOV32mi:
case X86::MOV32mr:
case X86::MOV64mi32:
case X86::MOV64mr:
return Convert;
}
// Not all calling conventions have only stack MOVs between the stack
// adjust and the call.
// We want to tolerate other instructions, to cover more cases.
// In particular:
// a) PCrel calls, where we expect an additional COPY of the basereg.
// b) Passing frame-index addresses.
// c) Calling conventions that have inreg parameters. These generate
// both copies and movs into registers.
// To avoid creating lots of special cases, allow any instruction
// that does not write into memory, does not def or use the stack
// pointer, and does not def any register that was used by a preceding
// push.
// (Reading from memory is allowed, even if referenced through a
// frame index, since these will get adjusted properly in PEI)
// The reason for the last condition is that the pushes can't replace
// the movs in place, because the order must be reversed.
// So if we have a MOV32mr that uses EDX, then an instruction that defs
// EDX, and then the call, after the transformation the push will use
// the modified version of EDX, and not the original one.
// Since we are still in SSA form at this point, we only need to
// make sure we don't clobber any *physical* registers that were
// used by an earlier mov that will become a push.
if (MI->isCall() || MI->mayStore())
return Exit;
for (const MachineOperand &MO : MI->operands()) {
if (!MO.isReg())
continue;
unsigned int Reg = MO.getReg();
if (!RegInfo.isPhysicalRegister(Reg))
continue;
if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister()))
return Exit;
if (MO.isDef()) {
for (unsigned int U : UsedRegs)
if (RegInfo.regsOverlap(Reg, U))
return Exit;
}
}
return Skip;
}
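One reusable piece of Example 3 is its stack-pointer safety check: scan the operands and reject the instruction if any physical register overlaps the stack register. A hedged sketch, assuming the same X86RegisterInfo reference the example receives (regsOverlap also catches sub-registers):
#include "llvm/CodeGen/MachineInstr.h"
using namespace llvm;
// True if MI reads or writes (any part of) the stack pointer; condensed
// from the operand scan in Example 3.
static bool touchesStackPointer(const MachineInstr &MI,
const X86RegisterInfo &RegInfo) {
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (RegInfo.isPhysicalRegister(Reg) &&
RegInfo.regsOverlap(Reg, RegInfo.getStackRegister()))
return true;
}
return false;
}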
Example 4: pushInstruction
void SIInsertWaits::pushInstruction(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const Counters &Increment) {
// Get the hardware counter increments and sum them up
Counters Limit = ZeroCounts;
unsigned Sum = 0;
if (TII->mayAccessFlatAddressSpace(*I))
IsFlatOutstanding = true;
for (unsigned i = 0; i < 3; ++i) {
LastIssued.Array[i] += Increment.Array[i];
if (Increment.Array[i])
Limit.Array[i] = LastIssued.Array[i];
Sum += Increment.Array[i];
}
// If we don't increase anything then that's it
if (Sum == 0) {
LastOpcodeType = OTHER;
return;
}
if (ST->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
// Any occurrence of consecutive VMEM or SMEM instructions forms a VMEM
// or SMEM clause, respectively.
//
// The temporary workaround is to break the clauses with S_NOP.
//
// The proper solution would be to allocate registers such that all source
// and destination registers don't overlap, e.g. this is illegal:
// r0 = load r2
// r2 = load r0
if (LastOpcodeType == VMEM && Increment.Named.VM) {
// Insert a NOP to break the clause.
BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::S_NOP))
.addImm(0);
LastInstWritesM0 = false;
}
if (TII->isSMRD(*I))
LastOpcodeType = SMEM;
else if (Increment.Named.VM)
LastOpcodeType = VMEM;
}
// Remember which export instructions we have seen
if (Increment.Named.EXP) {
ExpInstrTypesSeen |= TII->isEXP(*I) ? 1 : 2;
}
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
MachineOperand &Op = I->getOperand(i);
if (!isOpRelevant(Op))
continue;
const TargetRegisterClass *RC = TII->getOpRegClass(*I, i);
RegInterval Interval = getRegInterval(RC, Op);
for (unsigned j = Interval.first; j < Interval.second; ++j) {
// Remember which registers we define
if (Op.isDef())
DefinedRegs[j] = Limit;
// and which one we are using
if (Op.isUse())
UsedRegs[j] = Limit;
}
}
}
Example 5: assert
// Finds compare instruction that corresponds to supported types of branching.
// Returns the instruction or nullptr on failures or detecting unsupported
// instructions.
MachineInstr *AArch64ConditionOptimizer::findSuitableCompare(
MachineBasicBlock *MBB) {
MachineBasicBlock::iterator I = MBB->getFirstTerminator();
if (I == MBB->end())
return nullptr;
if (I->getOpcode() != AArch64::Bcc)
return nullptr;
// Since we may modify cmp of this MBB, make sure NZCV does not live out.
for (auto SuccBB : MBB->successors())
if (SuccBB->isLiveIn(AArch64::NZCV))
return nullptr;
// Now find the instruction controlling the terminator.
for (MachineBasicBlock::iterator B = MBB->begin(); I != B;) {
--I;
assert(!I->isTerminator() && "Spurious terminator");
// Check if there is any use of NZCV between CMP and Bcc.
if (I->readsRegister(AArch64::NZCV))
return nullptr;
switch (I->getOpcode()) {
// cmp is an alias for subs with a dead destination register.
case AArch64::SUBSWri:
case AArch64::SUBSXri:
// cmn is an alias for adds with a dead destination register.
case AArch64::ADDSWri:
case AArch64::ADDSXri: {
unsigned ShiftAmt = AArch64_AM::getShiftValue(I->getOperand(3).getImm());
if (!I->getOperand(2).isImm()) {
DEBUG(dbgs() << "Immediate of cmp is symbolic, " << *I << '\n');
return nullptr;
} else if (I->getOperand(2).getImm() << ShiftAmt >= 0xfff) {
DEBUG(dbgs() << "Immediate of cmp may be out of range, " << *I << '\n');
return nullptr;
} else if (!MRI->use_empty(I->getOperand(0).getReg())) {
DEBUG(dbgs() << "Destination of cmp is not dead, " << *I << '\n');
return nullptr;
}
return I;
}
// Prevent false positive case like:
// cmp w19, #0
// cinc w0, w19, gt
// ...
// fcmp d8, #0.0
// b.gt .LBB0_5
case AArch64::FCMPDri:
case AArch64::FCMPSri:
case AArch64::FCMPESri:
case AArch64::FCMPEDri:
case AArch64::SUBSWrr:
case AArch64::SUBSXrr:
case AArch64::ADDSWrr:
case AArch64::ADDSXrr:
case AArch64::FCMPSrr:
case AArch64::FCMPDrr:
case AArch64::FCMPESrr:
case AArch64::FCMPEDrr:
// Skip comparison instructions without immediate operands.
return nullptr;
}
}
DEBUG(dbgs() << "Flags not defined in BB#" << MBB->getNumber() << '\n');
return nullptr;
}
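Example 5's immediate test deserves a gloss: AArch64 cmp/cmn immediates are 12 bits with an optional left shift encoded in operand 3, so the check must apply the shift before comparing against 0xfff. A sketch of just that test, assuming AArch64_AM::getShiftValue, the in-tree decoder the example already uses:
// True if the compare immediate (operand 2, shifted per operand 3) stays
// inside the 12-bit range; false for symbolic or out-of-range immediates.
// Condensed from Example 5.
static bool cmpImmInRange(const llvm::MachineInstr &MI) {
if (!MI.getOperand(2).isImm())
return false; // symbolic immediate, e.g. a relocation
unsigned ShiftAmt = AArch64_AM::getShiftValue(MI.getOperand(3).getImm());
return (MI.getOperand(2).getImm() << ShiftAmt) < 0xfff;
}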
Example 6: analyzeFrameIndexes
static void analyzeFrameIndexes(MachineFunction &MF) {
if (MBDisableStackAdjust) return;
MachineFrameInfo *MFI = MF.getFrameInfo();
MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
const MachineRegisterInfo &MRI = MF.getRegInfo();
MachineRegisterInfo::livein_iterator LII = MRI.livein_begin();
MachineRegisterInfo::livein_iterator LIE = MRI.livein_end();
const SmallVector<int, 16> &LiveInFI = MBlazeFI->getLiveIn();
SmallVector<MachineInstr*, 16> EraseInstr;
SmallVector<std::pair<int,int64_t>, 16> FrameRelocate;
MachineBasicBlock *MBB = MF.getBlockNumbered(0);
MachineBasicBlock::iterator MIB = MBB->begin();
MachineBasicBlock::iterator MIE = MBB->end();
int StackAdjust = 0;
int StackOffset = -28;
// In this loop we are searching for frame indexes that correspond to incoming
// arguments that are already in the stack. We look for instruction sequences
// like the following:
//
// LWI REG, FI1, 0
// ...
// SWI REG, FI2, 0
//
// As long as there are no defs of REG in the ... part, we can eliminate
// the SWI instruction because the value has already been stored to the
// stack by the caller. All we need to do is locate FI at the correct
// stack location according to the calling conventions.
//
// Additionally, if the SWI operation kills the def of REG then we don't
// need the LWI operation so we can erase it as well.
for (unsigned i = 0, e = LiveInFI.size(); i < e; ++i) {
for (MachineBasicBlock::iterator I=MIB; I != MIE; ++I) {
if (I->getOpcode() != MBlaze::LWI || I->getNumOperands() != 3 ||
!I->getOperand(1).isFI() || !I->getOperand(0).isReg() ||
I->getOperand(1).getIndex() != LiveInFI[i]) continue;
unsigned FIReg = I->getOperand(0).getReg();
MachineBasicBlock::iterator SI = I;
for (SI++; SI != MIE; ++SI) {
if (!SI->getOperand(0).isReg() ||
!SI->getOperand(1).isFI() ||
SI->getOpcode() != MBlaze::SWI) continue;
int FI = SI->getOperand(1).getIndex();
if (SI->getOperand(0).getReg() != FIReg ||
MFI->isFixedObjectIndex(FI) ||
MFI->getObjectSize(FI) != 4) continue;
if (SI->getOperand(0).isDef()) break;
if (SI->getOperand(0).isKill()) {
DEBUG(dbgs() << "LWI for FI#" << I->getOperand(1).getIndex()
<< " removed\n");
EraseInstr.push_back(I);
}
EraseInstr.push_back(SI);
DEBUG(dbgs() << "SWI for FI#" << FI << " removed\n");
FrameRelocate.push_back(std::make_pair(FI,StackOffset));
DEBUG(dbgs() << "FI#" << FI << " relocated to " << StackOffset << "\n");
StackOffset -= 4;
StackAdjust += 4;
break;
}
}
}
// In this loop we are searching for frame indexes that correspond to
// incoming arguments that are in registers. We look for instruction
// sequences like the following:
//
// ... SWI REG, FI, 0
//
// As long as the ... part does not define REG and if REG is an incoming
// parameter register then we know that, according to ABI conventions, the
// caller has allocated stack space for it already. Instead of allocating
// stack space on our frame, we record the correct location in the caller's
// frame.
for (MachineRegisterInfo::livein_iterator LI = LII; LI != LIE; ++LI) {
for (MachineBasicBlock::iterator I=MIB; I != MIE; ++I) {
if (I->definesRegister(LI->first))
break;
if (I->getOpcode() != MBlaze::SWI || I->getNumOperands() != 3 ||
!I->getOperand(1).isFI() || !I->getOperand(0).isReg() ||
I->getOperand(1).getIndex() < 0) continue;
if (I->getOperand(0).getReg() == LI->first) {
int FI = I->getOperand(1).getIndex();
MBlazeFI->recordLiveIn(FI);
int FILoc = 0;
switch (LI->first) {
//......... the rest of the code is omitted here .........
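A usage note on Example 6: both of its scans start by pattern-matching a load or store whose address operand is a frame index. That match, pulled out as a helper (a sketch only; the per-opcode checks from the example are omitted):
#include "llvm/CodeGen/MachineInstr.h"
// True if MI is a 3-operand load/store of the form OP reg, FI, 0 whose
// frame-index operand equals FI, as matched in Example 6.
static bool accessesFrameIndex(const llvm::MachineInstr &MI, int FI) {
return MI.getNumOperands() == 3 && MI.getOperand(0).isReg() &&
MI.getOperand(1).isFI() && MI.getOperand(1).getIndex() == FI;
}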
Example 7: getParent
MachineBasicBlock *
MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
MachineFunction *MF = getParent();
DebugLoc dl; // FIXME: this is nowhere
// We may need to update this's terminator, but we can't do that if
// AnalyzeBranch fails. If this uses a jump table, we won't touch it.
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
MachineBasicBlock *TBB = 0, *FBB = 0;
SmallVector<MachineOperand, 4> Cond;
if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
return NULL;
// Avoid bugpoint weirdness: A block may end with a conditional branch but
// jumps to the same MBB in either case. We have duplicate CFG edges in that
// case that we can't handle. Since this never happens in properly optimized
// code, just skip those edges.
if (TBB && TBB == FBB) {
DEBUG(dbgs() << "Won't split critical edge after degenerate BB#"
<< getNumber() << '\n');
return NULL;
}
MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
MF->insert(llvm::next(MachineFunction::iterator(this)), NMBB);
DEBUG(dbgs() << "Splitting critical edge:"
" BB#" << getNumber()
<< " -- BB#" << NMBB->getNumber()
<< " -- BB#" << Succ->getNumber() << '\n');
// On some targets like Mips, branches may kill virtual registers. Make sure
// that LiveVariables is properly updated after updateTerminator replaces the
// terminators.
LiveVariables *LV = P->getAnalysisIfAvailable<LiveVariables>();
// Collect a list of virtual registers killed by the terminators.
SmallVector<unsigned, 4> KilledRegs;
if (LV)
for (iterator I = getFirstTerminator(), E = end(); I != E; ++I) {
MachineInstr *MI = I;
for (MachineInstr::mop_iterator OI = MI->operands_begin(),
OE = MI->operands_end(); OI != OE; ++OI) {
if (!OI->isReg() || !OI->isUse() || !OI->isKill() || OI->isUndef())
continue;
unsigned Reg = OI->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg) &&
LV->getVarInfo(Reg).removeKill(MI)) {
KilledRegs.push_back(Reg);
DEBUG(dbgs() << "Removing terminator kill: " << *MI);
OI->setIsKill(false);
}
}
}
ReplaceUsesOfBlockWith(Succ, NMBB);
updateTerminator();
// Insert unconditional "jump Succ" instruction in NMBB if necessary.
NMBB->addSuccessor(Succ);
if (!NMBB->isLayoutSuccessor(Succ)) {
Cond.clear();
MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, Succ, NULL, Cond, dl);
}
// Fix PHI nodes in Succ so they refer to NMBB instead of this
for (MachineBasicBlock::iterator i = Succ->begin(), e = Succ->end();
i != e && i->isPHI(); ++i)
for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
if (i->getOperand(ni+1).getMBB() == this)
i->getOperand(ni+1).setMBB(NMBB);
// Inherit live-ins from the successor
for (MachineBasicBlock::livein_iterator I = Succ->livein_begin(),
E = Succ->livein_end(); I != E; ++I)
NMBB->addLiveIn(*I);
// Update LiveVariables.
if (LV) {
// Restore kills of virtual registers that were killed by the terminators.
while (!KilledRegs.empty()) {
unsigned Reg = KilledRegs.pop_back_val();
for (iterator I = end(), E = begin(); I != E;) {
if (!(--I)->addRegisterKilled(Reg, NULL, /* addIfNotFound= */ false))
continue;
LV->getVarInfo(Reg).Kills.push_back(I);
DEBUG(dbgs() << "Restored terminator kill: " << *I);
break;
}
}
// Update relevant live-through information.
LV->addNewBlock(NMBB, this, Succ);
}
if (MachineDominatorTree *MDT =
P->getAnalysisIfAvailable<MachineDominatorTree>()) {
// Update dominator information.
MachineDomTreeNode *SuccDTNode = MDT->getNode(Succ);
bool IsNewIDom = true;
for (const_pred_iterator PI = Succ->pred_begin(), E = Succ->pred_end();
//......... the rest of the code is omitted here .........
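The PHI fixup in Example 7 depends on the operand layout of PHI instructions: operand 0 is the def, followed by (value, predecessor block) pairs, which is why the loop steps by two and reads the block out of operand ni+1. As a standalone sketch:
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
// Rewrite every PHI in Succ so incoming edges from Old point at New,
// mirroring the fixup loop in Example 7.
static void redirectPHIs(llvm::MachineBasicBlock &Succ,
llvm::MachineBasicBlock *Old, llvm::MachineBasicBlock *New) {
for (llvm::MachineBasicBlock::iterator I = Succ.begin(), E = Succ.end();
I != E && I->isPHI(); ++I)
for (unsigned i = 1, e = I->getNumOperands(); i != e; i += 2)
if (I->getOperand(i + 1).getMBB() == Old)
I->getOperand(i + 1).setMBB(New);
}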
Example 8: hasFP
void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
MachineBasicBlock::iterator MBBI = MBB.begin();
const MachineFrameInfo *MFI = MF.getFrameInfo();
const Function *Fn = MF.getFunction();
const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
MF.getSubtarget().getRegisterInfo());
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
MachineModuleInfo &MMI = MF.getMMI();
AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
bool needsFrameMoves = MMI.hasDebugInfo() || Fn->needsUnwindTableEntry();
bool HasFP = hasFP(MF);
DebugLoc DL = MBB.findDebugLoc(MBBI);
int NumBytes = (int)MFI->getStackSize();
if (!AFI->hasStackFrame()) {
assert(!HasFP && "unexpected function without stack frame but with FP");
// All of the stack allocation is for locals.
AFI->setLocalStackSize(NumBytes);
// Label used to tie together the PROLOG_LABEL and the MachineMoves.
MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
// REDZONE: If the stack size is less than 128 bytes, we don't need
// to actually allocate.
if (NumBytes && !canUseRedZone(MF)) {
emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
MachineInstr::FrameSetup);
// Encode the stack size of the leaf function.
unsigned CFIIndex = MMI.addFrameInst(
MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex);
} else if (NumBytes) {
++NumRedZoneFunctions;
}
return;
}
// Only set up FP if we actually need to.
int FPOffset = 0;
if (HasFP) {
// First instruction must a) allocate the stack and b) have an immediate
// that is a multiple of -2.
assert((MBBI->getOpcode() == AArch64::STPXpre ||
MBBI->getOpcode() == AArch64::STPDpre) &&
MBBI->getOperand(3).getReg() == AArch64::SP &&
MBBI->getOperand(4).getImm() < 0 &&
(MBBI->getOperand(4).getImm() & 1) == 0);
// Frame pointer is fp = sp - 16. Since the STPXpre subtracts the space
// required for the callee saved register area we get the frame pointer
// by adding that offset - 16 = -getImm()*8 - 2*8 = -(getImm() + 2) * 8.
FPOffset = -(MBBI->getOperand(4).getImm() + 2) * 8;
assert(FPOffset >= 0 && "Bad Framepointer Offset");
}
// Move past the saves of the callee-saved registers.
while (MBBI->getOpcode() == AArch64::STPXi ||
MBBI->getOpcode() == AArch64::STPDi ||
MBBI->getOpcode() == AArch64::STPXpre ||
MBBI->getOpcode() == AArch64::STPDpre) {
++MBBI;
NumBytes -= 16;
}
assert(NumBytes >= 0 && "Negative stack allocation size!?");
if (HasFP) {
// Issue sub fp, sp, FPOffset or
// mov fp,sp when FPOffset is zero.
// Note: All stores of callee-saved registers are marked as "FrameSetup".
// This code marks the instruction(s) that set the FP also.
emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP, FPOffset, TII,
MachineInstr::FrameSetup);
}
// All of the remaining stack allocations are for locals.
AFI->setLocalStackSize(NumBytes);
// Allocate space for the rest of the frame.
if (NumBytes) {
// If we're a leaf function, try using the red zone.
if (!canUseRedZone(MF))
emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII,
MachineInstr::FrameSetup);
}
// If we need a base pointer, set it up here. It's whatever the value of the
// stack pointer is at this point. Any variable size objects will be allocated
// after this, so we can still use the base pointer to reference locals.
//
// FIXME: Clarify FrameSetup flags here.
// Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
// needed.
//
if (RegInfo->hasBasePointer(MF))
TII->copyPhysReg(MBB, MBBI, DL, AArch64::X19, AArch64::SP, false);
//......... the rest of the code is omitted here .........
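Example 8 reads the frame-pointer offset directly out of the callee-save store-pair's operands. Isolated as a sketch, with the example's arithmetic kept as a comment (operand 4 is the scaled, negative, even immediate of AArch64 STPXpre/STPDpre, an assumption carried over from the example's assert):
#include <cassert>
#include "llvm/CodeGen/MachineInstr.h"
// fp = sp - 16, and the STPXpre already subtracted the callee-save area,
// so the FP offset is -16 - getImm()*8 = -(getImm() + 2) * 8. Sketch of
// the computation in Example 8.
static int64_t framePointerOffset(const llvm::MachineInstr &STP) {
int64_t Imm = STP.getOperand(4).getImm();
assert(Imm < 0 && (Imm & 1) == 0 && "expected negative, even offset");
return -(Imm + 2) * 8;
}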
Example 9: switch
MachineInstrBuilder
MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
MachineBasicBlock::iterator I) const {
MachineInstrBuilder MIB;
// Certain branches have two forms: e.g. beq $1, $zero, dest vs beqz $1, dest
// Pick the zero form of the branch for readable assembly and for greater
// branch distance in non-microMIPS mode.
// Additionally, MIPSR6 does not permit the use of register $zero for compact
// branches.
// FIXME: Certain atomic sequences on mips64 generate 32bit references to
// Mips::ZERO, which is incorrect. This test should be updated to use
// Subtarget.getABI().GetZeroReg() when those atomic sequences and others
// are fixed.
int ZeroOperandPosition = -1;
bool BranchWithZeroOperand = false;
if (I->isBranch() && !I->isPseudo()) {
auto TRI = I->getParent()->getParent()->getSubtarget().getRegisterInfo();
ZeroOperandPosition = I->findRegisterUseOperandIdx(Mips::ZERO, false, TRI);
BranchWithZeroOperand = ZeroOperandPosition != -1;
}
if (BranchWithZeroOperand) {
switch (NewOpc) {
case Mips::BEQC:
NewOpc = Mips::BEQZC;
break;
case Mips::BNEC:
NewOpc = Mips::BNEZC;
break;
case Mips::BGEC:
NewOpc = Mips::BGEZC;
break;
case Mips::BLTC:
NewOpc = Mips::BLTZC;
break;
case Mips::BEQC64:
NewOpc = Mips::BEQZC64;
break;
case Mips::BNEC64:
NewOpc = Mips::BNEZC64;
break;
}
}
MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), get(NewOpc));
// For MIPSR6, JI*C requires an immediate 0 as an operand; JIALC(64) also
// requires an immediate 0 but additionally needs its %RA<imp-def> implicit
// operand removed, since copying the implicit operands of the instruction
// we're looking at will give us the correct flags.
if (NewOpc == Mips::JIC || NewOpc == Mips::JIALC || NewOpc == Mips::JIC64 ||
NewOpc == Mips::JIALC64) {
if (NewOpc == Mips::JIALC || NewOpc == Mips::JIALC64)
MIB->RemoveOperand(0);
for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J) {
MIB.add(I->getOperand(J));
}
MIB.addImm(0);
} else {
for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J) {
if (BranchWithZeroOperand && (unsigned)ZeroOperandPosition == J)
continue;
MIB.add(I->getOperand(J));
}
}
MIB.copyImplicitOps(*I);
MIB.setMemRefs(I->memoperands_begin(), I->memoperands_end());
return MIB;
}
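A note on Example 9's copy loops: iterating up to getDesc().getNumOperands() copies only the explicit operands, leaving implicit ones to copyImplicitOps. A hedged generalization (MIB.add is the newer spelling of addOperand; SkipIdx is my own addition, letting the caller drop one operand the way the zero-register branch case does):
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
// Copy Old's explicit operands onto MIB, optionally skipping one index
// (pass -1 to keep them all). Mirrors the loops in Example 9.
static void copyExplicitOperands(llvm::MachineInstrBuilder &MIB,
const llvm::MachineInstr &Old, int SkipIdx = -1) {
for (unsigned J = 0, E = Old.getDesc().getNumOperands(); J < E; ++J) {
if ((int)J == SkipIdx)
continue;
MIB.add(Old.getOperand(J));
}
}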
Example 10: ComputeLocalLiveness
/// ComputeLocalLiveness - Computes liveness of registers within a basic
/// block, setting the killed/dead flags as appropriate.
void RALocal::ComputeLocalLiveness(MachineBasicBlock& MBB) {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
// Keep track of the most recently seen previous use or def of each reg,
// so that we can update them with dead/kill markers.
DenseMap<unsigned, std::pair<MachineInstr*, unsigned> > LastUseDef;
for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
I != E; ++I) {
if (I->isDebugValue())
continue;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
MachineOperand &MO = I->getOperand(i);
// Uses don't trigger any flags, but we need to save
// them for later. Also, we have to process these
// _before_ processing the defs, since an instr
// uses regs before it defs them.
if (!MO.isReg() || !MO.getReg() || !MO.isUse())
continue;
LastUseDef[MO.getReg()] = std::make_pair(I, i);
if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) continue;
const unsigned *Aliases = TRI->getAliasSet(MO.getReg());
if (Aliases == 0)
continue;
while (*Aliases) {
DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
alias = LastUseDef.find(*Aliases);
if (alias != LastUseDef.end() && alias->second.first != I)
LastUseDef[*Aliases] = std::make_pair(I, i);
++Aliases;
}
}
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
MachineOperand &MO = I->getOperand(i);
// Defs other than 2-addr redefs _do_ trigger flag changes:
// - A def followed by a def is dead
// - A use followed by a def is a kill
if (!MO.isReg() || !MO.getReg() || !MO.isDef()) continue;
DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
last = LastUseDef.find(MO.getReg());
if (last != LastUseDef.end()) {
// Check if this is a two address instruction. If so, then
// the def does not kill the use.
if (last->second.first == I &&
I->isRegTiedToUseOperand(i))
continue;
MachineOperand &lastUD =
last->second.first->getOperand(last->second.second);
if (lastUD.isDef())
lastUD.setIsDead(true);
else
lastUD.setIsKill(true);
}
LastUseDef[MO.getReg()] = std::make_pair(I, i);
}
}
// Live-out (of the function) registers contain return values of the function,
// so we need to make sure they are alive at return time.
MachineBasicBlock::iterator Ret = MBB.getFirstTerminator();
bool BBEndsInReturn = (Ret != MBB.end() && Ret->getDesc().isReturn());
if (BBEndsInReturn)
for (MachineRegisterInfo::liveout_iterator
I = MF->getRegInfo().liveout_begin(),
E = MF->getRegInfo().liveout_end(); I != E; ++I)
if (!Ret->readsRegister(*I)) {
Ret->addOperand(MachineOperand::CreateReg(*I, false, true));
LastUseDef[*I] = std::make_pair(Ret, Ret->getNumOperands()-1);
}
// Finally, loop over the final use/def of each reg
// in the block and determine if it is dead.
for (DenseMap<unsigned, std::pair<MachineInstr*, unsigned> >::iterator
I = LastUseDef.begin(), E = LastUseDef.end(); I != E; ++I) {
MachineInstr *MI = I->second.first;
unsigned idx = I->second.second;
MachineOperand &MO = MI->getOperand(idx);
bool isPhysReg = TargetRegisterInfo::isPhysicalRegister(MO.getReg());
// A crude approximation of "live-out" calculation
bool usedOutsideBlock = isPhysReg ? false :
UsedInMultipleBlocks.test(MO.getReg() -
TargetRegisterInfo::FirstVirtualRegister);
// If the machine BB ends in a return instruction, then the value isn't used
// outside of the BB.
if (!isPhysReg && (!usedOutsideBlock || BBEndsInReturn)) {
//......... the rest of the code is omitted here .........
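The core state transition in Example 10 fits in a few lines: when a register is redefined, look up its most recent use or def and flag that operand. A sketch of just the update; the (instruction, operand index) pair is what the example's LastUseDef map stores:
#include "llvm/CodeGen/MachineInstr.h"
// Given the previous (instruction, operand index) that touched a register,
// mark it dead (def followed by def) or killed (use followed by def),
// as in Example 10.
static void flagPreviousAccess(llvm::MachineInstr *PrevMI, unsigned PrevIdx) {
llvm::MachineOperand &LastUD = PrevMI->getOperand(PrevIdx);
if (LastUD.isDef())
LastUD.setIsDead(true);
else
LastUD.setIsKill(true);
}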
Example 11: fuseCompareOperations
// Try to fuse comparison instruction Compare into a later branch.
// Return true on success and if Compare is therefore redundant.
bool SystemZElimCompare::fuseCompareOperations(
MachineInstr &Compare, SmallVectorImpl<MachineInstr *> &CCUsers) {
// See whether we have a single branch with which to fuse.
if (CCUsers.size() != 1)
return false;
MachineInstr *Branch = CCUsers[0];
SystemZII::FusedCompareType Type;
switch (Branch->getOpcode()) {
case SystemZ::BRC:
Type = SystemZII::CompareAndBranch;
break;
case SystemZ::CondReturn:
Type = SystemZII::CompareAndReturn;
break;
case SystemZ::CallBCR:
Type = SystemZII::CompareAndSibcall;
break;
case SystemZ::CondTrap:
Type = SystemZII::CompareAndTrap;
break;
default:
return false;
}
// See whether we have a comparison that can be fused.
unsigned FusedOpcode =
TII->getFusedCompare(Compare.getOpcode(), Type, &Compare);
if (!FusedOpcode)
return false;
// Make sure that the operands are available at the branch.
// SrcReg2 is the register if the source operand is a register,
// 0 if the source operand is immediate, and the base register
// if the source operand is memory (index is not supported).
unsigned SrcReg = Compare.getOperand(0).getReg();
unsigned SrcReg2 =
Compare.getOperand(1).isReg() ? Compare.getOperand(1).getReg() : 0;
MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch;
for (++MBBI; MBBI != MBBE; ++MBBI)
if (MBBI->modifiesRegister(SrcReg, TRI) ||
(SrcReg2 && MBBI->modifiesRegister(SrcReg2, TRI)))
return false;
// Read the branch mask, target (if applicable), regmask (if applicable).
MachineOperand CCMask(MBBI->getOperand(1));
assert((CCMask.getImm() & ~SystemZ::CCMASK_ICMP) == 0 &&
"Invalid condition-code mask for integer comparison");
// This is only valid for CompareAndBranch.
MachineOperand Target(MBBI->getOperand(
Type == SystemZII::CompareAndBranch ? 2 : 0));
const uint32_t *RegMask;
if (Type == SystemZII::CompareAndSibcall)
RegMask = MBBI->getOperand(2).getRegMask();
// Clear out all current operands.
int CCUse = MBBI->findRegisterUseOperandIdx(SystemZ::CC, false, TRI);
assert(CCUse >= 0 && "BRC/BCR must use CC");
Branch->RemoveOperand(CCUse);
// Remove target (branch) or regmask (sibcall).
if (Type == SystemZII::CompareAndBranch ||
Type == SystemZII::CompareAndSibcall)
Branch->RemoveOperand(2);
Branch->RemoveOperand(1);
Branch->RemoveOperand(0);
// Rebuild Branch as a fused compare and branch.
// SrcNOps is the number of MI operands of the compare instruction
// that we need to copy over.
unsigned SrcNOps = 2;
if (FusedOpcode == SystemZ::CLT || FusedOpcode == SystemZ::CLGT)
SrcNOps = 3;
Branch->setDesc(TII->get(FusedOpcode));
MachineInstrBuilder MIB(*Branch->getParent()->getParent(), Branch);
for (unsigned I = 0; I < SrcNOps; I++)
MIB.addOperand(Compare.getOperand(I));
MIB.addOperand(CCMask);
if (Type == SystemZII::CompareAndBranch) {
// Only conditional branches define CC, as they may be converted back
// to a non-fused branch because of a long displacement. Conditional
// returns don't have that problem.
MIB.addOperand(Target)
.addReg(SystemZ::CC, RegState::ImplicitDefine | RegState::Dead);
}
if (Type == SystemZII::CompareAndSibcall)
MIB.addRegMask(RegMask);
// Clear any intervening kills of SrcReg and SrcReg2.
MBBI = Compare;
for (++MBBI; MBBI != MBBE; ++MBBI) {
MBBI->clearRegisterKills(SrcReg, TRI);
if (SrcReg2)
MBBI->clearRegisterKills(SrcReg2, TRI);
}
FusedComparisons += 1;
return true;
}
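One subtlety in Example 11 worth calling out: the three RemoveOperand calls run from the highest index (2) down to 0, because removing an operand shifts every later index. The general rule as a sketch:
#include "llvm/CodeGen/MachineInstr.h"
// Remove the last N operands of MI. Removing from the back means the
// indices of the remaining operands never shift underneath us
// (cf. the descending RemoveOperand calls in Example 11).
static void dropTrailingOperands(llvm::MachineInstr &MI, unsigned N) {
for (unsigned i = 0; i != N; ++i)
MI.RemoveOperand(MI.getNumOperands() - 1);
}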
Example 12: switch
/// If \p MBBI is a pseudo instruction, this method expands
/// it to the corresponding (sequence of) actual instruction(s).
/// \returns true if \p MBBI has been expanded.
bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) {
MachineInstr &MI = *MBBI;
unsigned Opcode = MI.getOpcode();
DebugLoc DL = MBBI->getDebugLoc();
switch (Opcode) {
default:
return false;
case X86::TCRETURNdi:
case X86::TCRETURNri:
case X86::TCRETURNmi:
case X86::TCRETURNdi64:
case X86::TCRETURNri64:
case X86::TCRETURNmi64: {
bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64;
MachineOperand &JumpTarget = MBBI->getOperand(0);
MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
assert(StackAdjust.isImm() && "Expecting immediate value.");
// Adjust stack pointer.
int StackAdj = StackAdjust.getImm();
if (StackAdj) {
// Check for possible merge with preceding ADD instruction.
StackAdj += X86FL->mergeSPUpdates(MBB, MBBI, true);
X86FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true);
}
// Jump to label or value in register.
bool IsWin64 = STI->isTargetWin64();
if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdi64) {
unsigned Op = (Opcode == X86::TCRETURNdi)
? X86::TAILJMPd
: (IsWin64 ? X86::TAILJMPd64_REX : X86::TAILJMPd64);
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
if (JumpTarget.isGlobal())
MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
JumpTarget.getTargetFlags());
else {
assert(JumpTarget.isSymbol());
MIB.addExternalSymbol(JumpTarget.getSymbolName(),
JumpTarget.getTargetFlags());
}
} else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64) {
unsigned Op = (Opcode == X86::TCRETURNmi)
? X86::TAILJMPm
: (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
for (unsigned i = 0; i != 5; ++i)
MIB.addOperand(MBBI->getOperand(i));
} else if (Opcode == X86::TCRETURNri64) {
BuildMI(MBB, MBBI, DL,
TII->get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
.addReg(JumpTarget.getReg(), RegState::Kill);
} else {
BuildMI(MBB, MBBI, DL, TII->get(X86::TAILJMPr))
.addReg(JumpTarget.getReg(), RegState::Kill);
}
MachineInstr *NewMI = std::prev(MBBI);
NewMI->copyImplicitOps(*MBBI->getParent()->getParent(), *MBBI);
// Delete the pseudo instruction TCRETURN.
MBB.erase(MBBI);
return true;
}
case X86::EH_RETURN:
case X86::EH_RETURN64: {
MachineOperand &DestAddr = MBBI->getOperand(0);
assert(DestAddr.isReg() && "Offset should be in register!");
const bool Uses64BitFramePtr =
STI->isTarget64BitLP64() || STI->isTargetNaCl64();
unsigned StackPtr = TRI->getStackRegister();
BuildMI(MBB, MBBI, DL,
TII->get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), StackPtr)
.addReg(DestAddr.getReg());
// The EH_RETURN pseudo is really removed during the MC Lowering.
return true;
}
case X86::IRET: {
// Adjust stack to erase error code
int64_t StackAdj = MBBI->getOperand(0).getImm();
X86FL->emitSPUpdate(MBB, MBBI, StackAdj, true);
// Replace pseudo with machine iret
BuildMI(MBB, MBBI, DL,
TII->get(STI->is64Bit() ? X86::IRET64 : X86::IRET32));
MBB.erase(MBBI);
return true;
}
case X86::RET: {
// Adjust stack to erase error code
int64_t StackAdj = MBBI->getOperand(0).getImm();
MachineInstrBuilder MIB;
if (StackAdj == 0) {
MIB = BuildMI(MBB, MBBI, DL,
TII->get(STI->is64Bit() ? X86::RETQ : X86::RETL));
//......... the rest of the code is omitted here .........
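A note on Example 12's memory tail-call case: the magic constant 5 in its copy loop is the number of X86 address operands (base, scale, index, displacement, segment). A hedged sketch using the named constant instead (X86::AddrNumOperands lives in the target's private X86BaseInfo.h; addOperand matches the older API used by the example):
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
// Copy a full X86 memory reference starting at operand FirstOp onto a new
// instruction, as Example 12 does for TCRETURNmi with FirstOp == 0.
static void copyAddrOperands(llvm::MachineInstrBuilder &MIB,
const llvm::MachineInstr &MI, unsigned FirstOp) {
for (unsigned i = 0; i != X86::AddrNumOperands; ++i)
MIB.addOperand(MI.getOperand(FirstOp + i));
}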
Example 13: SplitPHIEdges
bool PHIElimination::SplitPHIEdges(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineLoopInfo *MLI) {
if (MBB.empty() || !MBB.front().isPHI() || MBB.isLandingPad())
return false; // Quick exit for basic blocks without PHIs.
const MachineLoop *CurLoop = MLI ? MLI->getLoopFor(&MBB) : 0;
bool IsLoopHeader = CurLoop && &MBB == CurLoop->getHeader();
bool Changed = false;
for (MachineBasicBlock::iterator BBI = MBB.begin(), BBE = MBB.end();
BBI != BBE && BBI->isPHI(); ++BBI) {
for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) {
unsigned Reg = BBI->getOperand(i).getReg();
MachineBasicBlock *PreMBB = BBI->getOperand(i+1).getMBB();
// Is there a critical edge from PreMBB to MBB?
if (PreMBB->succ_size() == 1)
continue;
// Avoid splitting backedges of loops. It would introduce small
// out-of-line blocks into the loop which is very bad for code placement.
if (PreMBB == &MBB && !SplitAllCriticalEdges)
continue;
const MachineLoop *PreLoop = MLI ? MLI->getLoopFor(PreMBB) : 0;
if (IsLoopHeader && PreLoop == CurLoop && !SplitAllCriticalEdges)
continue;
// LV doesn't consider a phi use live-out, so isLiveOut only returns true
// when the source register is live-out for some other reason than a phi
// use. That means the copy we will insert in PreMBB won't be a kill, and
// there is a risk it may not be coalesced away.
//
// If the copy would be a kill, there is no need to split the edge.
if (!isLiveOutPastPHIs(Reg, PreMBB) && !SplitAllCriticalEdges)
continue;
DEBUG(dbgs() << PrintReg(Reg) << " live-out before critical edge BB#"
<< PreMBB->getNumber() << " -> BB#" << MBB.getNumber()
<< ": " << *BBI);
// If Reg is not live-in to MBB, it means it must be live-in to some
// other PreMBB successor, and we can avoid the interference by splitting
// the edge.
//
// If Reg *is* live-in to MBB, the interference is inevitable and a copy
// is likely to be left after coalescing. If we are looking at a loop
// exiting edge, split it so we won't insert code in the loop, otherwise
// don't bother.
bool ShouldSplit = !isLiveIn(Reg, &MBB) || SplitAllCriticalEdges;
// Check for a loop exiting edge.
if (!ShouldSplit && CurLoop != PreLoop) {
DEBUG({
dbgs() << "Split wouldn't help, maybe avoid loop copies?\n";
if (PreLoop) dbgs() << "PreLoop: " << *PreLoop;
if (CurLoop) dbgs() << "CurLoop: " << *CurLoop;
});
// This edge could be entering a loop, exiting a loop, or it could be
// both: Jumping directly from one loop to the header of a sibling
// loop.
// Split unless this edge is entering CurLoop from an outer loop.
ShouldSplit = PreLoop && !PreLoop->contains(CurLoop);
}
if (!ShouldSplit)
continue;
if (!PreMBB->SplitCriticalEdge(&MBB, this)) {
DEBUG(dbgs() << "Failed to split ciritcal edge.\n");
continue;
}
Changed = true;
++NumCriticalEdgesSplit;
}
Example 14: while
void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
MachineFrameInfo *MFI = MF.getFrameInfo();
const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL;
bool IsTailCallReturn = false;
if (MBB.end() != MBBI) {
DL = MBBI->getDebugLoc();
unsigned RetOpcode = MBBI->getOpcode();
IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
RetOpcode == AArch64::TCRETURNri;
}
int NumBytes = MFI->getStackSize();
const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
return;
// Initial and residual are named for consistency with the prologue. Note that
// in the epilogue, the residual adjustment is executed first.
uint64_t ArgumentPopSize = 0;
if (IsTailCallReturn) {
MachineOperand &StackAdjust = MBBI->getOperand(1);
// For a tail-call in a callee-pops-arguments environment, some or all of
// the stack may actually be in use for the call's arguments; this is
// calculated during LowerCall and consumed here...
ArgumentPopSize = StackAdjust.getImm();
} else {
// ... otherwise the amount to pop is *all* of the argument space,
// conveniently stored in the MachineFunctionInfo by
// LowerFormalArguments. This will, of course, be zero for the C calling
// convention.
ArgumentPopSize = AFI->getArgumentStackToRestore();
}
// The stack frame should look like this:
//
//      ----------------------                     ---
//      |                    |                      |
//      | BytesInStackArgArea|              CalleeArgStackSize
//      | (NumReusableBytes) |                (of tail call)
//      |                    |                     ---
//      |                    |                      |
//      ---------------------|        ---           |
//      |                    |         |            |
//      |   CalleeSavedReg   |         |            |
//      | (NumRestores * 8)  |         |            |
//      |                    |         |            |
//      ---------------------|         |         NumBytes
//      |                    |     StackSize  (StackAdjustUp)
//      |   LocalStackSize   |         |            |
//      | (covering callee   |         |            |
//      |       args)        |         |            |
//      |                    |         |            |
//      ----------------------        ---          ---
//
// So NumBytes = StackSize + BytesInStackArgArea - CalleeArgStackSize
// = StackSize + ArgumentPopSize
//
// AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
// it as the 2nd argument of AArch64ISD::TC_RETURN.
NumBytes += ArgumentPopSize;
unsigned NumRestores = 0;
// Move past the restores of the callee-saved registers.
MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
MachineBasicBlock::iterator Begin = MBB.begin();
while (LastPopI != Begin) {
--LastPopI;
unsigned Restores = getNumCSRestores(*LastPopI, CSRegs);
NumRestores += Restores;
if (Restores == 0) {
++LastPopI;
break;
}
}
NumBytes -= NumRestores * 8;
assert(NumBytes >= 0 && "Negative stack allocation size!?");
if (!hasFP(MF)) {
// If this was a redzone leaf function, we don't need to restore the
// stack pointer.
if (!canUseRedZone(MF))
emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, NumBytes,
TII);
return;
}
// Restore the original stack pointer.
// FIXME: Rather than doing the math here, we should instead just use
// non-post-indexed loads for the restores if we aren't actually going to
// be able to save any instructions.
if (NumBytes || MFI->hasVarSizedObjects())
//......... the rest of the code is omitted here .........
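The backwards walk in Example 14 is a stock epilogue idiom: start at the first terminator, step --I while instructions match, and step forward once on the first mismatch. Generalized as a sketch (Pred is any per-instruction test, standing in for the example's getNumCSRestores check):
#include "llvm/CodeGen/MachineBasicBlock.h"
// Move It backwards over instructions satisfying Pred, stopping at
// MBB.begin(). Mirrors the restore-skipping loop in Example 14.
template <typename PredT>
static llvm::MachineBasicBlock::iterator
skipWhile(llvm::MachineBasicBlock &MBB,
llvm::MachineBasicBlock::iterator It, PredT Pred) {
llvm::MachineBasicBlock::iterator Begin = MBB.begin();
while (It != Begin) {
--It;
if (!Pred(*It)) {
++It;
break;
}
}
return It;
}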
Example 15: reMaterializeFor
/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
MachineBasicBlock::iterator MI) {
SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
if (!ParentVNI) {
DEBUG(dbgs() << "\tadding <undef> flags: ");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
MO.setIsUndef();
}
DEBUG(dbgs() << UseIdx << '\t' << *MI);
return true;
}
if (SnippetCopies.count(MI))
return false;
// Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.
LiveRangeEdit::Remat RM(ParentVNI);
SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
if (SibI != SibValues.end())
RM.OrigMI = SibI->second.DefMI;
if (!Edit->canRematerializeAt(RM, UseIdx, false)) {
markValueUsed(&VirtReg, ParentVNI);
DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
return false;
}
// If the instruction also writes VirtReg.reg, it had better not require the
// same register for uses and defs.
SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
MIBundleOperands::VirtRegInfo RI =
MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
if (RI.Tied) {
markValueUsed(&VirtReg, ParentVNI);
DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
return false;
}
// Before rematerializing into a register for a single instruction, try to
// fold a load into the instruction. That avoids allocating a new register.
if (RM.OrigMI->canFoldAsLoad() &&
foldMemoryOperand(Ops, RM.OrigMI)) {
Edit->markRematerialized(RM.ParentVNI);
++NumFoldedLoads;
return true;
}
// Allocate a new register for the remat.
unsigned NewVReg = Edit->createFrom(Original);
// Finally we can rematerialize OrigMI before MI.
SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewVReg, RM,
TRI);
(void)DefIdx;
DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
<< *LIS.getInstructionFromIndex(DefIdx));
// Replace operands
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(Ops[i].second);
if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
MO.setReg(NewVReg);
MO.setIsKill();
}
}
DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI << '\n');
++NumRemats;
return true;
}
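To close, the operand-replacement loop at the end of Example 15 is the canonical way to retarget uses at a newly created virtual register; it flags the new use as a kill because the rematerialized def sits immediately before MI. As a standalone sketch:
#include "llvm/CodeGen/MachineInstr.h"
// Redirect every use of OldReg in MI to NewReg and mark it killed, as in
// the final loop of Example 15.
static void replaceUsesInInstr(llvm::MachineInstr &MI, unsigned OldReg,
unsigned NewReg) {
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
llvm::MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && MO.isUse() && MO.getReg() == OldReg) {
MO.setReg(NewReg);
MO.setIsKill();
}
}
}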