This article collects typical usage examples of the C++ SlotIndex class. If you have been wondering what the C++ SlotIndex class is for, how to use it, or what real code that uses it looks like, the curated class examples below may help.
The following shows 15 code examples of the SlotIndex class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
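Before the examples, here is a minimal sketch of the slot model they all lean on. Everything LLVM-specific in it (LiveIntervals, SlotIndex and their member functions) also appears in the examples below; the wrapper function itself is invented for illustration, and exact header names and pointer-versus-reference signatures shift between the LLVM versions these snippets come from.

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/SlotIndexes.h"
using namespace llvm;

// Hypothetical helper: is Reg live at the point where MI reads its operands?
// Every instruction in the maps owns a small group of slots: the base index
// (getBaseIndex()), the early-clobber slot (getRegSlot(true)) at which uses
// must still be live, the register slot (getRegSlot()) at which normal defs
// become live, and the dead slot (getDeadSlot()) at which dead defs end.
static bool liveAtUseOf(LiveIntervals &LIS, MachineInstr *MI, unsigned Reg) {
  SlotIndex UseSlot = LIS.getInstructionIndex(MI).getRegSlot(true);
  return LIS.hasInterval(Reg) && LIS.getInterval(Reg).liveAt(UseSlot);
}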
Example 1: assert
/// LowerPHINode - Lower the PHI node at the top of the specified block,
///
void PHIElimination::LowerPHINode(MachineBasicBlock &MBB,
MachineBasicBlock::iterator LastPHIIt) {
++NumLowered;
MachineBasicBlock::iterator AfterPHIsIt = std::next(LastPHIIt);
// Unlink the PHI node from the basic block, but don't delete the PHI yet.
MachineInstr *MPhi = MBB.remove(MBB.begin());
unsigned NumSrcs = (MPhi->getNumOperands() - 1) / 2;
unsigned DestReg = MPhi->getOperand(0).getReg();
assert(MPhi->getOperand(0).getSubReg() == 0 && "Can't handle sub-reg PHIs");
bool isDead = MPhi->getOperand(0).isDead();
// Create a new register for the incoming PHI arguments.
MachineFunction &MF = *MBB.getParent();
unsigned IncomingReg = 0;
bool reusedIncoming = false; // Is IncomingReg reused from an earlier PHI?
// Insert a register to register copy at the top of the current block (but
// after any remaining phi nodes) which copies the new incoming register
// into the phi node destination.
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
if (isSourceDefinedByImplicitDef(MPhi, MRI))
// If all sources of a PHI node are implicit_def, just emit an
// implicit_def instead of a copy.
BuildMI(MBB, AfterPHIsIt, MPhi->getDebugLoc(),
TII->get(TargetOpcode::IMPLICIT_DEF), DestReg);
else {
// Can we reuse an earlier PHI node? This only happens for critical edges,
// typically those created by tail duplication.
unsigned &entry = LoweredPHIs[MPhi];
if (entry) {
// An identical PHI node was already lowered. Reuse the incoming register.
IncomingReg = entry;
reusedIncoming = true;
++NumReused;
DEBUG(dbgs() << "Reusing " << PrintReg(IncomingReg) << " for " << *MPhi);
} else {
const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(DestReg);
entry = IncomingReg = MF.getRegInfo().createVirtualRegister(RC);
}
BuildMI(MBB, AfterPHIsIt, MPhi->getDebugLoc(),
TII->get(TargetOpcode::COPY), DestReg)
.addReg(IncomingReg);
}
// Update live variable information if there is any.
if (LV) {
MachineInstr *PHICopy = std::prev(AfterPHIsIt);
if (IncomingReg) {
LiveVariables::VarInfo &VI = LV->getVarInfo(IncomingReg);
// Increment use count of the newly created virtual register.
LV->setPHIJoin(IncomingReg);
// When we are reusing the incoming register, it may already have been
// killed in this block. The old kill will also have been inserted at
// AfterPHIsIt, so it appears before the current PHICopy.
if (reusedIncoming)
if (MachineInstr *OldKill = VI.findKill(&MBB)) {
DEBUG(dbgs() << "Remove old kill from " << *OldKill);
LV->removeVirtualRegisterKilled(IncomingReg, OldKill);
DEBUG(MBB.dump());
}
// Add information to LiveVariables to know that the incoming value is
// killed. Note that because the value is defined in several places (once
// for each incoming block), the "def" block and instruction fields
// for the VarInfo are not filled in.
LV->addVirtualRegisterKilled(IncomingReg, PHICopy);
}
// Since we are going to be deleting the PHI node, if it is the last use of
// any registers, or if the value itself is dead, we need to move this
// information over to the new copy we just inserted.
LV->removeVirtualRegistersKilled(MPhi);
// If the result is dead, update LV.
if (isDead) {
LV->addVirtualRegisterDead(DestReg, PHICopy);
LV->removeVirtualRegisterDead(DestReg, MPhi);
}
}
// Update LiveIntervals for the new copy or implicit def.
if (LIS) {
MachineInstr *NewInstr = std::prev(AfterPHIsIt);
SlotIndex DestCopyIndex = LIS->InsertMachineInstrInMaps(NewInstr);
SlotIndex MBBStartIndex = LIS->getMBBStartIdx(&MBB);
if (IncomingReg) {
// Add the region from the beginning of MBB to the copy instruction to
// IncomingReg's live interval.
LiveInterval &IncomingLI = LIS->createEmptyInterval(IncomingReg);
VNInfo *IncomingVNI = IncomingLI.getVNInfoAt(MBBStartIndex);
if (!IncomingVNI)
//.........the rest of this code is omitted.........
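The elided remainder stays elided; purely as a stand-alone illustration of the SlotIndex bookkeeping a lowered PHI needs (and explicitly not the omitted code), here is a hedged sketch of giving the new copy's register a live range from the block entry to the copy's definition slot. The helper name is invented; the LiveIntervals calls match the ones already visible above.

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
using namespace llvm;

// Hypothetical helper: give Reg a live range covering [start of MBB, def slot
// of Copy), the shape a lowered PHI leaves behind. Assumes Copy has already
// been added to the index maps and Reg has no interval yet.
static void coverBlockEntryToCopy(LiveIntervals &LIS, MachineBasicBlock &MBB,
                                  MachineInstr *Copy, unsigned Reg) {
  SlotIndex Start = LIS.getMBBStartIdx(&MBB);
  SlotIndex End = LIS.getInstructionIndex(Copy).getRegSlot();
  LiveInterval &LI = LIS.createEmptyInterval(Reg);
  VNInfo *VNI = LI.getNextValue(Start, LIS.getVNInfoAllocator());
  LI.addSegment(LiveRange::Segment(Start, End, VNI));
}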
Example 2: getParent
MachineBasicBlock *
MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
// Splitting the critical edge to a landing pad block is non-trivial. Don't do
// it in this generic function.
if (Succ->isLandingPad())
return nullptr;
MachineFunction *MF = getParent();
DebugLoc dl; // FIXME: this is nowhere
// Performance might be harmed on HW that implements branching using exec mask
// where both sides of the branches are always executed.
if (MF->getTarget().requiresStructuredCFG())
return nullptr;
// We may need to update this's terminator, but we can't do that if
// AnalyzeBranch fails. If this uses a jump table, we won't touch it.
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
return nullptr;
// Avoid bugpoint weirdness: A block may end with a conditional branch that
// jumps to the same MBB in either case. We have duplicate CFG edges in that
// case that we can't handle. Since this never happens in properly optimized
// code, just skip those edges.
if (TBB && TBB == FBB) {
DEBUG(dbgs() << "Won't split critical edge after degenerate BB#"
<< getNumber() << '\n');
return nullptr;
}
MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
MF->insert(std::next(MachineFunction::iterator(this)), NMBB);
DEBUG(dbgs() << "Splitting critical edge:"
" BB#" << getNumber()
<< " -- BB#" << NMBB->getNumber()
<< " -- BB#" << Succ->getNumber() << '\n');
LiveIntervals *LIS = P->getAnalysisIfAvailable<LiveIntervals>();
SlotIndexes *Indexes = P->getAnalysisIfAvailable<SlotIndexes>();
if (LIS)
LIS->insertMBBInMaps(NMBB);
else if (Indexes)
Indexes->insertMBBInMaps(NMBB);
// On some targets like Mips, branches may kill virtual registers. Make sure
// that LiveVariables is properly updated after updateTerminator replaces the
// terminators.
LiveVariables *LV = P->getAnalysisIfAvailable<LiveVariables>();
// Collect a list of virtual registers killed by the terminators.
SmallVector<unsigned, 4> KilledRegs;
if (LV)
for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
I != E; ++I) {
MachineInstr *MI = I;
for (MachineInstr::mop_iterator OI = MI->operands_begin(),
OE = MI->operands_end(); OI != OE; ++OI) {
if (!OI->isReg() || OI->getReg() == 0 ||
!OI->isUse() || !OI->isKill() || OI->isUndef())
continue;
unsigned Reg = OI->getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
LV->getVarInfo(Reg).removeKill(MI)) {
KilledRegs.push_back(Reg);
DEBUG(dbgs() << "Removing terminator kill: " << *MI);
OI->setIsKill(false);
}
}
}
SmallVector<unsigned, 4> UsedRegs;
if (LIS) {
for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
I != E; ++I) {
MachineInstr *MI = I;
for (MachineInstr::mop_iterator OI = MI->operands_begin(),
OE = MI->operands_end(); OI != OE; ++OI) {
if (!OI->isReg() || OI->getReg() == 0)
continue;
unsigned Reg = OI->getReg();
if (std::find(UsedRegs.begin(), UsedRegs.end(), Reg) == UsedRegs.end())
UsedRegs.push_back(Reg);
}
}
}
ReplaceUsesOfBlockWith(Succ, NMBB);
// If updateTerminator() removes instructions, we need to remove them from
// SlotIndexes.
SmallVector<MachineInstr*, 4> Terminators;
if (Indexes) {
for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
I != E; ++I)
Terminators.push_back(I);
//.........the rest of this code is omitted.........
Example 3: report
void
MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
const MachineInstr *MI = MO->getParent();
const TargetInstrDesc &TI = MI->getDesc();
// The first TI.NumDefs operands must be explicit register defines
if (MONum < TI.getNumDefs()) {
if (!MO->isReg())
report("Explicit definition must be a register", MO, MONum);
else if (!MO->isDef())
report("Explicit definition marked as use", MO, MONum);
else if (MO->isImplicit())
report("Explicit definition marked as implicit", MO, MONum);
} else if (MONum < TI.getNumOperands()) {
// Don't check if it's the last operand in a variadic instruction. See,
// e.g., LDM_RET in the arm back end.
if (MO->isReg() && !(TI.isVariadic() && MONum == TI.getNumOperands()-1)) {
if (MO->isDef())
report("Explicit operand marked as def", MO, MONum);
if (MO->isImplicit())
report("Explicit operand marked as implicit", MO, MONum);
}
} else {
// ARM adds %reg0 operands to indicate predicates. We'll allow that.
if (MO->isReg() && !MO->isImplicit() && !TI.isVariadic() && MO->getReg())
report("Extra explicit operand on non-variadic instruction", MO, MONum);
}
switch (MO->getType()) {
case MachineOperand::MO_Register: {
const unsigned Reg = MO->getReg();
if (!Reg)
return;
// Check Live Variables.
if (MO->isUndef()) {
// An <undef> doesn't refer to any register, so just skip it.
} else if (MO->isUse()) {
regsLiveInButUnused.erase(Reg);
bool isKill = false;
unsigned defIdx;
if (MI->isRegTiedToDefOperand(MONum, &defIdx)) {
// A two-addr use counts as a kill if use and def are the same.
unsigned DefReg = MI->getOperand(defIdx).getReg();
if (Reg == DefReg) {
isKill = true;
// And in that case an explicit kill flag is not allowed.
if (MO->isKill())
report("Illegal kill flag on two-address instruction operand",
MO, MONum);
} else if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
report("Two-address instruction operands must be identical",
MO, MONum);
}
} else
isKill = MO->isKill();
if (isKill)
addRegWithSubRegs(regsKilled, Reg);
// Check that LiveVars knows this kill.
if (LiveVars && TargetRegisterInfo::isVirtualRegister(Reg) &&
MO->isKill()) {
LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
if (std::find(VI.Kills.begin(),
VI.Kills.end(), MI) == VI.Kills.end())
report("Kill missing from LiveVariables", MO, MONum);
}
// Check LiveInts liveness and kill.
if (TargetRegisterInfo::isVirtualRegister(Reg) &&
LiveInts && !LiveInts->isNotInMIMap(MI)) {
SlotIndex UseIdx = LiveInts->getInstructionIndex(MI).getUseIndex();
if (LiveInts->hasInterval(Reg)) {
const LiveInterval &LI = LiveInts->getInterval(Reg);
if (!LI.liveAt(UseIdx)) {
report("No live range at use", MO, MONum);
*OS << UseIdx << " is not live in " << LI << '\n';
}
// Verify isKill == LI.killedAt.
if (!MI->isRegTiedToDefOperand(MONum)) {
// MI could kill register without a kill flag on MO.
bool miKill = MI->killsRegister(Reg);
bool liKill = LI.killedAt(UseIdx.getDefIndex());
if (miKill && !liKill) {
report("Live range continues after kill flag", MO, MONum);
*OS << "Live range: " << LI << '\n';
}
if (!miKill && liKill) {
report("Live range ends without kill flag", MO, MONum);
*OS << "Live range: " << LI << '\n';
}
}
} else {
report("Virtual register has no Live interval", MO, MONum);
}
}
// Use of a dead register.
//.........the rest of this code is omitted.........
Example 4: assert
void LiveInterval::constructMainRangeFromSubranges(
const SlotIndexes &Indexes, VNInfo::Allocator &VNIAllocator) {
// The basic observations on which this algorithm is based:
// - Each Def/ValNo in a subrange must have a corresponding def on the main
// range, but no further defs/valnos are necessary.
// - If any of the subranges is live at a point the main liverange has to be
// live too; conversely, if no subrange is live the main range mustn't be
// live either.
// We do this by scanning through all the subranges simultaneously, creating
// new segments in the main range as segment starts/ends come up in the subranges.
assert(hasSubRanges() && "expected subranges to be present");
assert(segments.empty() && valnos.empty() && "expected empty main range");
// Collect subrange, iterator pairs for the walk and determine first and last
// SlotIndex involved.
SmallVector<std::pair<const SubRange*, const_iterator>, 4> SRs;
SlotIndex First;
SlotIndex Last;
for (const SubRange &SR : subranges()) {
if (SR.empty())
continue;
SRs.push_back(std::make_pair(&SR, SR.begin()));
if (!First.isValid() || SR.segments.front().start < First)
First = SR.segments.front().start;
if (!Last.isValid() || SR.segments.back().end > Last)
Last = SR.segments.back().end;
}
// Walk over all subranges simultaneously.
Segment CurrentSegment;
bool ConstructingSegment = false;
bool NeedVNIFixup = false;
unsigned ActiveMask = 0;
SlotIndex Pos = First;
while (true) {
SlotIndex NextPos = Last;
enum {
NOTHING,
BEGIN_SEGMENT,
END_SEGMENT,
} Event = NOTHING;
// Which subregister lanes are affected by the current event.
unsigned EventMask = 0;
// Whether a BEGIN_SEGMENT is also a valno definition point.
bool IsDef = false;
// Find the next begin or end of a subrange segment. Combine masks if we
// have multiple begins/ends at the same position. Ends take precedence over
// Begins.
for (auto &SRP : SRs) {
const SubRange &SR = *SRP.first;
const_iterator &I = SRP.second;
// Advance iterator of subrange to a segment involving Pos; the earlier
// segments are already merged at this point.
while (I != SR.end() &&
(I->end < Pos ||
(I->end == Pos && (ActiveMask & SR.LaneMask) == 0)))
++I;
if (I == SR.end())
continue;
if ((ActiveMask & SR.LaneMask) == 0 &&
Pos <= I->start && I->start <= NextPos) {
// Merge multiple begins at the same position.
if (I->start == NextPos && Event == BEGIN_SEGMENT) {
EventMask |= SR.LaneMask;
IsDef |= I->valno->def == I->start;
} else if (I->start < NextPos || Event != END_SEGMENT) {
Event = BEGIN_SEGMENT;
NextPos = I->start;
EventMask = SR.LaneMask;
IsDef = I->valno->def == I->start;
}
}
if ((ActiveMask & SR.LaneMask) != 0 &&
Pos <= I->end && I->end <= NextPos) {
// Merge multiple ends at the same position.
if (I->end == NextPos && Event == END_SEGMENT)
EventMask |= SR.LaneMask;
else {
Event = END_SEGMENT;
NextPos = I->end;
EventMask = SR.LaneMask;
}
}
}
// Advance scan position.
Pos = NextPos;
if (Event == BEGIN_SEGMENT) {
if (ConstructingSegment && IsDef) {
// Finish previous segment because we have to start a new one.
CurrentSegment.end = Pos;
append(CurrentSegment);
ConstructingSegment = false;
}
// Start a new segment if necessary.
if (!ConstructingSegment) {
// Determine value number for the segment.
VNInfo *VNI;
if (IsDef) {
//.........the rest of this code is omitted.........
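The First/Last scan above only needs SlotIndex comparison plus validity checks. A tiny hedged sketch of that ordering, with an invented helper name:

#include "llvm/CodeGen/SlotIndexes.h"
using namespace llvm;

// SlotIndex values compare like points on a line, but a default-constructed
// index is invalid and has to be filtered out before comparing, exactly as
// the loop over subranges does.
static SlotIndex minValidIndex(SlotIndex A, SlotIndex B) {
  if (!A.isValid())
    return B;
  if (!B.isValid())
    return A;
  return A < B ? A : B;
}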
Example 5: assert
// mapValue - Find the mapped value for ParentVNI at Idx.
// Potentially create phi-def values.
VNInfo *LiveIntervalMap::mapValue(const VNInfo *ParentVNI, SlotIndex Idx) {
assert(ParentVNI && "Mapping NULL value");
assert(Idx.isValid() && "Invalid SlotIndex");
assert(parentli_.getVNInfoAt(Idx) == ParentVNI && "Bad ParentVNI");
// Use insert for lookup, so we can add missing values with a second lookup.
std::pair<ValueMap::iterator,bool> InsP =
valueMap_.insert(ValueMap::value_type(ParentVNI, static_cast<VNInfo *>(0)));
// The static_cast<VNInfo *> is only needed to work around a bug in an
// old version of the C++0x standard which the following compilers
// implemented and have yet to fix:
//
// Microsoft Visual Studio 2010 Version 10.0.30319.1 RTMRel
// Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 16.00.30319.01
//
// If/When we move to C++0x, this can be replaced by nullptr.
// This was an unknown value. Create a simple mapping.
if (InsP.second)
return InsP.first->second = li_.createValueCopy(ParentVNI,
lis_.getVNInfoAllocator());
// This was a simple mapped value.
if (InsP.first->second)
return InsP.first->second;
// This is a complex mapped value. There may be multiple defs, and we may need
// to create phi-defs.
MachineBasicBlock *IdxMBB = lis_.getMBBFromIndex(Idx);
assert(IdxMBB && "No MBB at Idx");
// Is there a def in the same MBB we can extend?
if (VNInfo *VNI = extendTo(IdxMBB, Idx))
return VNI;
// Now for the fun part. We know that ParentVNI potentially has multiple defs,
// and we may need to create even more phi-defs to preserve VNInfo SSA form.
// Perform a depth-first search for predecessor blocks where we know the
// dominating VNInfo. Insert phi-def VNInfos along the path back to IdxMBB.
// Track MBBs where we have created or learned the dominating value.
// This may change during the DFS as we create new phi-defs.
typedef DenseMap<MachineBasicBlock*, VNInfo*> MBBValueMap;
MBBValueMap DomValue;
for (idf_iterator<MachineBasicBlock*>
IDFI = idf_begin(IdxMBB),
IDFE = idf_end(IdxMBB); IDFI != IDFE;) {
MachineBasicBlock *MBB = *IDFI;
SlotIndex End = lis_.getMBBEndIdx(MBB);
// We are operating on the restricted CFG where ParentVNI is live.
if (parentli_.getVNInfoAt(End.getPrevSlot()) != ParentVNI) {
IDFI.skipChildren();
continue;
}
// Do we have a dominating value in this block?
VNInfo *VNI = extendTo(MBB, End);
if (!VNI) {
++IDFI;
continue;
}
// Yes, VNI dominates MBB. Track the path back to IdxMBB, creating phi-defs
// as needed along the way.
for (unsigned PI = IDFI.getPathLength()-1; PI != 0; --PI) {
// Start from MBB's immediate successor. End at IdxMBB.
MachineBasicBlock *Succ = IDFI.getPath(PI-1);
std::pair<MBBValueMap::iterator, bool> InsP =
DomValue.insert(MBBValueMap::value_type(Succ, VNI));
// This is the first time we backtrack to Succ.
if (InsP.second)
continue;
// We reached Succ again with the same VNI. Nothing is going to change.
VNInfo *OVNI = InsP.first->second;
if (OVNI == VNI)
break;
// Succ already has a phi-def. No need to continue.
SlotIndex Start = lis_.getMBBStartIdx(Succ);
if (OVNI->def == Start)
break;
// We have a collision between the old and new VNI at Succ. That means
// neither dominates and we need a new phi-def.
VNI = li_.getNextValue(Start, 0, true, lis_.getVNInfoAllocator());
VNI->setIsPHIDef(true);
InsP.first->second = VNI;
// Replace OVNI with VNI in the remaining path.
for (; PI > 1 ; --PI) {
MBBValueMap::iterator I = DomValue.find(IDFI.getPath(PI-2));
if (I == DomValue.end() || I->second != OVNI)
break;
I->second = VNI;
}
//.........the rest of this code is omitted.........
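A SlotIndex idiom worth isolating from this example: the block end index names the boundary itself, so "which value is live out of this block" is asked one slot earlier. A hedged sketch with an invented helper name:

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
using namespace llvm;

// Hypothetical helper: which value of LI is live out of MBB, if any? The
// query is made one slot before the block end, the same trick as
// parentli_.getVNInfoAt(End.getPrevSlot()) above.
static VNInfo *valueLiveOut(LiveIntervals &LIS, const LiveInterval &LI,
                            MachineBasicBlock *MBB) {
  return LI.getVNInfoAt(LIS.getMBBEndIdx(MBB).getPrevSlot());
}

Example 9 later on reaches the same point with getVNInfoBefore(End), which is the equivalent convenience form.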
Example 6: DEBUG
/// spillAroundUses - insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(unsigned Reg) {
DEBUG(dbgs() << "spillAroundUses " << PrintReg(Reg) << '\n');
LiveInterval &OldLI = LIS.getInterval(Reg);
// Iterate over instructions using Reg.
for (MachineRegisterInfo::reg_bundle_iterator
RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
RegI != E; ) {
MachineInstr *MI = &*(RegI++);
// Debug values are not allowed to affect codegen.
if (MI->isDebugValue()) {
// Modify DBG_VALUE now that the value is in a spill slot.
bool IsIndirect = MI->isIndirectDebugValue();
uint64_t Offset = IsIndirect ? MI->getOperand(1).getImm() : 0;
const MDNode *Var = MI->getDebugVariable();
const MDNode *Expr = MI->getDebugExpression();
DebugLoc DL = MI->getDebugLoc();
DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
MachineBasicBlock *MBB = MI->getParent();
assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
"Expected inlined-at fields to agree");
BuildMI(*MBB, MBB->erase(MI), DL, TII.get(TargetOpcode::DBG_VALUE))
.addFrameIndex(StackSlot)
.addImm(Offset)
.addMetadata(Var)
.addMetadata(Expr);
continue;
}
// Ignore copies to/from snippets. We'll delete them.
if (SnippetCopies.count(MI))
continue;
// Stack slot accesses may coalesce away.
if (coalesceStackAccess(MI, Reg))
continue;
// Analyze instruction.
SmallVector<std::pair<MachineInstr*, unsigned>, 8> Ops;
MIBundleOperands::VirtRegInfo RI =
MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops);
// Find the slot index where this instruction reads and writes OldLI.
// This is usually the def slot, except for tied early clobbers.
SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getRegSlot(true)))
if (SlotIndex::isSameInstr(Idx, VNI->def))
Idx = VNI->def;
// Check for a sibling copy.
unsigned SibReg = isFullCopyOf(MI, Reg);
if (SibReg && isSibling(SibReg)) {
// This may actually be a copy between snippets.
if (isRegToSpill(SibReg)) {
DEBUG(dbgs() << "Found new snippet copy: " << *MI);
SnippetCopies.insert(MI);
continue;
}
if (RI.Writes) {
// Hoist the spill of a sib-reg copy.
if (hoistSpill(OldLI, MI)) {
// This COPY is now dead, the value is already in the stack slot.
MI->getOperand(0).setIsDead();
DeadDefs.push_back(MI);
continue;
}
} else {
// This is a reload for a sib-reg copy. Drop spills downstream.
LiveInterval &SibLI = LIS.getInterval(SibReg);
eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
// The COPY will fold to a reload below.
}
}
// Attempt to fold memory ops.
if (foldMemoryOperand(Ops))
continue;
// Create a new virtual register for spill/fill.
// FIXME: Infer regclass from instruction alone.
unsigned NewVReg = Edit->createFrom(Reg);
if (RI.Reads)
insertReload(NewVReg, Idx, MI);
// Rewrite instruction operands.
bool hasLiveDef = false;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
MachineOperand &MO = Ops[i].first->getOperand(Ops[i].second);
MO.setReg(NewVReg);
if (MO.isUse()) {
if (!Ops[i].first->isRegTiedToDefOperand(Ops[i].second))
MO.setIsKill();
} else {
if (!MO.isDead())
hasLiveDef = true;
}
}
//.........the rest of this code is omitted.........
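The Idx adjustment in this example hinges on SlotIndex::isSameInstr, which compares two indices while ignoring which slot of the instruction each one names. A hedged, stand-alone sketch of the same idiom (helper name invented):

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
using namespace llvm;

// Hypothetical helper: prefer the exact def slot recorded in the live range
// when it belongs to the same instruction as Idx (e.g. tied early clobbers).
static SlotIndex snapToSameInstrDef(const LiveInterval &LI, SlotIndex Idx) {
  if (const VNInfo *VNI = LI.getVNInfoAt(Idx.getRegSlot(true)))
    if (SlotIndex::isSameInstr(Idx, VNI->def))
      return VNI->def;
  return Idx;
}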
Example 7: MIBundleOperands
/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
MachineBasicBlock::iterator MI) {
// Analyze instruction
SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
MIBundleOperands::VirtRegInfo RI =
MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
if (!RI.Reads)
return false;
SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
if (!ParentVNI) {
DEBUG(dbgs() << "\tadding <undef> flags: ");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
MO.setIsUndef();
}
DEBUG(dbgs() << UseIdx << '\t' << *MI);
return true;
}
if (SnippetCopies.count(MI))
return false;
// Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.
LiveRangeEdit::Remat RM(ParentVNI);
SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
if (SibI != SibValues.end())
RM.OrigMI = SibI->second.DefMI;
if (!Edit->canRematerializeAt(RM, UseIdx, false)) {
markValueUsed(&VirtReg, ParentVNI);
DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
return false;
}
// If the instruction also writes VirtReg.reg, it had better not require the
// same register for uses and defs.
if (RI.Tied) {
markValueUsed(&VirtReg, ParentVNI);
DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
return false;
}
// Before rematerializing into a register for a single instruction, try to
// fold a load into the instruction. That avoids allocating a new register.
if (RM.OrigMI->canFoldAsLoad() &&
foldMemoryOperand(Ops, RM.OrigMI)) {
Edit->markRematerialized(RM.ParentVNI);
++NumFoldedLoads;
return true;
}
// Allocate a new register for the remat.
unsigned NewVReg = Edit->createFrom(Original);
// Finally we can rematerialize OrigMI before MI.
SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewVReg, RM,
TRI);
(void)DefIdx;
DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
<< *LIS.getInstructionFromIndex(DefIdx));
// Replace operands
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(Ops[i].second);
if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
MO.setReg(NewVReg);
MO.setIsKill();
}
}
DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI << '\n');
++NumRemats;
return true;
}
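One detail this example relies on when printing the result: a SlotIndex handed out by the maps can be turned back into its instruction, which is how DefIdx becomes the rematerialized instruction in the DEBUG output. A hedged sketch, with an invented helper name:

#include <cassert>
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
using namespace llvm;

// Hypothetical helper: an instruction that is in the slot index maps
// round-trips through its own SlotIndex.
static void checkIndexRoundTrip(LiveIntervals &LIS, MachineInstr *MI) {
  SlotIndex Idx = LIS.getInstructionIndex(MI);
  assert(LIS.getInstructionFromIndex(Idx) == MI &&
         "instruction should round-trip through its SlotIndex");
}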
Example 8: addReg
//.........part of this code is omitted.........
unsigned SrcReg = PHI->getOperand(1).getReg();
unsigned SrcColor = getRegColor(SrcReg);
unsigned NewReg = RegRenamingMap[SrcColor];
if (!NewReg) {
NewReg = SrcReg;
RegRenamingMap[SrcColor] = SrcReg;
}
MergeLIsAndRename(SrcReg, NewReg);
unsigned DestReg = PHI->getOperand(0).getReg();
if (!InsertedDestCopies.count(DestReg))
MergeLIsAndRename(DestReg, NewReg);
for (unsigned i = 3; i < PHI->getNumOperands(); i += 2) {
unsigned SrcReg = PHI->getOperand(i).getReg();
MergeLIsAndRename(SrcReg, NewReg);
}
++BBI;
LI->RemoveMachineInstrFromMaps(PHI);
PHI->eraseFromParent();
Changed = true;
}
}
// Due to the insertion of copies to split live ranges, the live intervals are
// guaranteed to not overlap, except in one case: an original PHI source and a
// PHI destination copy. In this case, they have the same value and thus don't
// truly intersect, so we merge them into the value live at that point.
// FIXME: Is there some better way we can handle this?
for (DestCopyMap::iterator I = InsertedDestCopies.begin(),
E = InsertedDestCopies.end(); I != E; ++I) {
unsigned DestReg = I->first;
unsigned DestColor = getRegColor(DestReg);
unsigned NewReg = RegRenamingMap[DestColor];
LiveInterval &DestLI = LI->getInterval(DestReg);
LiveInterval &NewLI = LI->getInterval(NewReg);
assert(DestLI.ranges.size() == 1
&& "PHI destination copy's live interval should be a single live "
"range from the beginning of the BB to the copy instruction.");
LiveRange *DestLR = DestLI.begin();
VNInfo *NewVNI = NewLI.getVNInfoAt(DestLR->start);
if (!NewVNI) {
NewVNI = NewLI.createValueCopy(DestLR->valno, LI->getVNInfoAllocator());
MachineInstr *CopyInstr = I->second;
CopyInstr->getOperand(1).setIsKill(true);
}
LiveRange NewLR(DestLR->start, DestLR->end, NewVNI);
NewLI.addRange(NewLR);
LI->removeInterval(DestReg);
MRI->replaceRegWith(DestReg, NewReg);
}
// Adjust the live intervals of all PHI source registers to handle the case
// where the PHIs in successor blocks were the only later uses of the source
// register.
for (SrcCopySet::iterator I = InsertedSrcCopySet.begin(),
E = InsertedSrcCopySet.end(); I != E; ++I) {
MachineBasicBlock *MBB = I->first;
unsigned SrcReg = I->second;
if (unsigned RenamedRegister = RegRenamingMap[getRegColor(SrcReg)])
SrcReg = RenamedRegister;
LiveInterval &SrcLI = LI->getInterval(SrcReg);
bool isLiveOut = false;
for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
SE = MBB->succ_end(); SI != SE; ++SI) {
if (SrcLI.liveAt(LI->getMBBStartIdx(*SI))) {
isLiveOut = true;
break;
}
}
if (isLiveOut)
continue;
MachineOperand *LastUse = findLastUse(MBB, SrcReg);
assert(LastUse);
SlotIndex LastUseIndex = LI->getInstructionIndex(LastUse->getParent());
SrcLI.removeRange(LastUseIndex.getDefIndex(), LI->getMBBEndIdx(MBB));
LastUse->setIsKill(true);
}
LI->renumber();
Allocator.Reset();
RegNodeMap.clear();
PHISrcDefs.clear();
InsertedSrcCopySet.clear();
InsertedSrcCopyMap.clear();
InsertedDestCopies.clear();
return Changed;
}
Example 9: assert
void StrongPHIElimination::InsertCopiesForPHI(MachineInstr *PHI,
MachineBasicBlock *MBB) {
assert(PHI->isPHI());
++NumPHIsLowered;
unsigned PHIColor = getPHIColor(PHI);
for (unsigned i = 1; i < PHI->getNumOperands(); i += 2) {
MachineOperand &SrcMO = PHI->getOperand(i);
// If a source is defined by an implicit def, there is no need to insert a
// copy in the predecessor.
if (SrcMO.isUndef())
continue;
unsigned SrcReg = SrcMO.getReg();
assert(TargetRegisterInfo::isVirtualRegister(SrcReg) &&
"Machine PHI Operands must all be virtual registers!");
MachineBasicBlock *PredBB = PHI->getOperand(i + 1).getMBB();
unsigned SrcColor = getRegColor(SrcReg);
// If neither the PHI nor the operand were isolated, then we only need to
// set the phi-kill flag on the VNInfo at this PHI.
if (PHIColor && SrcColor == PHIColor) {
LiveInterval &SrcInterval = LI->getInterval(SrcReg);
SlotIndex PredIndex = LI->getMBBEndIdx(PredBB);
VNInfo *SrcVNI = SrcInterval.getVNInfoBefore(PredIndex);
assert(SrcVNI);
SrcVNI->setHasPHIKill(true);
continue;
}
unsigned CopyReg = 0;
if (PHIColor) {
SrcCopyMap::const_iterator I
= InsertedSrcCopyMap.find(std::make_pair(PredBB, PHIColor));
CopyReg
= I != InsertedSrcCopyMap.end() ? I->second->getOperand(0).getReg() : 0;
}
if (!CopyReg) {
const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
CopyReg = MRI->createVirtualRegister(RC);
MachineBasicBlock::iterator
CopyInsertPoint = findPHICopyInsertPoint(PredBB, MBB, SrcReg);
unsigned SrcSubReg = SrcMO.getSubReg();
MachineInstr *CopyInstr = BuildMI(*PredBB,
CopyInsertPoint,
PHI->getDebugLoc(),
TII->get(TargetOpcode::COPY),
CopyReg).addReg(SrcReg, 0, SrcSubReg);
LI->InsertMachineInstrInMaps(CopyInstr);
++NumSrcCopiesInserted;
// addLiveRangeToEndOfBlock() also adds the phikill flag to the VNInfo for
// the newly added range.
LI->addLiveRangeToEndOfBlock(CopyReg, CopyInstr);
InsertedSrcCopySet.insert(std::make_pair(PredBB, SrcReg));
addReg(CopyReg);
if (PHIColor) {
unionRegs(PHIColor, CopyReg);
assert(getRegColor(CopyReg) != CopyReg);
} else {
PHIColor = CopyReg;
assert(getRegColor(CopyReg) == CopyReg);
}
if (!InsertedSrcCopyMap.count(std::make_pair(PredBB, PHIColor)))
InsertedSrcCopyMap[std::make_pair(PredBB, PHIColor)] = CopyInstr;
}
SrcMO.setReg(CopyReg);
// If SrcReg is not live beyond the PHI, trim its interval so that it is no
// longer live-in to MBB. Note that SrcReg may appear in other PHIs that are
// processed later, but this is still correct to do at this point because we
// never rely on LiveIntervals being correct while inserting copies.
// FIXME: Should this just count uses at PHIs like the normal PHIElimination
// pass does?
LiveInterval &SrcLI = LI->getInterval(SrcReg);
SlotIndex MBBStartIndex = LI->getMBBStartIdx(MBB);
SlotIndex PHIIndex = LI->getInstructionIndex(PHI);
SlotIndex NextInstrIndex = PHIIndex.getNextIndex();
if (SrcLI.liveAt(MBBStartIndex) && SrcLI.expiredAt(NextInstrIndex))
SrcLI.removeRange(MBBStartIndex, PHIIndex, true);
}
unsigned DestReg = PHI->getOperand(0).getReg();
unsigned DestColor = getRegColor(DestReg);
if (PHIColor && DestColor == PHIColor) {
LiveInterval &DestLI = LI->getInterval(DestReg);
// Set the phi-def flag for the VN at this PHI.
SlotIndex PHIIndex = LI->getInstructionIndex(PHI);
VNInfo *DestVNI = DestLI.getVNInfoAt(PHIIndex.getDefIndex());
assert(DestVNI);
DestVNI->setIsPHIDef(true);
//.........the rest of this code is omitted.........
Example 10: DEBUG
/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
/// reloading it.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
if (!OrigVNI) {
DEBUG(dbgs() << "\tadding <undef> flags: ");
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
MO.setIsUndef();
}
DEBUG(dbgs() << UseIdx << '\t' << *MI);
return true;
}
if (!reMattable_.count(OrigVNI)) {
DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
<< UseIdx << '\t' << *MI);
return false;
}
MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
usedValues_.insert(OrigVNI);
DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
return false;
}
// If the instruction also writes li_->reg, it had better not require the same
// register for uses and defs.
bool Reads, Writes;
SmallVector<unsigned, 8> Ops;
tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
if (Writes) {
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(Ops[i]);
if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
usedValues_.insert(OrigVNI);
DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
return false;
}
}
}
// Allocate a new register for the remat.
unsigned NewVReg = mri_.createVirtualRegister(rc_);
vrm_.grow();
LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
NewLI.markNotSpillable();
newIntervals_->push_back(&NewLI);
// Finally we can rematerialize OrigMI before MI.
MachineBasicBlock &MBB = *MI->getParent();
tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
MachineBasicBlock::iterator RematMI = MI;
SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
DEBUG(dbgs() << "\tremat: " << DefIdx << '\t' << *RematMI);
// Replace operands
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(Ops[i]);
if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
MO.setReg(NewVReg);
MO.setIsKill();
}
}
DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);
VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
lis_.getVNInfoAllocator());
NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
return true;
}
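This example predates the current slot naming: getUseIndex()/getDefIndex() here play roughly the role getRegSlot(true)/getRegSlot() play in the newer examples on this page (compare Example 3 with Example 11). Purely as a hedged re-expression against the newer names, and not the author's code:

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
using namespace llvm;

// Hedged sketch: after rematerializing a def right before UseMI, its value
// only needs to live from the new def slot up to UseMI's own register slot,
// which still covers the point where UseMI reads it.
static void coverRematUntilUse(LiveIntervals &LIS, LiveInterval &NewLI,
                               SlotIndex RematDefIdx, MachineInstr *UseMI) {
  SlotIndex End = LIS.getInstructionIndex(UseMI).getRegSlot();
  VNInfo *VNI = NewLI.getNextValue(RematDefIdx, LIS.getVNInfoAllocator());
  NewLI.addSegment(LiveRange::Segment(RematDefIdx, End, VNI));
}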
Example 11: report
void
MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
const MachineInstr *MI = MO->getParent();
const MCInstrDesc &MCID = MI->getDesc();
const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
// The first MCID.NumDefs operands must be explicit register defines
if (MONum < MCID.getNumDefs()) {
if (!MO->isReg())
report("Explicit definition must be a register", MO, MONum);
else if (!MO->isDef())
report("Explicit definition marked as use", MO, MONum);
else if (MO->isImplicit())
report("Explicit definition marked as implicit", MO, MONum);
} else if (MONum < MCID.getNumOperands()) {
// Don't check if it's the last operand in a variadic instruction. See,
// e.g., LDM_RET in the arm back end.
if (MO->isReg() &&
!(MI->isVariadic() && MONum == MCID.getNumOperands()-1)) {
if (MO->isDef() && !MCOI.isOptionalDef())
report("Explicit operand marked as def", MO, MONum);
if (MO->isImplicit())
report("Explicit operand marked as implicit", MO, MONum);
}
} else {
// ARM adds %reg0 operands to indicate predicates. We'll allow that.
if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
report("Extra explicit operand on non-variadic instruction", MO, MONum);
}
switch (MO->getType()) {
case MachineOperand::MO_Register: {
const unsigned Reg = MO->getReg();
if (!Reg)
return;
// Check Live Variables.
if (MI->isDebugValue()) {
// Liveness checks are not valid for debug values.
} else if (MO->isUse() && !MO->isUndef()) {
regsLiveInButUnused.erase(Reg);
bool isKill = false;
unsigned defIdx;
if (MI->isRegTiedToDefOperand(MONum, &defIdx)) {
// A two-addr use counts as a kill if use and def are the same.
unsigned DefReg = MI->getOperand(defIdx).getReg();
if (Reg == DefReg)
isKill = true;
else if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
report("Two-address instruction operands must be identical",
MO, MONum);
}
} else
isKill = MO->isKill();
if (isKill)
addRegWithSubRegs(regsKilled, Reg);
// Check that LiveVars knows this kill.
if (LiveVars && TargetRegisterInfo::isVirtualRegister(Reg) &&
MO->isKill()) {
LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
if (std::find(VI.Kills.begin(),
VI.Kills.end(), MI) == VI.Kills.end())
report("Kill missing from LiveVariables", MO, MONum);
}
// Check LiveInts liveness and kill.
if (TargetRegisterInfo::isVirtualRegister(Reg) &&
LiveInts && !LiveInts->isNotInMIMap(MI)) {
SlotIndex UseIdx = LiveInts->getInstructionIndex(MI).getRegSlot(true);
if (LiveInts->hasInterval(Reg)) {
const LiveInterval &LI = LiveInts->getInterval(Reg);
if (!LI.liveAt(UseIdx)) {
report("No live range at use", MO, MONum);
*OS << UseIdx << " is not live in " << LI << '\n';
}
// Check for extra kill flags.
// Note that we allow missing kill flags for now.
if (MO->isKill() && !LI.killedAt(UseIdx.getRegSlot())) {
report("Live range continues after kill flag", MO, MONum);
*OS << "Live range: " << LI << '\n';
}
} else {
report("Virtual register has no Live interval", MO, MONum);
}
}
// Use of a dead register.
if (!regsLive.count(Reg)) {
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
// Reserved registers may be used even when 'dead'.
if (!isReserved(Reg))
report("Using an undefined physical register", MO, MONum);
} else {
BBInfo &MInfo = MBBInfoMap[MI->getParent()];
// We don't know which virtual registers are live in, so only complain
// if vreg was killed in this MBB. Otherwise keep track of vregs that
// must be live in. PHI instructions are handled separately.
//.........the rest of this code is omitted.........
Example 12: assert
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Paired,
unsigned EltSize) {
MachineBasicBlock *MBB = I->getParent();
// Be sure to use .addOperand(), and not .addReg() with these. We want to be
// sure we preserve the subregister index and any register flags set on them.
const MachineOperand *Addr = TII->getNamedOperand(*I, AMDGPU::OpName::addr);
const MachineOperand *Data0 = TII->getNamedOperand(*I, AMDGPU::OpName::data0);
const MachineOperand *Data1
= TII->getNamedOperand(*Paired, AMDGPU::OpName::data0);
unsigned Offset0
= TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
unsigned Offset1
= TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;
unsigned NewOffset0 = Offset0 / EltSize;
unsigned NewOffset1 = Offset1 / EltSize;
unsigned Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
// Prefer the st64 form if we can use it, even if we can fit the offset in the
// non st64 version. I'm not sure if there's any real reason to do this.
bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
if (UseST64) {
NewOffset0 /= 64;
NewOffset1 /= 64;
Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32 : AMDGPU::DS_WRITE2ST64_B64;
}
assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
(NewOffset0 != NewOffset1) &&
"Computed offset doesn't fit");
const MCInstrDesc &Write2Desc = TII->get(Opc);
DebugLoc DL = I->getDebugLoc();
// repairIntervalsInRange() doesn't handle physical registers, so we have
// to update the M0 range manually.
SlotIndex PairedIndex = LIS->getInstructionIndex(*Paired);
LiveRange &M0Range = LIS->getRegUnit(*MCRegUnitIterator(AMDGPU::M0, TRI));
LiveRange::Segment *M0Segment = M0Range.getSegmentContaining(PairedIndex);
bool UpdateM0Range = M0Segment->end == PairedIndex.getRegSlot();
MachineInstrBuilder Write2
= BuildMI(*MBB, I, DL, Write2Desc)
.addOperand(*Addr) // addr
.addOperand(*Data0) // data0
.addOperand(*Data1) // data1
.addImm(NewOffset0) // offset0
.addImm(NewOffset1) // offset1
.addImm(0) // gds
.addMemOperand(*I->memoperands_begin())
.addMemOperand(*Paired->memoperands_begin());
// XXX - How do we express subregisters here?
unsigned OrigRegs[] = { Data0->getReg(), Data1->getReg(), Addr->getReg() };
LIS->RemoveMachineInstrFromMaps(*I);
LIS->RemoveMachineInstrFromMaps(*Paired);
I->eraseFromParent();
Paired->eraseFromParent();
// This doesn't handle physical registers like M0
LIS->repairIntervalsInRange(MBB, Write2, Write2, OrigRegs);
if (UpdateM0Range) {
SlotIndex Write2Index = LIS->getInstructionIndex(*Write2);
M0Segment->end = Write2Index.getRegSlot();
}
DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
return Write2.getInstr();
}
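The M0 handling above captures whether the paired instruction was the last user of the register unit's segment before rewriting, so the segment end can be moved to the merged instruction afterwards. A hedged sketch of that check in isolation (helper name invented):

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
using namespace llvm;

// Hypothetical helper: does the register unit's live segment covering MI end
// exactly at MI's register slot, i.e. is MI the segment's last user?
static bool unitSegmentEndsAt(LiveIntervals &LIS, unsigned Unit,
                              MachineInstr *MI) {
  SlotIndex Idx = LIS.getInstructionIndex(MI);
  LiveRange &LR = LIS.getRegUnit(Unit);
  if (LiveRange::Segment *S = LR.getSegmentContaining(Idx))
    return S->end == Idx.getRegSlot();
  return false;
}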
Example 13: ReplaceDominatedUses
// Replace uses of FromReg with ToReg if they are dominated by MI.
static bool ReplaceDominatedUses(MachineBasicBlock &MBB, MachineInstr &MI,
unsigned FromReg, unsigned ToReg,
const MachineRegisterInfo &MRI,
MachineDominatorTree &MDT,
LiveIntervals &LIS) {
bool Changed = false;
LiveInterval *FromLI = &LIS.getInterval(FromReg);
LiveInterval *ToLI = &LIS.getInterval(ToReg);
SlotIndex FromIdx = LIS.getInstructionIndex(MI).getRegSlot();
VNInfo *FromVNI = FromLI->getVNInfoAt(FromIdx);
SmallVector<SlotIndex, 4> Indices;
for (auto I = MRI.use_nodbg_begin(FromReg), E = MRI.use_nodbg_end();
I != E;) {
MachineOperand &O = *I++;
MachineInstr *Where = O.getParent();
// Check that MI dominates the instruction in the normal way.
if (&MI == Where || !MDT.dominates(&MI, Where))
continue;
// If this use gets a different value, skip it.
SlotIndex WhereIdx = LIS.getInstructionIndex(*Where);
VNInfo *WhereVNI = FromLI->getVNInfoAt(WhereIdx);
if (WhereVNI && WhereVNI != FromVNI)
continue;
// Make sure ToReg isn't clobbered before it gets there.
VNInfo *ToVNI = ToLI->getVNInfoAt(WhereIdx);
if (ToVNI && ToVNI != FromVNI)
continue;
Changed = true;
LLVM_DEBUG(dbgs() << "Setting operand " << O << " in " << *Where << " from "
<< MI << "\n");
O.setReg(ToReg);
// If the store's def was previously dead, it is no longer.
if (!O.isUndef()) {
MI.getOperand(0).setIsDead(false);
Indices.push_back(WhereIdx.getRegSlot());
}
}
if (Changed) {
// Extend ToReg's liveness.
LIS.extendToIndices(*ToLI, Indices);
// Shrink FromReg's liveness.
LIS.shrinkToUses(FromLI);
// If we replaced all dominated uses, FromReg is now killed at MI.
if (!FromLI->liveAt(FromIdx.getDeadSlot()))
MI.addRegisterKilled(FromReg, MBB.getParent()
->getSubtarget<WebAssemblySubtarget>()
.getRegisterInfo());
}
return Changed;
}
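The closing liveness test above is a compact SlotIndex idiom: a value killed at MI no longer survives past MI's defs, i.e. it is not live at the dead slot of MI's index. A hedged one-line sketch (helper name invented):

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
using namespace llvm;

// Hypothetical helper: after the rewrite, FromReg is killed at the
// instruction whose index is MIIdx exactly when its value is no longer live
// just past that instruction's defs.
static bool killedAtAfterRewrite(const LiveInterval &FromLI, SlotIndex MIIdx) {
  return !FromLI.liveAt(MIIdx.getDeadSlot());
}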
Example 14: DEBUG
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
MachineBasicBlock::iterator mi,
SlotIndex MIIdx,
MachineOperand& MO,
unsigned MOIdx,
LiveInterval &interval) {
DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, TRI));
// Virtual registers may be defined multiple times (due to phi
// elimination and 2-addr elimination). Much of what we do only has to be
// done once for the vreg. We use an empty interval to detect the first
// time we see a vreg.
LiveVariables::VarInfo& vi = LV->getVarInfo(interval.reg);
if (interval.empty()) {
// Get the Idx of the defining instructions.
SlotIndex defIndex = MIIdx.getRegSlot(MO.isEarlyClobber());
// Make sure the first definition is not a partial redefinition.
assert(!MO.readsReg() && "First def cannot also read virtual register "
"missing <undef> flag?");
VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);
assert(ValNo->id == 0 && "First value in interval is not 0?");
// Loop over all of the blocks that the vreg is defined in. There are
// two cases we have to handle here. The most common case is a vreg
// whose lifetime is contained within a basic block. In this case there
// will be a single kill, in MBB, which comes after the definition.
if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
// FIXME: what about dead vars?
SlotIndex killIdx;
if (vi.Kills[0] != mi)
killIdx = getInstructionIndex(vi.Kills[0]).getRegSlot();
else
killIdx = defIndex.getDeadSlot();
// If the kill happens after the definition, we have an intra-block
// live range.
if (killIdx > defIndex) {
assert(vi.AliveBlocks.empty() &&
"Shouldn't be alive across any blocks!");
LiveRange LR(defIndex, killIdx, ValNo);
interval.addRange(LR);
DEBUG(dbgs() << " +" << LR << "\n");
return;
}
}
// The other case we handle is when a virtual register lives to the end
// of the defining block, potentially live across some blocks, then is
// live into some number of blocks, but gets killed. Start by adding a
// range that goes from this definition to the end of the defining block.
LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
DEBUG(dbgs() << " +" << NewLR);
interval.addRange(NewLR);
bool PHIJoin = LV->isPHIJoin(interval.reg);
if (PHIJoin) {
// A phi join register is killed at the end of the MBB and revived as a
// new valno in the killing blocks.
assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
DEBUG(dbgs() << " phi-join");
} else {
// Iterate over all of the blocks that the variable is completely
// live in, adding [instrIndex(begin), instrIndex(end)+4) to the
// live interval.
for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
E = vi.AliveBlocks.end(); I != E; ++I) {
MachineBasicBlock *aliveBlock = MF->getBlockNumbered(*I);
LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
ValNo);
interval.addRange(LR);
DEBUG(dbgs() << " +" << LR);
}
}
// Finally, this virtual register is live from the start of any killing
// block to the 'use' slot of the killing instruction.
for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
MachineInstr *Kill = vi.Kills[i];
SlotIndex Start = getMBBStartIdx(Kill->getParent());
SlotIndex killIdx = getInstructionIndex(Kill).getRegSlot();
// Create the interval with a NEW value number. Note that this value
// number isn't actually defined by an instruction, weird huh? :)
if (PHIJoin) {
assert(getInstructionFromIndex(Start) == 0 &&
"PHI def index points at actual instruction.");
ValNo = interval.getNextValue(Start, VNInfoAllocator);
}
LiveRange LR(Start, killIdx, ValNo);
interval.addRange(LR);
DEBUG(dbgs() << " +" << LR);
}
} else {
if (MultipleDefsBySameMI(*mi, MOIdx))
// Multiple defs of the same virtual register by the same instruction.
// e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
//.........the rest of this code is omitted.........
Example 15: WorkList
bool LiveRangeCalc::findReachingDefs(LiveRange &LR, MachineBasicBlock &UseMBB,
SlotIndex Use, unsigned PhysReg,
ArrayRef<SlotIndex> Undefs) {
unsigned UseMBBNum = UseMBB.getNumber();
// Block numbers where LR should be live-in.
SmallVector<unsigned, 16> WorkList(1, UseMBBNum);
// Remember if we have seen more than one value.
bool UniqueVNI = true;
VNInfo *TheVNI = nullptr;
bool FoundUndef = false;
// Using Seen as a visited set, perform a BFS for all reaching defs.
for (unsigned i = 0; i != WorkList.size(); ++i) {
MachineBasicBlock *MBB = MF->getBlockNumbered(WorkList[i]);
#ifndef NDEBUG
if (MBB->pred_empty()) {
MBB->getParent()->verify();
errs() << "Use of " << printReg(PhysReg)
<< " does not have a corresponding definition on every path:\n";
const MachineInstr *MI = Indexes->getInstructionFromIndex(Use);
if (MI != nullptr)
errs() << Use << " " << *MI;
report_fatal_error("Use not jointly dominated by defs.");
}
if (TargetRegisterInfo::isPhysicalRegister(PhysReg) &&
!MBB->isLiveIn(PhysReg)) {
MBB->getParent()->verify();
const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
errs() << "The register " << printReg(PhysReg, TRI)
<< " needs to be live in to " << printMBBReference(*MBB)
<< ", but is missing from the live-in list.\n";
report_fatal_error("Invalid global physical register");
}
#endif
FoundUndef |= MBB->pred_empty();
for (MachineBasicBlock *Pred : MBB->predecessors()) {
// Is this a known live-out block?
if (Seen.test(Pred->getNumber())) {
if (VNInfo *VNI = Map[Pred].first) {
if (TheVNI && TheVNI != VNI)
UniqueVNI = false;
TheVNI = VNI;
}
continue;
}
SlotIndex Start, End;
std::tie(Start, End) = Indexes->getMBBRange(Pred);
// First time we see Pred. Try to determine the live-out value, but set
// it as null if Pred is live-through with an unknown value.
auto EP = LR.extendInBlock(Undefs, Start, End);
VNInfo *VNI = EP.first;
FoundUndef |= EP.second;
setLiveOutValue(Pred, EP.second ? &UndefVNI : VNI);
if (VNI) {
if (TheVNI && TheVNI != VNI)
UniqueVNI = false;
TheVNI = VNI;
}
if (VNI || EP.second)
continue;
// No, we need a live-in value for Pred as well
if (Pred != &UseMBB)
WorkList.push_back(Pred->getNumber());
else
// Loopback to UseMBB, so value is really live through.
Use = SlotIndex();
}
}
LiveIn.clear();
FoundUndef |= (TheVNI == nullptr || TheVNI == &UndefVNI);
if (!Undefs.empty() && FoundUndef)
UniqueVNI = false;
// Both updateSSA() and LiveRangeUpdater benefit from ordered blocks, but
// neither requires it. Skip the sorting overhead for small updates.
if (WorkList.size() > 4)
array_pod_sort(WorkList.begin(), WorkList.end());
// If a unique reaching def was found, blit in the live ranges immediately.
if (UniqueVNI) {
assert(TheVNI != nullptr && TheVNI != &UndefVNI);
LiveRangeUpdater Updater(&LR);
for (unsigned BN : WorkList) {
SlotIndex Start, End;
std::tie(Start, End) = Indexes->getMBBRange(BN);
// Trim the live range in UseMBB.
if (BN == UseMBBNum && Use.isValid())
End = Use;
else
Map[MF->getBlockNumbered(BN)] = LiveOutPair(TheVNI, nullptr);
//.........the rest of this code is omitted.........
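The live-in blit being built here walks blocks through SlotIndexes::getMBBRange and clips the range at the original use. A hedged sketch of that clipping step, with an invented helper name (the elided remainder of the example stays elided):

#include <tuple>
#include <utility>
#include "llvm/CodeGen/SlotIndexes.h"
using namespace llvm;

// Hypothetical helper: the [start, end) SlotIndex range of a block,
// optionally clipped at a use found inside the block (an invalid Use means
// the value is live through the whole block).
static std::pair<SlotIndex, SlotIndex>
blockRangeUpTo(const SlotIndexes &Indexes, const MachineBasicBlock &MBB,
               SlotIndex Use) {
  SlotIndex Start, End;
  std::tie(Start, End) = Indexes.getMBBRange(&MBB);
  if (Use.isValid() && Use < End)
    End = Use;
  return std::make_pair(Start, End);
}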