This page collects typical usage examples of the C++ SmallSet::count method. If you have been wondering how exactly SmallSet::count is used in C++, what it is for, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples of the containing class, SmallSet.
The following presents 15 code examples of SmallSet::count, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
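Before diving into the full-length examples, here is a minimal stand-alone sketch of the method itself: count(V) is a membership test that returns a nonzero value when V is in the set and 0 otherwise, which is why the examples below use it directly as an if condition. This snippet is illustrative only; it assumes an LLVM build environment providing llvm/ADT/SmallSet.h, and the variable names are invented.
#include "llvm/ADT/SmallSet.h"
#include <cstdio>

int main() {
  // Up to 16 elements are stored inline; beyond that, SmallSet
  // transparently spills to a heap-allocated set.
  llvm::SmallSet<int, 16> Frames;
  Frames.insert(3);
  Frames.insert(7);

  // count() is the membership test used throughout the examples below.
  if (Frames.count(3))
    std::printf("frame 3 already assigned\n");
  if (!Frames.count(5))
    std::printf("frame 5 not seen yet\n");

  Frames.erase(3);
  std::printf("after erase: %d\n", (int)Frames.count(3));
  return 0;
}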
Example 1: calculateFrameObjectOffsets
//......... some code omitted here .........
std::pair<int, int64_t> Entry = MFI->getLocalFrameObjectMap(i);
int64_t FIOffset = (StackGrowsDown ? -Offset : Offset) + Entry.second;
DEBUG(dbgs() << "alloc FI(" << Entry.first << ") at SP[" <<
FIOffset << "]\n");
MFI->setObjectOffset(Entry.first, FIOffset);
}
// Allocate the local block
Offset += MFI->getLocalFrameSize();
MaxAlign = std::max(Align, MaxAlign);
}
// Make sure that the stack protector comes before the local variables on the
// stack.
SmallSet<int, 16> LargeStackObjs;
if (MFI->getStackProtectorIndex() >= 0) {
AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
Offset, MaxAlign);
// Assign large stack objects first.
for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
if (MFI->isObjectPreAllocated(i) &&
MFI->getUseLocalStackAllocationBlock())
continue;
if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
continue;
if (RS && (int)i == RS->getScavengingFrameIndex())
continue;
if (MFI->isDeadObjectIndex(i))
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
if (!MFI->MayNeedStackProtector(i))
continue;
AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
LargeStackObjs.insert(i);
}
}
// Then assign frame offsets to stack objects that are not used to spill
// callee saved registers.
for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
if (MFI->isObjectPreAllocated(i) &&
MFI->getUseLocalStackAllocationBlock())
continue;
if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
continue;
if (RS && (int)i == RS->getScavengingFrameIndex())
continue;
if (MFI->isDeadObjectIndex(i))
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
if (LargeStackObjs.count(i))
continue;
AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
}
// Make sure the special register scavenging spill slot is closest to the
// stack pointer.
if (RS && (!TFI.hasFP(Fn) || RegInfo->needsStackRealignment(Fn) ||
!RegInfo->useFPForScavengingIndex(Fn))) {
int SFI = RS->getScavengingFrameIndex();
if (SFI >= 0)
AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
}
if (!TFI.targetHandlesStackFrameRounding()) {
// If we have reserved argument space for call sites in the function
// immediately on entry to the current function, count it as part of the
// overall stack size.
if (MFI->adjustsStack() && TFI.hasReservedCallFrame(Fn))
Offset += MFI->getMaxCallFrameSize();
// Round up the size to a multiple of the alignment. If the function has
// any calls or alloca's, align to the target's StackAlignment value to
// ensure that the callee's frame or the alloca data is suitably aligned;
// otherwise, for leaf functions, align to the TransientStackAlignment
// value.
unsigned StackAlign;
if (MFI->adjustsStack() || MFI->hasVarSizedObjects() ||
(RegInfo->needsStackRealignment(Fn) && MFI->getObjectIndexEnd() != 0))
StackAlign = TFI.getStackAlignment();
else
StackAlign = TFI.getTransientStackAlignment();
// If the frame pointer is eliminated, all frame offsets will be relative to
// SP not FP. Align to MaxAlign so this works.
StackAlign = std::max(StackAlign, MaxAlign);
unsigned AlignMask = StackAlign - 1;
Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);
}
// Update frame info to pretend that this is part of the stack...
int64_t StackSize = Offset - LocalAreaOffset;
MFI->setStackSize(StackSize);
NumBytesStackSpace += StackSize;
}
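Example 1 uses count() as an "already handled" filter: protector-relevant objects are laid out in a first pass and recorded in LargeStackObjs, and the second pass skips whatever count() reports as present. A condensed sketch of that two-pass pattern follows; the IsPriority/Assign callbacks are invented placeholders, not part of the original code.
#include "llvm/ADT/SmallSet.h"

// Two-pass layout: place "priority" objects first and record them, then
// place everything else, using count() to avoid assigning an index twice.
static void layoutTwoPass(unsigned NumObjects,
                          bool (*IsPriority)(unsigned),
                          void (*Assign)(unsigned)) {
  llvm::SmallSet<unsigned, 16> Done;
  for (unsigned i = 0; i != NumObjects; ++i) {
    if (IsPriority(i)) {
      Assign(i);
      Done.insert(i);
    }
  }
  for (unsigned i = 0; i != NumObjects; ++i) {
    if (Done.count(i)) // already placed in the first pass
      continue;
    Assign(i);
  }
}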
Example 2: HandlePhysRegKill
bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
MachineInstr *LastDef = PhysRegDef[Reg];
MachineInstr *LastUse = PhysRegUse[Reg];
if (!LastDef && !LastUse)
return false;
MachineInstr *LastRefOrPartRef = LastUse ? LastUse : LastDef;
unsigned LastRefOrPartRefDist = DistanceMap[LastRefOrPartRef];
// The whole register is used.
// AL =
// AH =
//
// = AX
// = AL, AX<imp-use, kill>
// AX =
//
// Or whole register is defined, but not used at all.
// AX<dead> =
// ...
// AX =
//
// Or whole register is defined, but only partly used.
// AX<dead> = AL<imp-def>
// = AL<kill>
// AX =
MachineInstr *LastPartDef = 0;
unsigned LastPartDefDist = 0;
SmallSet<unsigned, 8> PartUses;
for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs) {
MachineInstr *Def = PhysRegDef[SubReg];
if (Def && Def != LastDef) {
// There was a def of this sub-register in between. This is a partial
// def, keep track of the last one.
unsigned Dist = DistanceMap[Def];
if (Dist > LastPartDefDist) {
LastPartDefDist = Dist;
LastPartDef = Def;
}
continue;
}
if (MachineInstr *Use = PhysRegUse[SubReg]) {
PartUses.insert(SubReg);
for (const uint16_t *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
PartUses.insert(*SS);
unsigned Dist = DistanceMap[Use];
if (Dist > LastRefOrPartRefDist) {
LastRefOrPartRefDist = Dist;
LastRefOrPartRef = Use;
}
}
}
if (!PhysRegUse[Reg]) {
// Partial uses. Mark register def dead and add implicit def of
// sub-registers which are used.
// EAX<dead> = op AL<imp-def>
// That is, EAX def is dead but AL def extends past it.
PhysRegDef[Reg]->addRegisterDead(Reg, TRI, true);
for (const uint16_t *SubRegs = TRI->getSubRegisters(Reg);
unsigned SubReg = *SubRegs; ++SubRegs) {
if (!PartUses.count(SubReg))
continue;
bool NeedDef = true;
if (PhysRegDef[Reg] == PhysRegDef[SubReg]) {
MachineOperand *MO = PhysRegDef[Reg]->findRegisterDefOperand(SubReg);
if (MO) {
NeedDef = false;
assert(!MO->isDead());
}
}
if (NeedDef)
PhysRegDef[Reg]->addOperand(MachineOperand::CreateReg(SubReg,
true/*IsDef*/, true/*IsImp*/));
MachineInstr *LastSubRef = FindLastRefOrPartRef(SubReg);
if (LastSubRef)
LastSubRef->addRegisterKilled(SubReg, TRI, true);
else {
LastRefOrPartRef->addRegisterKilled(SubReg, TRI, true);
PhysRegUse[SubReg] = LastRefOrPartRef;
for (const uint16_t *SSRegs = TRI->getSubRegisters(SubReg);
unsigned SSReg = *SSRegs; ++SSRegs)
PhysRegUse[SSReg] = LastRefOrPartRef;
}
for (const uint16_t *SS = TRI->getSubRegisters(SubReg); *SS; ++SS)
PartUses.erase(*SS);
}
} else if (LastRefOrPartRef == PhysRegDef[Reg] && LastRefOrPartRef != MI) {
if (LastPartDef)
// The last partial def kills the register.
LastPartDef->addOperand(MachineOperand::CreateReg(Reg, false/*IsDef*/,
true/*IsImp*/, true/*IsKill*/));
else {
MachineOperand *MO =
LastRefOrPartRef->findRegisterDefOperand(Reg, false, TRI);
bool NeedEC = MO->isEarlyClobber() && MO->getReg() != Reg;
// If the last reference is the last def, then it's not used at all.
// That is, unless we are currently processing the last reference itself.
LastRefOrPartRef->addRegisterDead(Reg, TRI, true);
if (NeedEC) {
//......... some code omitted here .........
Example 3: runOnMachineFunction
//......... some code omitted here .........
// Process all defs.
for (unsigned i = 0, e = DefRegs.size(); i != e; ++i) {
unsigned MOReg = DefRegs[i];
if (TargetRegisterInfo::isVirtualRegister(MOReg))
HandleVirtRegDef(MOReg, MI);
else if (!ReservedRegisters[MOReg])
HandlePhysRegDef(MOReg, MI, Defs);
}
UpdatePhysRegDefs(MI, Defs);
}
// Handle any virtual assignments from PHI nodes which might be at the
// bottom of this basic block. We check all of our successor blocks to see
// if they have PHI nodes, and if so, we simulate an assignment at the end
// of the current block.
if (!PHIVarInfo[MBB->getNumber()].empty()) {
SmallVector<unsigned, 4>& VarInfoVec = PHIVarInfo[MBB->getNumber()];
for (SmallVector<unsigned, 4>::iterator I = VarInfoVec.begin(),
E = VarInfoVec.end(); I != E; ++I)
// Mark it alive only in the block we are representing.
MarkVirtRegAliveInBlock(getVarInfo(*I),MRI->getVRegDef(*I)->getParent(),
MBB);
}
// Finally, if the last instruction in the block is a return, make sure to
// mark it as using all of the live-out values in the function.
// Things marked both call and return are tail calls; do not do this for
// them. The tail callee need not take the same registers as input
// that it produces as output, and there are dependencies for its input
// registers elsewhere.
if (!MBB->empty() && MBB->back().isReturn()
&& !MBB->back().isCall()) {
MachineInstr *Ret = &MBB->back();
for (MachineRegisterInfo::liveout_iterator
I = MF->getRegInfo().liveout_begin(),
E = MF->getRegInfo().liveout_end(); I != E; ++I) {
assert(TargetRegisterInfo::isPhysicalRegister(*I) &&
"Cannot have a live-out virtual register!");
HandlePhysRegUse(*I, Ret);
// Add live-out registers as implicit uses.
if (!Ret->readsRegister(*I))
Ret->addOperand(MachineOperand::CreateReg(*I, false, true));
}
}
// MachineCSE may CSE instructions which write to non-allocatable physical
// registers across MBBs. Remember if any reserved register is liveout.
SmallSet<unsigned, 4> LiveOuts;
for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
SE = MBB->succ_end(); SI != SE; ++SI) {
MachineBasicBlock *SuccMBB = *SI;
if (SuccMBB->isLandingPad())
continue;
for (MachineBasicBlock::livein_iterator LI = SuccMBB->livein_begin(),
LE = SuccMBB->livein_end(); LI != LE; ++LI) {
unsigned LReg = *LI;
if (!TRI->isInAllocatableClass(LReg))
// Ignore other live-ins, e.g. those that are live into landing pads.
LiveOuts.insert(LReg);
}
}
// Loop over PhysRegDef / PhysRegUse, killing any registers that are
// available at the end of the basic block.
for (unsigned i = 0; i != NumRegs; ++i)
if ((PhysRegDef[i] || PhysRegUse[i]) && !LiveOuts.count(i))
HandlePhysRegDef(i, 0, Defs);
std::fill(PhysRegDef, PhysRegDef + NumRegs, (MachineInstr*)0);
std::fill(PhysRegUse, PhysRegUse + NumRegs, (MachineInstr*)0);
}
// Convert and transfer the dead / killed information we have gathered into
// VirtRegInfo onto MI's.
for (unsigned i = 0, e1 = VirtRegInfo.size(); i != e1; ++i) {
const unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
for (unsigned j = 0, e2 = VirtRegInfo[Reg].Kills.size(); j != e2; ++j)
if (VirtRegInfo[Reg].Kills[j] == MRI->getVRegDef(Reg))
VirtRegInfo[Reg].Kills[j]->addRegisterDead(Reg, TRI);
else
VirtRegInfo[Reg].Kills[j]->addRegisterKilled(Reg, TRI);
}
// Check to make sure there are no unreachable blocks in the MC CFG for the
// function. If there are, it is due to a bug in the instruction selector or
// some other part of the code generator.
#ifndef NDEBUG
for(MachineFunction::iterator i = MF->begin(), e = MF->end(); i != e; ++i)
assert(Visited.count(&*i) != 0 && "unreachable basic block found");
#endif
delete[] PhysRegDef;
delete[] PhysRegUse;
delete[] PHIVarInfo;
return false;
}
Example 4: runOnFunction
//......... some code omitted here .........
// Move the body of the function into the new rewritten function, and replace
// this function with a stub.
NewFunc->getBasicBlockList().splice(NewFunc->begin(), F.getBasicBlockList());
for (std::pair<ReturnInst *, ReplacementVec> &Replacement : Replacements) {
ReturnInst *RI = Replacement.first;
IRBuilder<> B(RI);
B.SetCurrentDebugLocation(RI->getDebugLoc());
int RetIdx = 0;
Value *NewRetVal = UndefValue::get(NewRetTy);
Value *RetVal = RI->getReturnValue();
if (RetVal)
NewRetVal = B.CreateInsertValue(NewRetVal, RetVal, RetIdx++);
for (std::pair<Argument *, Value *> ReturnPoint : Replacement.second) {
Argument *Arg = ReturnPoint.first;
Value *Val = ReturnPoint.second;
Type *EltTy = Arg->getType()->getPointerElementType();
if (Val->getType() != EltTy) {
Type *EffectiveEltTy = EltTy;
if (StructType *CT = dyn_cast<StructType>(EltTy)) {
assert(CT->getNumElements() == 1);
EffectiveEltTy = CT->getElementType(0);
}
if (DL->getTypeSizeInBits(EffectiveEltTy) !=
DL->getTypeSizeInBits(Val->getType())) {
assert(isVec3ToVec4Shuffle(EffectiveEltTy, Val->getType()));
Val = B.CreateShuffleVector(Val, UndefValue::get(Val->getType()),
{ 0, 1, 2 });
}
Val = B.CreateBitCast(Val, EffectiveEltTy);
// Re-create single element composite.
if (EltTy != EffectiveEltTy)
Val = B.CreateInsertValue(UndefValue::get(EltTy), Val, 0);
}
NewRetVal = B.CreateInsertValue(NewRetVal, Val, RetIdx++);
}
if (RetVal)
RI->setOperand(0, NewRetVal);
else {
B.CreateRet(NewRetVal);
RI->eraseFromParent();
}
}
SmallVector<Value *, 16> StubCallArgs;
for (Argument &Arg : F.args()) {
if (OutArgIndexes.count(Arg.getArgNo())) {
// It's easier to preserve the type of the argument list. We rely on
// DeadArgumentElimination to take care of these.
StubCallArgs.push_back(UndefValue::get(Arg.getType()));
} else {
StubCallArgs.push_back(&Arg);
}
}
BasicBlock *StubBB = BasicBlock::Create(Ctx, "", &F);
IRBuilder<> B(StubBB);
CallInst *StubCall = B.CreateCall(NewFunc, StubCallArgs);
int RetIdx = RetTy->isVoidTy() ? 0 : 1;
for (Argument &Arg : F.args()) {
if (!OutArgIndexes.count(Arg.getArgNo()))
continue;
PointerType *ArgType = cast<PointerType>(Arg.getType());
auto *EltTy = ArgType->getElementType();
unsigned Align = Arg.getParamAlignment();
if (Align == 0)
Align = DL->getABITypeAlignment(EltTy);
Value *Val = B.CreateExtractValue(StubCall, RetIdx++);
Type *PtrTy = Val->getType()->getPointerTo(ArgType->getAddressSpace());
// We can peek through bitcasts, so the type may not match.
Value *PtrVal = B.CreateBitCast(&Arg, PtrTy);
B.CreateAlignedStore(Val, PtrVal, Align);
}
if (!RetTy->isVoidTy()) {
B.CreateRet(B.CreateExtractValue(StubCall, 0));
} else {
B.CreateRetVoid();
}
// The function is now a stub we want to inline.
F.addFnAttr(Attribute::AlwaysInline);
++NumOutArgumentFunctionsReplaced;
return true;
}
Example 5: verifyCTRBranch
static bool verifyCTRBranch(MachineBasicBlock *MBB,
MachineBasicBlock::iterator I) {
MachineBasicBlock::iterator BI = I;
SmallSet<MachineBasicBlock *, 16> Visited;
SmallVector<MachineBasicBlock *, 8> Preds;
bool CheckPreds;
if (I == MBB->begin()) {
Visited.insert(MBB);
goto queue_preds;
} else
--I;
check_block:
Visited.insert(MBB);
if (I == MBB->end())
goto queue_preds;
CheckPreds = true;
for (MachineBasicBlock::iterator IE = MBB->begin();; --I) {
unsigned Opc = I->getOpcode();
if (Opc == PPC::MTCTRloop || Opc == PPC::MTCTR8loop) {
CheckPreds = false;
break;
}
if (I != BI && clobbersCTR(I)) {
DEBUG(dbgs() << "BB#" << MBB->getNumber() << " (" <<
MBB->getFullName() << ") instruction " << *I <<
" clobbers CTR, invalidating " << "BB#" <<
BI->getParent()->getNumber() << " (" <<
BI->getParent()->getFullName() << ") instruction " <<
*BI << "\n");
return false;
}
if (I == IE)
break;
}
if (!CheckPreds && Preds.empty())
return true;
if (CheckPreds) {
queue_preds:
if (MachineFunction::iterator(MBB) == MBB->getParent()->begin()) {
DEBUG(dbgs() << "Unable to find a MTCTR instruction for BB#" <<
BI->getParent()->getNumber() << " (" <<
BI->getParent()->getFullName() << ") instruction " <<
*BI << "\n");
return false;
}
for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
PIE = MBB->pred_end(); PI != PIE; ++PI)
Preds.push_back(*PI);
}
do {
MBB = Preds.pop_back_val();
if (!Visited.count(MBB)) {
I = MBB->getLastNonDebugInstr();
goto check_block;
}
} while (!Preds.empty());
return true;
}
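The Visited set in Example 5 shows the most common count() idiom in this collection: guarding a worklist traversal so each block is processed at most once. Below is a minimal self-contained sketch of the same pattern, with an invented Node type standing in for MachineBasicBlock.
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"

struct Node {
  llvm::SmallVector<Node *, 4> Preds;
};

// Visit every transitive predecessor of Start exactly once, mirroring the
// Visited/Preds bookkeeping of verifyCTRBranch above.
static void visitAllPreds(Node *Start) {
  llvm::SmallSet<Node *, 16> Visited;
  llvm::SmallVector<Node *, 8> Worklist;
  Worklist.push_back(Start);
  while (!Worklist.empty()) {
    Node *N = Worklist.pop_back_val();
    if (Visited.count(N)) // already processed; skip
      continue;
    Visited.insert(N);
    for (Node *P : N->Preds)
      Worklist.push_back(P);
  }
}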
Example 6: runOnLoop
//......... some code omitted here .........
SCEVExpander SCEVE(*SE, "pistart");
Value *BasePtrStart = SCEVE.expandCodeFor(BasePtrStartSCEV, I8PtrTy,
LoopPredecessor->getTerminator());
// Note that LoopPredecessor might occur in the predecessor list multiple
// times, and we need to add it the right number of times.
for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
PI != PE; ++PI) {
if (*PI != LoopPredecessor)
continue;
NewPHI->addIncoming(BasePtrStart, LoopPredecessor);
}
Instruction *InsPoint = Header->getFirstInsertionPt();
GetElementPtrInst *PtrInc =
GetElementPtrInst::Create(NewPHI, BasePtrIncSCEV->getValue(),
MemI->hasName() ? MemI->getName() + ".inc" : "", InsPoint);
PtrInc->setIsInBounds(IsPtrInBounds(BasePtr));
for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
PI != PE; ++PI) {
if (*PI == LoopPredecessor)
continue;
NewPHI->addIncoming(PtrInc, *PI);
}
Instruction *NewBasePtr;
if (PtrInc->getType() != BasePtr->getType())
NewBasePtr = new BitCastInst(PtrInc, BasePtr->getType(),
PtrInc->hasName() ? PtrInc->getName() + ".cast" : "", InsPoint);
else
NewBasePtr = PtrInc;
if (Instruction *IDel = dyn_cast<Instruction>(BasePtr))
BBChanged.insert(IDel->getParent());
BasePtr->replaceAllUsesWith(NewBasePtr);
RecursivelyDeleteTriviallyDeadInstructions(BasePtr);
Value *LastNewPtr = NewBasePtr;
for (Bucket::iterator I = std::next(Buckets[i].begin()),
IE = Buckets[i].end(); I != IE; ++I) {
Value *Ptr = GetPointerOperand(I->second);
assert(Ptr && "No pointer operand");
if (Ptr == LastNewPtr)
continue;
Instruction *RealNewPtr;
const SCEVConstant *Diff =
cast<SCEVConstant>(SE->getMinusSCEV(I->first, BasePtrSCEV));
if (Diff->isZero()) {
RealNewPtr = NewBasePtr;
} else {
Instruction *PtrIP = dyn_cast<Instruction>(Ptr);
if (PtrIP && isa<Instruction>(NewBasePtr) &&
cast<Instruction>(NewBasePtr)->getParent() == PtrIP->getParent())
PtrIP = 0;
else if (isa<PHINode>(PtrIP))
PtrIP = PtrIP->getParent()->getFirstInsertionPt();
else if (!PtrIP)
PtrIP = I->second;
GetElementPtrInst *NewPtr =
GetElementPtrInst::Create(PtrInc, Diff->getValue(),
I->second->hasName() ? I->second->getName() + ".off" : "", PtrIP);
if (!PtrIP)
NewPtr->insertAfter(cast<Instruction>(PtrInc));
NewPtr->setIsInBounds(IsPtrInBounds(Ptr));
RealNewPtr = NewPtr;
}
if (Instruction *IDel = dyn_cast<Instruction>(Ptr))
BBChanged.insert(IDel->getParent());
Instruction *ReplNewPtr;
if (Ptr->getType() != RealNewPtr->getType()) {
ReplNewPtr = new BitCastInst(RealNewPtr, Ptr->getType(),
Ptr->hasName() ? Ptr->getName() + ".cast" : "");
ReplNewPtr->insertAfter(RealNewPtr);
} else
ReplNewPtr = RealNewPtr;
Ptr->replaceAllUsesWith(ReplNewPtr);
RecursivelyDeleteTriviallyDeadInstructions(Ptr);
LastNewPtr = RealNewPtr;
}
MadeChange = true;
}
for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
I != IE; ++I) {
if (BBChanged.count(*I))
DeleteDeadPHIs(*I);
}
return MadeChange;
}
Example 7: HoistRegionPostRA
/// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
/// invariants out to the preheader.
void MachineLICM::HoistRegionPostRA() {
unsigned NumRegs = TRI->getNumRegs();
unsigned *PhysRegDefs = new unsigned[NumRegs];
std::fill(PhysRegDefs, PhysRegDefs + NumRegs, 0);
SmallVector<CandidateInfo, 32> Candidates;
SmallSet<int, 32> StoredFIs;
// Walk the entire region, count number of defs for each register, and
// collect potential LICM candidates.
const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
MachineBasicBlock *BB = Blocks[i];
// Conservatively treat live-in's as an external def.
// FIXME: That means a reload that's reused in successor block(s) will not
// be LICM'ed.
for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
E = BB->livein_end(); I != E; ++I) {
unsigned Reg = *I;
++PhysRegDefs[Reg];
for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
++PhysRegDefs[*AS];
}
for (MachineBasicBlock::iterator
MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
MachineInstr *MI = &*MII;
ProcessMI(MI, PhysRegDefs, StoredFIs, Candidates);
}
}
// Now evaluate whether the potential candidates qualify.
// 1. Check if the candidate defined register is defined by another
// instruction in the loop.
// 2. If the candidate is a load from stack slot (always true for now),
// check if the slot is stored anywhere in the loop.
for (unsigned i = 0, e = Candidates.size(); i != e; ++i) {
if (Candidates[i].FI != INT_MIN &&
StoredFIs.count(Candidates[i].FI))
continue;
if (PhysRegDefs[Candidates[i].Def] == 1) {
bool Safe = true;
MachineInstr *MI = Candidates[i].MI;
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
const MachineOperand &MO = MI->getOperand(j);
if (!MO.isReg() || MO.isDef() || !MO.getReg())
continue;
if (PhysRegDefs[MO.getReg()]) {
// If it's using a non-loop-invariant register, then it's obviously
// not safe to hoist.
Safe = false;
break;
}
}
if (Safe)
HoistPostRA(MI, Candidates[i].Def);
}
}
delete[] PhysRegDefs;
}
Example 8: calculateFrameObjectOffsets
/// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
/// abstract stack objects.
///
void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
// Loop over all of the stack objects, assigning sequential addresses...
MachineFrameInfo *MFI = Fn.getFrameInfo();
const TargetFrameLowering &TFI = *Fn.getSubtarget().getFrameLowering();
bool StackGrowsDown =
TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
int64_t Offset = 0;
unsigned MaxAlign = 0;
StackProtector *SP = &getAnalysis<StackProtector>();
// Make sure that the stack protector comes before the local variables on the
// stack.
SmallSet<int, 16> ProtectedObjs;
if (MFI->getStackProtectorIndex() >= 0) {
StackObjSet LargeArrayObjs;
StackObjSet SmallArrayObjs;
StackObjSet AddrOfObjs;
AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), Offset,
StackGrowsDown, MaxAlign);
// Assign large stack objects first.
for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
if (MFI->isDeadObjectIndex(i))
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
case StackProtector::SSPLK_None:
continue;
case StackProtector::SSPLK_SmallArray:
SmallArrayObjs.insert(i);
continue;
case StackProtector::SSPLK_AddrOf:
AddrOfObjs.insert(i);
continue;
case StackProtector::SSPLK_LargeArray:
LargeArrayObjs.insert(i);
continue;
}
llvm_unreachable("Unexpected SSPLayoutKind.");
}
AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
}
// Then assign frame offsets to stack objects that are not used to spill
// callee saved registers.
for (unsigned i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
if (MFI->isDeadObjectIndex(i))
continue;
if (MFI->getStackProtectorIndex() == (int)i)
continue;
if (ProtectedObjs.count(i))
continue;
AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
}
// Remember how big this blob of stack space is
MFI->setLocalFrameSize(Offset);
MFI->setLocalFrameMaxAlign(MaxAlign);
}
Example 9: finalizeBundle
/// finalizeBundle - Finalize a machine instruction bundle which includes
/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
/// This routine adds a BUNDLE instruction to represent the bundle, it adds
/// IsInternalRead markers to MachineOperands which are defined inside the
/// bundle, and it copies externally visible defs and uses to the BUNDLE
/// instruction.
void llvm::finalizeBundle(MachineBasicBlock &MBB,
MachineBasicBlock::instr_iterator FirstMI,
MachineBasicBlock::instr_iterator LastMI) {
assert(FirstMI != LastMI && "Empty bundle?");
MIBundleBuilder Bundle(MBB, FirstMI, LastMI);
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
MachineInstrBuilder MIB =
BuildMI(MF, FirstMI->getDebugLoc(), TII->get(TargetOpcode::BUNDLE));
Bundle.prepend(MIB);
SmallVector<unsigned, 32> LocalDefs;
SmallSet<unsigned, 32> LocalDefSet;
SmallSet<unsigned, 8> DeadDefSet;
SmallSet<unsigned, 16> KilledDefSet;
SmallVector<unsigned, 8> ExternUses;
SmallSet<unsigned, 8> ExternUseSet;
SmallSet<unsigned, 8> KilledUseSet;
SmallSet<unsigned, 8> UndefUseSet;
SmallVector<MachineOperand*, 4> Defs;
for (; FirstMI != LastMI; ++FirstMI) {
for (unsigned i = 0, e = FirstMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = FirstMI->getOperand(i);
if (!MO.isReg())
continue;
if (MO.isDef()) {
Defs.push_back(&MO);
continue;
}
unsigned Reg = MO.getReg();
if (!Reg)
continue;
assert(TargetRegisterInfo::isPhysicalRegister(Reg));
if (LocalDefSet.count(Reg)) {
MO.setIsInternalRead();
if (MO.isKill())
// Internal def is now killed.
KilledDefSet.insert(Reg);
} else {
if (ExternUseSet.insert(Reg).second) {
ExternUses.push_back(Reg);
if (MO.isUndef())
UndefUseSet.insert(Reg);
}
if (MO.isKill())
// External def is now killed.
KilledUseSet.insert(Reg);
}
}
for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
MachineOperand &MO = *Defs[i];
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (LocalDefSet.insert(Reg).second) {
LocalDefs.push_back(Reg);
if (MO.isDead()) {
DeadDefSet.insert(Reg);
}
} else {
// Re-defined inside the bundle, it's no longer killed.
KilledDefSet.erase(Reg);
if (!MO.isDead())
// Previously defined but dead.
DeadDefSet.erase(Reg);
}
if (!MO.isDead()) {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
if (LocalDefSet.insert(SubReg).second)
LocalDefs.push_back(SubReg);
}
}
}
Defs.clear();
}
SmallSet<unsigned, 32> Added;
for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
unsigned Reg = LocalDefs[i];
if (Added.insert(Reg).second) {
// If it's not live beyond end of the bundle, mark it dead.
bool isDead = DeadDefSet.count(Reg) || KilledDefSet.count(Reg);
MIB.addReg(Reg, getDefRegState(true) | getDeadRegState(isDead) |
getImplRegState(true));
}
//......... some code omitted here .........
Example 10: IsSecurityRelatedCast
bool CastVerifier::IsSecurityRelatedCast(Value *value, Type *CastedTy,
Type *OrigTy,
SmallSet<Value *, 16> &Visited) {
// Propagate the values originating from the bitcasted value. If any
// propagated value is 1) used by a return/call/invoke instruction, 2) not
// a local variable, or 3) touches memory beyond the boundary of the
// original type (before the bit-cast), this is a security-related
// static_cast. Otherwise it is a non-security static_cast and we remove
// the instrumented instructions.
if (Visited.count(value)) // Already visited.
return false;
Type *valueTy = value->getType();
CVER_DEBUG("Visit : " << *value << " (" << *valueTy << ")\n");
Visited.insert(value);
// Check how the value is taken.
if (isa<GlobalValue>(value)) {
CVER_DEBUG("\t True (global)\n");
return true;
} else if (LoadInst *LI = dyn_cast<LoadInst>(value)) {
// If the value is taken from the outside, we lose control over it, so
// mark it as security sensitive.
if (LI->getType() != CastedTy) {
CVER_DEBUG("\t True (heap?)\n");
return true;
}
}
// Check how the value is used.
for (User *user : value->users()) {
CVER_DEBUG("\t (user) " << *user << "\n");
if (StoreInst *SI = dyn_cast<StoreInst>(user)) {
// store <CastedTy> <value>, <CastedTy>* <ptr>
if (SI->getValueOperand()->getType() == CastedTy ||
SI->getValueOperand()->getType() == valueTy) {
if (IsSecurityRelatedCast(SI->getPointerOperand(),
CastedTy, OrigTy, Visited))
return true;
} else {
return true;
}
} // end of StoreInst
else if (LoadInst *LI = dyn_cast<LoadInst>(user)) {
// <value> = load <ty>* <ptr>
if (LI->getType() == CastedTy) {
if (IsSecurityRelatedCast(user, CastedTy, OrigTy, Visited))
return true;
} else {
return true;
}
}
else if (BitCastInst *BCI = dyn_cast<BitCastInst>(user)) {
// <result> = bitcast <ty> <value> to <ty2>
// If it is cast to a type larger than the original type, this must be
// security sensitive. If it is smaller, then all the following memory
// accesses will be safe.
// FIXME: use the original type, not the casted type.
if (BCI->getType()->getIntegerBitWidth() > OrigTy->getIntegerBitWidth()) {
return true;
}
}
else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(user)) {
if (GEP->getPointerOperandType() == CastedTy)
return true;
}
else if (isa<ReturnInst>(user) ||
isa<CallInst>(user) ||
isa<InvokeInst>(user)) {
return true;
}
} // end of User loop.
return false;
}
Example 11: runOnMachineFunction
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
DEBUG(dbgs() << "********** PROCESS IMPLICIT DEFS **********\n"
<< "********** Function: "
<< ((Value*)fn.getFunction())->getName() << '\n');
bool Changed = false;
TII = fn.getTarget().getInstrInfo();
TRI = fn.getTarget().getRegisterInfo();
MRI = &fn.getRegInfo();
LV = &getAnalysis<LiveVariables>();
SmallSet<unsigned, 8> ImpDefRegs;
SmallVector<MachineInstr*, 8> ImpDefMIs;
SmallVector<MachineInstr*, 4> RUses;
SmallPtrSet<MachineBasicBlock*,16> Visited;
SmallPtrSet<MachineInstr*, 8> ModInsts;
MachineBasicBlock *Entry = fn.begin();
for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
DFI != E; ++DFI) {
MachineBasicBlock *MBB = *DFI;
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
I != E; ) {
MachineInstr *MI = &*I;
++I;
if (MI->isImplicitDef()) {
ImpDefMIs.push_back(MI);
// Is this a sub-register read-modify-write?
if (MI->getOperand(0).readsReg())
continue;
unsigned Reg = MI->getOperand(0).getReg();
ImpDefRegs.insert(Reg);
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
for (const unsigned *SS = TRI->getSubRegisters(Reg); *SS; ++SS)
ImpDefRegs.insert(*SS);
}
continue;
}
// Eliminate %reg1032:sub<def> = COPY undef.
if (MI->isCopy() && MI->getOperand(0).readsReg()) {
MachineOperand &MO = MI->getOperand(1);
if (MO.isUndef() || ImpDefRegs.count(MO.getReg())) {
if (MO.isKill()) {
LiveVariables::VarInfo& vi = LV->getVarInfo(MO.getReg());
vi.removeKill(MI);
}
unsigned Reg = MI->getOperand(0).getReg();
MI->eraseFromParent();
Changed = true;
// A REG_SEQUENCE may have been expanded into partial definitions.
// If this was the last one, mark Reg as implicitly defined.
if (TargetRegisterInfo::isVirtualRegister(Reg) && MRI->def_empty(Reg))
ImpDefRegs.insert(Reg);
continue;
}
}
bool ChangedToImpDef = false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand& MO = MI->getOperand(i);
if (!MO.isReg() || !MO.readsReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (!ImpDefRegs.count(Reg))
continue;
// Use is a copy, just turn it into an implicit_def.
if (CanTurnIntoImplicitDef(MI, Reg, i, ImpDefRegs)) {
bool isKill = MO.isKill();
MI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
MI->RemoveOperand(j);
if (isKill) {
ImpDefRegs.erase(Reg);
LiveVariables::VarInfo& vi = LV->getVarInfo(Reg);
vi.removeKill(MI);
}
ChangedToImpDef = true;
Changed = true;
break;
}
Changed = true;
MO.setIsUndef();
// This is a partial register redef of an implicit def.
// Make sure the whole register is defined by the instruction.
if (MO.isDef()) {
MI->addRegisterDefined(Reg);
continue;
}
if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
//......... some code omitted here .........
Example 12: while
bool Thumb2ITBlockPass::InsertITInstructions(MachineBasicBlock &MBB) {
bool Modified = false;
SmallSet<unsigned, 4> Defs;
SmallSet<unsigned, 4> Uses;
MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
while (MBBI != E) {
MachineInstr *MI = &*MBBI;
DebugLoc dl = MI->getDebugLoc();
unsigned PredReg = 0;
ARMCC::CondCodes CC = getPredicate(MI, PredReg);
if (CC == ARMCC::AL) {
++MBBI;
continue;
}
Defs.clear();
Uses.clear();
TrackDefUses(MI, Defs, Uses);
// Insert an IT instruction.
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII->get(ARM::t2IT))
.addImm(CC);
MachineBasicBlock::iterator InsertPos = MIB;
++MBBI;
// Finalize IT mask.
ARMCC::CondCodes OCC = ARMCC::getOppositeCondition(CC);
unsigned Mask = 0, Pos = 3;
// Branches, including tricky ones like LDM_RET, need to end an IT
// block so check the instruction we just put in the block.
for (; MBBI != E && Pos &&
(!MI->getDesc().isBranch() && !MI->getDesc().isReturn()) ; ++MBBI) {
if (MBBI->isDebugValue())
continue;
MachineInstr *NMI = &*MBBI;
MI = NMI;
unsigned NPredReg = 0;
ARMCC::CondCodes NCC = getPredicate(NMI, NPredReg);
if (NCC == CC || NCC == OCC)
Mask |= (NCC & 1) << Pos;
else {
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
if (NCC == ARMCC::AL &&
TII->isMoveInstr(*NMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
assert(SrcSubIdx == 0 && DstSubIdx == 0 &&
"Sub-register indices still around?");
// LLVM models selects as two-address instructions. That means a copy
// is inserted before a t2MOVccr, etc. If the copy is scheduled in
// between selects we would end up creating multiple IT blocks.
if (!Uses.count(DstReg) && !Defs.count(SrcReg)) {
--MBBI;
MBB.remove(NMI);
MBB.insert(InsertPos, NMI);
++NumMovedInsts;
continue;
}
}
break;
}
TrackDefUses(NMI, Defs, Uses);
--Pos;
}
Mask |= (1 << Pos);
// Tag along (firstcond[0] << 4) with the mask.
Mask |= (CC & 1) << 4;
MIB.addImm(Mask);
Modified = true;
++NumITs;
}
return Modified;
}
Example 13: optimizeCheck
/// optimizeCheck - replace the given check CallInst with the check's fast
/// version if all the source memory objects can be found and it is obvious
/// that none of them have been freed at the point where the check is made.
/// Returns the new call if possible and NULL otherwise.
///
/// This currently works only with memory objects that can't be freed:
/// * global variables
/// * allocas that trivially have function scope
/// * byval arguments
///
bool ExactCheckOpt::optimizeCheck(CallInst *CI, CheckInfoType *Info) {
// Examined values
SmallSet<Value*, 16> Visited;
// Potential memory objects
SmallSet<Value*, 4> Objects;
std::queue<Value*> Q;
// Start from the pointer operand
Value *StartPtr = CI->getArgOperand(Info->PtrArgNo)->stripPointerCasts();
Q.push(StartPtr);
// Use BFS to find all potential memory objects
while(!Q.empty()) {
Value *o = Q.front()->stripPointerCasts();
Q.pop();
if(Visited.count(o))
continue;
Visited.insert(o);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(o)) {
if (CE->getOpcode() == Instruction::GetElementPtr) {
Q.push(CE->getOperand(0));
} else {
// Exit early if any of the objects are unsupported.
if (!isSimpleMemoryObject(o))
return false;
Objects.insert(o);
}
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(o)) {
Q.push(GEP->getPointerOperand());
// It is fine to ignore the case of indexing into null with a pointer
// because that case is invalid for LLVM-aware objects such as allocas,
// globals, and objects pointed to by noalias pointers.
} else if(PHINode *PHI = dyn_cast<PHINode>(o)) {
for (unsigned i = 0, num = PHI->getNumIncomingValues(); i != num; ++i)
Q.push(PHI->getIncomingValue(i));
} else if (SelectInst *SI = dyn_cast<SelectInst>(o)) {
Q.push(SI->getTrueValue());
Q.push(SI->getFalseValue());
} else {
// Exit early if any of the objects are unsupported.
if (!isSimpleMemoryObject(o))
return false;
Objects.insert(o);
}
}
// Mapping from the initial value to the corresponding size and void pointer:
// * memory object -> its size and pointer
// * phi/select -> corresponding phi/select for the sizes and pointers
// * anything else -> the corresponding size and pointer on the path
std::map <Value*, PtrSizePair> M;
Module &Mod = *CI->getParent()->getParent()->getParent();
Type *SizeTy = getSizeType(Info, Mod);
// Add non-instruction non-constant allocation object pointers to the front
// of the function's entry block.
BasicBlock &EntryBlock = CI->getParent()->getParent()->getEntryBlock();
Instruction *FirstInsertionPoint = ++BasicBlock::iterator(EntryBlock.begin());
for (SmallSet<Value*, 16>::const_iterator It = Objects.begin(),
E = Objects.end();
It != E;
++It) {
// Obj is a memory object pointer: alloca, argument, load, callinst, etc.
Value *Obj = *It;
// Insert instruction-based allocation pointers just after the allocation.
Instruction *InsertBefore = FirstInsertionPoint;
if (Instruction *I = dyn_cast<Instruction>(Obj))
InsertBefore = ++BasicBlock::iterator(I);
IRBuilder<> Builder(InsertBefore);
SizeOffsetEvalType SizeOffset = ObjSizeEval->compute(Obj);
assert(ObjSizeEval->bothKnown(SizeOffset));
assert(dyn_cast<ConstantInt>(SizeOffset.second)->isZero());
Value *Size = Builder.CreateIntCast(SizeOffset.first, SizeTy,
/*isSigned=*/false);
Value *Ptr = Builder.CreatePointerCast(Obj, VoidPtrTy);
M[Obj] = std::make_pair(Ptr, Size);
}
// Create the rest of the size values and object pointers.
// The phi nodes will be finished later.
for (SmallSet<Value*, 16>::const_iterator I = Visited.begin(),
E = Visited.end();
I != E;
++I) {
//......... some code omitted here .........
Example 14: calculateFrameObjectOffsets
//......... some code omitted here .........
AddrOfObjs.insert(i);
continue;
case StackProtector::SSPLK_LargeArray:
LargeArrayObjs.insert(i);
continue;
}
llvm_unreachable("Unexpected SSPLayoutKind.");
}
AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign, Skew);
AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign, Skew);
AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign, Skew);
}
SmallVector<int, 8> ObjectsToAllocate;
// Then prepare to assign frame offsets to stack objects that are not used to
// spill callee saved registers.
for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
if (MFI.isObjectPreAllocated(i) && MFI.getUseLocalStackAllocationBlock())
continue;
if (i >= MinCSFrameIndex && i <= MaxCSFrameIndex)
continue;
if (RS && RS->isScavengingFrameIndex((int)i))
continue;
if (MFI.isDeadObjectIndex(i))
continue;
if (MFI.getStackProtectorIndex() == (int)i ||
EHRegNodeFrameIndex == (int)i)
continue;
if (ProtectedObjs.count(i))
continue;
// Add the objects that we need to allocate to our working set.
ObjectsToAllocate.push_back(i);
}
// Allocate the EH registration node first if one is present.
if (EHRegNodeFrameIndex != INT_MAX)
AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
MaxAlign, Skew);
// Give the targets a chance to order the objects the way they like it.
if (Fn.getTarget().getOptLevel() != CodeGenOpt::None &&
Fn.getTarget().Options.StackSymbolOrdering)
TFI.orderFrameObjects(Fn, ObjectsToAllocate);
// Keep track of which bytes in the fixed and callee-save range are used so we
// can use the holes when allocating later stack objects. Only do this if
// stack protector isn't being used and the target requests it and we're
// optimizing.
BitVector StackBytesFree;
if (!ObjectsToAllocate.empty() &&
Fn.getTarget().getOptLevel() != CodeGenOpt::None &&
MFI.getStackProtectorIndex() < 0 && TFI.enableStackSlotScavenging(Fn))
computeFreeStackSlots(MFI, StackGrowsDown, MinCSFrameIndex, MaxCSFrameIndex,
FixedCSEnd, StackBytesFree);
// Now walk the objects and actually assign base offsets to them.
for (auto &Object : ObjectsToAllocate)
if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
StackBytesFree))
AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);
Example 15: if
//......... some code omitted here .........
// extend to the end of the new split block.
bool isLastMBB =
std::next(MachineFunction::iterator(NMBB)) == getParent()->end();
SlotIndex StartIndex = Indexes->getMBBEndIdx(this);
SlotIndex PrevIndex = StartIndex.getPrevSlot();
SlotIndex EndIndex = Indexes->getMBBEndIdx(NMBB);
// Find the registers used from NMBB in PHIs in Succ.
SmallSet<unsigned, 8> PHISrcRegs;
for (MachineBasicBlock::instr_iterator
I = Succ->instr_begin(), E = Succ->instr_end();
I != E && I->isPHI(); ++I) {
for (unsigned ni = 1, ne = I->getNumOperands(); ni != ne; ni += 2) {
if (I->getOperand(ni+1).getMBB() == NMBB) {
MachineOperand &MO = I->getOperand(ni);
unsigned Reg = MO.getReg();
PHISrcRegs.insert(Reg);
if (MO.isUndef())
continue;
LiveInterval &LI = LIS->getInterval(Reg);
VNInfo *VNI = LI.getVNInfoAt(PrevIndex);
assert(VNI && "PHI sources should be live out of their predecessors.");
LI.addSegment(LiveInterval::Segment(StartIndex, EndIndex, VNI));
}
}
}
MachineRegisterInfo *MRI = &getParent()->getRegInfo();
for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (PHISrcRegs.count(Reg) || !LIS->hasInterval(Reg))
continue;
LiveInterval &LI = LIS->getInterval(Reg);
if (!LI.liveAt(PrevIndex))
continue;
bool isLiveOut = LI.liveAt(LIS->getMBBStartIdx(Succ));
if (isLiveOut && isLastMBB) {
VNInfo *VNI = LI.getVNInfoAt(PrevIndex);
assert(VNI && "LiveInterval should have VNInfo where it is live.");
LI.addSegment(LiveInterval::Segment(StartIndex, EndIndex, VNI));
} else if (!isLiveOut && !isLastMBB) {
LI.removeSegment(StartIndex, EndIndex);
}
}
// Update all intervals for registers whose uses may have been modified by
// updateTerminator().
LIS->repairIntervalsInRange(this, getFirstTerminator(), end(), UsedRegs);
}
if (MachineDominatorTree *MDT =
P->getAnalysisIfAvailable<MachineDominatorTree>()) {
// Update dominator information.
MachineDomTreeNode *SucccDTNode = MDT->getNode(Succ);
bool IsNewIDom = true;
for (const_pred_iterator PI = Succ->pred_begin(), E = Succ->pred_end();
PI != E; ++PI) {
MachineBasicBlock *PredBB = *PI;
if (PredBB == NMBB)
continue;