This article collects typical usage examples of the C++ method ArrayRef::back. If you have been wondering what exactly ArrayRef::back does, how to call it, or where to find real uses of it, the curated examples below should help. From here you can also explore the other methods of the ArrayRef class.
The following shows 15 code examples of ArrayRef::back, sorted by popularity by default.
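Before the collected examples, here is a minimal, self-contained sketch of the method itself. It is not taken from any example below and assumes only LLVM's ADT headers: back() returns the last element of the view (undefined on an empty ArrayRef, so guard with empty() first), and drop_back(), used throughout the examples, returns a shorter view without that element.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  int Storage[] = {10, 20, 30};
  llvm::ArrayRef<int> Ref(Storage);  // non-owning view over Storage

  // back() reads the last element; check empty() before calling it.
  if (!Ref.empty())
    llvm::outs() << "last: " << Ref.back() << "\n";           // prints 30

  // drop_back() yields a shorter view, leaving Storage untouched.
  llvm::outs() << "rest: " << Ref.drop_back().size() << "\n"; // prints 2
  return 0;
}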
Example 1: getNextUncurryLevelRef
static SILValue getNextUncurryLevelRef(SILGenFunction &gen,
                                       SILLocation loc,
                                       SILDeclRef next,
                                       bool direct,
                                       ArrayRef<SILValue> curriedArgs,
                                       ArrayRef<Substitution> curriedSubs) {
  // For a foreign function, reference the native thunk.
  if (next.isForeign)
    return gen.emitGlobalFunctionRef(loc, next.asForeign(false));

  // If the fully-uncurried reference is to a native dynamic class method, emit
  // the dynamic dispatch.
  auto fullyAppliedMethod = !next.isCurried && !next.isForeign && !direct &&
                            next.hasDecl();

  auto constantInfo = gen.SGM.Types.getConstantInfo(next);
  SILValue thisArg;
  if (!curriedArgs.empty())
    thisArg = curriedArgs.back();

  if (fullyAppliedMethod &&
      isa<AbstractFunctionDecl>(next.getDecl()) &&
      gen.getMethodDispatch(cast<AbstractFunctionDecl>(next.getDecl()))
        == MethodDispatch::Class) {
    SILValue thisArg = curriedArgs.back();

    // Use the dynamic thunk if dynamic.
    if (next.getDecl()->isDynamic()) {
      auto dynamicThunk = gen.SGM.getDynamicThunk(next, constantInfo);
      return gen.B.createFunctionRef(loc, dynamicThunk);
    }

    return gen.B.createClassMethod(loc, thisArg, next);
  }

  // If the fully-uncurried reference is to a generic method, look up the
  // witness.
  if (fullyAppliedMethod &&
      constantInfo.SILFnType->getRepresentation()
        == SILFunctionTypeRepresentation::WitnessMethod) {
    auto thisType = curriedSubs[0].getReplacement()->getCanonicalType();
    assert(isa<ArchetypeType>(thisType) && "no archetype for witness?!");
    SILValue OpenedExistential;
    if (!cast<ArchetypeType>(thisType)->getOpenedExistentialType().isNull())
      OpenedExistential = thisArg;
    return gen.B.createWitnessMethod(loc, thisType, nullptr, next,
                                     constantInfo.getSILType(),
                                     OpenedExistential);
  }

  // Otherwise, emit a direct call.
  return gen.emitGlobalFunctionRef(loc, next);
}
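In both dispatching branches, curriedArgs.back() supplies the self value for the class_method or witness_method instruction; as the comment in Example 10 below also spells out, self comes last in the parameter list, so back() is the natural way to reach it.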
Example 2: append
DIExpression *DIExpression::appendToStack(const DIExpression *Expr,
                                          ArrayRef<uint64_t> Ops) {
  assert(Expr && !Ops.empty() && "Can't append ops to this expression");
  assert(none_of(Ops,
                 [](uint64_t Op) {
                   return Op == dwarf::DW_OP_stack_value ||
                          Op == dwarf::DW_OP_LLVM_fragment;
                 }) &&
         "Can't append this op");

  // Append a DW_OP_deref after Expr's current op list if it's non-empty and
  // has no DW_OP_stack_value.
  //
  // Match .* DW_OP_stack_value (DW_OP_LLVM_fragment A B)?.
  Optional<FragmentInfo> FI = Expr->getFragmentInfo();
  unsigned DropUntilStackValue = FI.hasValue() ? 3 : 0;
  ArrayRef<uint64_t> ExprOpsBeforeFragment =
      Expr->getElements().drop_back(DropUntilStackValue);
  bool NeedsDeref = (Expr->getNumElements() > DropUntilStackValue) &&
                    (ExprOpsBeforeFragment.back() != dwarf::DW_OP_stack_value);
  bool NeedsStackValue = NeedsDeref || ExprOpsBeforeFragment.empty();

  // Append a DW_OP_deref after Expr's current op list if needed, then append
  // the new ops, and finally ensure that a single DW_OP_stack_value is present.
  SmallVector<uint64_t, 16> NewOps;
  if (NeedsDeref)
    NewOps.push_back(dwarf::DW_OP_deref);
  NewOps.append(Ops.begin(), Ops.end());
  if (NeedsStackValue)
    NewOps.push_back(dwarf::DW_OP_stack_value);
  return DIExpression::append(Expr, NewOps);
}
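Note the interplay of drop_back() and back() here: when the expression carries a fragment, its last three elements are DW_OP_LLVM_fragment plus its two operands, so drop_back(3) strips that suffix and back() then inspects the operation immediately preceding it to decide whether a DW_OP_stack_value is already present.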
Example 3: emitBuiltinAssign
static ManagedValue emitBuiltinAssign(SILGenFunction &gen,
                                      SILLocation loc,
                                      SubstitutionList substitutions,
                                      ArrayRef<ManagedValue> args,
                                      CanFunctionType formalApplyType,
                                      SGFContext C) {
  assert(args.size() >= 2 && "assign should have two arguments");
  assert(substitutions.size() == 1 &&
         "assign should have a single substitution");

  // The substitution determines the type of the thing we're destroying.
  CanType assignFormalType =
      substitutions[0].getReplacement()->getCanonicalType();
  SILType assignType = gen.getLoweredType(assignFormalType);

  // Convert the destination pointer argument to a SIL address.
  SILValue addr = gen.B.createPointerToAddress(loc,
                                               args.back().getUnmanagedValue(),
                                               assignType.getAddressType(),
                                               /*isStrict*/ true);

  // Build the value to be assigned, reconstructing tuples if needed.
  auto src = RValue::withPreExplodedElements(args.slice(0, args.size() - 1),
                                             assignFormalType);
  std::move(src).assignInto(gen, loc, addr);

  return ManagedValue::forUnmanaged(gen.emitEmptyTuple(loc));
}
Example 4: emitBuiltinAssign
static ManagedValue emitBuiltinAssign(SILGenFunction &SGF,
                                      SILLocation loc,
                                      SubstitutionMap substitutions,
                                      ArrayRef<ManagedValue> args,
                                      SGFContext C) {
  assert(args.size() >= 2 && "assign should have two arguments");
  assert(substitutions.getReplacementTypes().size() == 1 &&
         "assign should have a single substitution");

  // The substitution determines the type of the thing we're destroying.
  CanType assignFormalType =
      substitutions.getReplacementTypes()[0]->getCanonicalType();
  SILType assignType = SGF.getLoweredType(assignFormalType);

  // Convert the destination pointer argument to a SIL address.
  SILValue addr = SGF.B.createPointerToAddress(loc,
                                               args.back().getUnmanagedValue(),
                                               assignType.getAddressType(),
                                               /*isStrict*/ true,
                                               /*isInvariant*/ false);

  // Build the value to be assigned, reconstructing tuples if needed.
  auto src = RValue(SGF, args.slice(0, args.size() - 1), assignFormalType);
  std::move(src).ensurePlusOne(SGF, loc).assignInto(SGF, loc, addr);

  return ManagedValue::forUnmanaged(SGF.emitEmptyTuple(loc));
}
Example 5: fullModuleNameIs
bool Module::fullModuleNameIs(ArrayRef<StringRef> nameParts) const {
  for (const Module *M = this; M; M = M->Parent) {
    if (nameParts.empty() || M->Name != nameParts.back())
      return false;
    nameParts = nameParts.drop_back();
  }
  return nameParts.empty();
}
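The loop consumes nameParts from the innermost component outward: back() is compared against the current module's name and drop_back() shrinks the view as the walk moves to the parent. A standalone sketch of the same pattern follows; the Node type and chainNameIs are hypothetical stand-ins for clang::Module and this method, not Clang API:
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"

// Hypothetical parent-linked node standing in for clang::Module.
struct Node {
  llvm::StringRef Name;
  const Node *Parent;
};

// True if Parts spells out the whole chain, innermost name last.
static bool chainNameIs(const Node *N, llvm::ArrayRef<llvm::StringRef> Parts) {
  for (; N; N = N->Parent) {
    if (Parts.empty() || N->Name != Parts.back())
      return false;
    Parts = Parts.drop_back();  // consume the matched component
  }
  return Parts.empty();         // every component must have been used
}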
Example 6: reportModuleReferences
static void reportModuleReferences(const Module *Mod,
                                   ArrayRef<SourceLocation> IdLocs,
                                   const ImportDecl *ImportD,
                                   IndexDataConsumer &DataConsumer) {
  if (!Mod)
    return;
  reportModuleReferences(Mod->Parent, IdLocs.drop_back(), ImportD,
                         DataConsumer);
  DataConsumer.handleModuleOccurence(ImportD, Mod,
                                     (SymbolRoleSet)SymbolRole::Reference,
                                     IdLocs.back());
}
Example 7: passModulePathElements
bool SemaAnnotator::passModulePathElements(
    ArrayRef<ImportDecl::AccessPathElement> Path,
    const clang::Module *ClangMod) {
  if (Path.empty() || !ClangMod)
    return true;
  if (!passModulePathElements(Path.drop_back(1), ClangMod->Parent))
    return false;
  return passReference(ClangMod, Path.back());
}
Example 8: createError
template <class ELFT>
Expected<typename ELFT::DynRange> ELFFile<ELFT>::dynamicEntries() const {
  ArrayRef<Elf_Dyn> Dyn;
  size_t DynSecSize = 0;

  auto ProgramHeadersOrError = program_headers();
  if (!ProgramHeadersOrError)
    return ProgramHeadersOrError.takeError();

  for (const Elf_Phdr &Phdr : *ProgramHeadersOrError) {
    if (Phdr.p_type == ELF::PT_DYNAMIC) {
      Dyn = makeArrayRef(
          reinterpret_cast<const Elf_Dyn *>(base() + Phdr.p_offset),
          Phdr.p_filesz / sizeof(Elf_Dyn));
      DynSecSize = Phdr.p_filesz;
      break;
    }
  }

  // If we can't find the dynamic section in the program headers, we just fall
  // back on the sections.
  if (Dyn.empty()) {
    auto SectionsOrError = sections();
    if (!SectionsOrError)
      return SectionsOrError.takeError();

    for (const Elf_Shdr &Sec : *SectionsOrError) {
      if (Sec.sh_type == ELF::SHT_DYNAMIC) {
        Expected<ArrayRef<Elf_Dyn>> DynOrError =
            getSectionContentsAsArray<Elf_Dyn>(&Sec);
        if (!DynOrError)
          return DynOrError.takeError();
        Dyn = *DynOrError;
        DynSecSize = Sec.sh_size;
        break;
      }
    }

    if (!Dyn.data())
      return ArrayRef<Elf_Dyn>();
  }

  if (Dyn.empty())
    return createError("invalid empty dynamic section");

  if (DynSecSize % sizeof(Elf_Dyn) != 0)
    return createError("malformed dynamic section");

  if (Dyn.back().d_tag != ELF::DT_NULL)
    return createError("dynamic sections must be DT_NULL terminated");

  return Dyn;
}
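The three checks at the end all lean on the table's shape: non-empty, a whole number of entries, and terminated by DT_NULL, which is what Dyn.back().d_tag verifies. A hedged sketch of the same sentinel validation on a plain entry type follows; Entry and isValidTable are illustrative, not LLVM API:
#include "llvm/ADT/ArrayRef.h"
#include <cstddef>
#include <cstdint>

// Illustrative stand-in for Elf_Dyn: entries terminated by Tag == 0.
struct Entry {
  uint64_t Tag;
  uint64_t Value;
};

static bool isValidTable(llvm::ArrayRef<Entry> Table, size_t RawSize) {
  if (Table.empty())
    return false;                  // "invalid empty dynamic section"
  if (RawSize % sizeof(Entry) != 0)
    return false;                  // "malformed dynamic section"
  return Table.back().Tag == 0;    // sentinel check via back()
}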
Example 9: assert
static SILValue
getThunkedForeignFunctionRef(SILGenFunction &gen,
                             SILLocation loc,
                             SILDeclRef foreign,
                             ArrayRef<ManagedValue> args,
                             ArrayRef<Substitution> subs,
                             const SILConstantInfo &foreignCI) {
  assert(!foreign.isCurried
         && "should not thunk calling convention when curried");

  // Produce a witness_method when thunking ObjC protocol methods.
  auto dc = foreign.getDecl()->getDeclContext();
  if (isa<ProtocolDecl>(dc) && cast<ProtocolDecl>(dc)->isObjC()) {
    assert(subs.size() == 1);
    auto thisType = subs[0].getReplacement()->getCanonicalType();
    assert(isa<ArchetypeType>(thisType) && "no archetype for witness?!");
    SILValue thisArg = args.back().getValue();

    SILValue OpenedExistential;
    if (!cast<ArchetypeType>(thisType)->getOpenedExistentialType().isNull())
      OpenedExistential = thisArg;

    auto conformance = ProtocolConformanceRef(cast<ProtocolDecl>(dc));
    return gen.B.createWitnessMethod(loc, thisType, conformance, foreign,
                                     foreignCI.getSILType(),
                                     OpenedExistential);

  // Produce a class_method when thunking imported ObjC methods.
  } else if (foreignCI.SILFnType->getRepresentation()
               == SILFunctionTypeRepresentation::ObjCMethod) {
    assert(subs.empty());
    SILValue thisArg = args.back().getValue();

    return gen.B.createClassMethod(loc, thisArg, foreign,
                        SILType::getPrimitiveObjectType(foreignCI.SILFnType),
                                   /*volatile*/ true);
  }

  // Otherwise, emit a function_ref.
  return gen.emitGlobalFunctionRef(loc, foreign);
}
Example 10: emitter
/// Emit an open-coded protocol-witness thunk for materializeForSet if
/// delegating to the standard implementation isn't good enough.
///
/// materializeForSet sometimes needs to be open-coded because of the
/// thin callback function, which is dependent but cannot be reabstracted.
///
/// - In a protocol extension, the callback doesn't know how to capture
///   or reconstruct the generic conformance information.
///
/// - The abstraction pattern of the variable from the witness may
///   differ from the abstraction pattern of the protocol, likely forcing
///   a completely different access pattern (e.g. to write back a
///   reabstracted value instead of modifying it in-place).
///
/// \return true if special code was emitted
bool SILGenFunction::
maybeEmitMaterializeForSetThunk(ProtocolConformance *conformance,
                                FuncDecl *requirement, FuncDecl *witness,
                                ArrayRef<Substitution> witnessSubs,
                                ArrayRef<ManagedValue> origParams) {
  // Break apart the parameters. self comes last, the result buffer
  // comes first, the callback storage buffer comes second, and the
  // rest are indices.
  ManagedValue self = origParams.back();
  SILValue resultBuffer = origParams[0].getUnmanagedValue();
  SILValue callbackBuffer = origParams[1].getUnmanagedValue();
  ArrayRef<ManagedValue> indices = origParams.slice(2).drop_back();

  MaterializeForSetEmitter emitter(SGM, conformance, requirement, witness,
                                   witnessSubs, self.getType());

  if (!emitter.shouldOpenCode())
    return false;

  emitter.emit(*this, self, resultBuffer, callbackBuffer, indices);
  return true;
}
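Note how the ArrayRef accessors partition origParams without any copying: back() peels self off the end, indexing takes the two buffers off the front, and slice(2).drop_back() leaves exactly the index parameters in between.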
Example 11: if
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;

  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i].second;
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
             : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;
  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  MI->eraseFromParent();

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  DEBUG(dbgs() << "\tfolded: " << LIS.getInstructionIndex(FoldMI) << '\t'
               << *FoldMI);
  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0)
    ++NumSpills;
  else
    ++NumReloads;
  return true;
}
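A detail worth noticing in this example and its two variants below: Ops.front().first and Ops.back().first must name the same instruction, which is how the function rejects operand lists that span a bundle before it even looks at the individual operands.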
Example 12: MIS
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;

  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  bool SpillSubRegs = (MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                       MI->getOpcode() == TargetOpcode::STACKMAP);

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i].second;
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }
    // FIXME: Teach targets to deal with subregs.
    if (!SpillSubRegs && MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstrSpan MIS(MI);

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
             : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    unsigned Reg = MO->getReg();
    if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
        MRI.isReserved(Reg)) {
      continue;
    }
    // Skip non-Defs, including undef uses and internal reads.
    if (MO->isUse())
      continue;
    MIBundleOperands::PhysRegInfo RI =
        MIBundleOperands(FoldMI).analyzePhysReg(Reg, &TRI);
    if (RI.Defines)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) {
      if (LiveRange *LR = LIS.getCachedRegUnit(*Units)) {
        SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
        if (VNInfo *VNI = LR->getVNInfoAt(Idx))
          LR->removeValNo(VNI);
      }
    }
  }

  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  MI->eraseFromParent();

  // Insert any new instructions other than FoldMI into the LIS maps.
  assert(!MIS.empty() && "Unexpected empty span of instructions!");
  for (MachineBasicBlock::iterator MII = MIS.begin(), End = MIS.end();
       MII != End; ++MII)
    if (&*MII != FoldMI)
      LIS.InsertMachineInstrInMaps(&*MII);

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
//......... rest of the code omitted here .........
Example 13: Handler
bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                CallingConv::ID CallConv,
                                const MachineOperand &Callee,
                                const ArgInfo &OrigRet,
                                ArrayRef<ArgInfo> OrigArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  auto TRI = STI.getRegisterInfo();

  // Handle only Linux C, X86_64_SysV calling conventions for now.
  if (!STI.isTargetLinux() ||
      !(CallConv == CallingConv::C || CallConv == CallingConv::X86_64_SysV))
    return false;

  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto CallSeqStart = MIRBuilder.buildInstr(AdjStackDown);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  bool Is64Bit = STI.is64Bit();
  unsigned CallOpc = Callee.isReg()
                         ? (Is64Bit ? X86::CALL64r : X86::CALL32r)
                         : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);

  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpc).add(Callee).addRegMask(
      TRI->getCallPreservedMask(MF, CallConv));

  SmallVector<ArgInfo, 8> SplitArgs;
  for (const auto &OrigArg : OrigArgs) {
    // TODO: handle not simple cases.
    if (OrigArg.Flags.isByVal())
      return false;

    if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
                           [&](ArrayRef<unsigned> Regs) {
                             MIRBuilder.buildUnmerge(Regs, OrigArg.Reg);
                           }))
      return false;
  }

  // Do the actual argument marshalling.
  OutgoingValueHandler Handler(MIRBuilder, MRI, MIB, CC_X86);
  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
    return false;

  bool IsFixed = OrigArgs.empty() ? true : OrigArgs.back().IsFixed;
  if (STI.is64Bit() && !IsFixed && !STI.isCallingConvWin64(CallConv)) {
    // From the AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...) in
    // the declaration) %al is used as hidden argument to specify the number
    // of SSE registers used. The contents of %al do not need to match exactly
    // the number of registers, but must be an upper bound on the number of SSE
    // registers used and is in the range 0 - 8 inclusive.
    MIRBuilder.buildInstr(X86::MOV8ri)
        .addDef(X86::AL)
        .addImm(Handler.getNumXmmRegs());
    MIB.addUse(X86::AL, RegState::Implicit);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Callee, 0));

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (OrigRet.Reg) {
    SplitArgs.clear();
    SmallVector<unsigned, 8> NewRegs;

    if (!splitToValueTypes(OrigRet, SplitArgs, DL, MRI,
                           [&](ArrayRef<unsigned> Regs) {
                             NewRegs.assign(Regs.begin(), Regs.end());
                           }))
      return false;

    CallReturnHandler Handler(MIRBuilder, MRI, RetCC_X86, MIB);
    if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
      return false;

    if (!NewRegs.empty())
      MIRBuilder.buildMerge(OrigRet.Reg, NewRegs);
  }

  CallSeqStart.addImm(Handler.getStackSize())
      .addImm(0 /* see getFrameTotalSize */)
//......... rest of the code omitted here .........
Example 14:
Expr *getActualLastValue() const {
  return Values.back();
}
Example 15: if
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from analyzeVirtReg().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
                  MachineInstr *LoadMI) {
  if (Ops.empty())
    return false;

  // Don't attempt folding in bundles.
  MachineInstr *MI = Ops.front().first;
  if (Ops.back().first != MI || MI->isBundled())
    return false;

  bool WasCopy = MI->isCopy();
  unsigned ImpReg = 0;

  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i].second;
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit()) {
      ImpReg = MO.getReg();
      continue;
    }
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI =
      LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
             : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;

  // Remove LIS for any dead defs in the original MI not in FoldMI.
  for (MIBundleOperands MO(MI); MO.isValid(); ++MO) {
    if (!MO->isReg())
      continue;
    unsigned Reg = MO->getReg();
    if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg) ||
        MRI.isReserved(Reg)) {
      continue;
    }
    MIBundleOperands::PhysRegInfo RI =
        MIBundleOperands(FoldMI).analyzePhysReg(Reg, &TRI);
    if (MO->readsReg()) {
      assert(RI.Reads && "Cannot fold physreg reader");
      continue;
    }
    if (RI.Defines)
      continue;
    // FoldMI does not define this physreg. Remove the LI segment.
    assert(MO->isDead() && "Cannot fold physreg def");
    for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) {
      if (LiveInterval *LI = LIS.getCachedRegUnit(*Units)) {
        SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
        if (VNInfo *VNI = LI->getVNInfoAt(Idx))
          LI->removeValNo(VNI);
      }
    }
  }

  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  MI->eraseFromParent();

  // TII.foldMemoryOperand may have left some implicit operands on the
  // instruction. Strip them.
  if (ImpReg)
    for (unsigned i = FoldMI->getNumOperands(); i; --i) {
      MachineOperand &MO = FoldMI->getOperand(i - 1);
      if (!MO.isReg() || !MO.isImplicit())
        break;
      if (MO.getReg() == ImpReg)
        FoldMI->RemoveOperand(i - 1);
    }

  DEBUG(dbgs() << "\tfolded: " << LIS.getInstructionIndex(FoldMI) << '\t'
               << *FoldMI);
  if (!WasCopy)
    ++NumFolded;
  else if (Ops.front().second == 0)
    ++NumSpills;
  else
    ++NumReloads;
  return true;
}