This article collects typical usage examples of the C++ method AllocaInst::getArraySize. If you have been wondering what AllocaInst::getArraySize does and how to use it, the curated code examples below may help. You can also explore further usage examples of the enclosing class, AllocaInst.
Thirteen code examples of the AllocaInst::getArraySize method are shown below, sorted by popularity by default.
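As background for reading the examples: getArraySize() returns the Value* operand that holds the number of elements being allocated. For a plain (non-array) alloca this operand is simply the constant 1, and it is only a ConstantInt when the element count is known at compile time, which is why almost every example below starts with an isa or dyn_cast check. A minimal sketch of that recurring pattern, assuming the usual LLVM headers; the helper name is made up for illustration:

#include <cstdint>
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper: return the element count if it is a compile-time
// constant, or 0 if the alloca is a run-time-sized (VLA-style) allocation.
static uint64_t constantArraySizeOrZero(const AllocaInst &AI) {
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize()))
    return CI->getZExtValue();  // constant count; 1 for a scalar alloca
  return 0;                     // element count is computed at run time
}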
Example 1: visitAlloca
bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.
  if (I.isArrayAllocation()) {
    if (Constant *Size = SimplifiedValues.lookup(I.getArraySize())) {
      ConstantInt *AllocSize = dyn_cast<ConstantInt>(Size);
      assert(AllocSize && "Allocation size not a constant int?");
      Type *Ty = I.getAllocatedType();
      AllocatedSize += Ty->getPrimitiveSizeInBits() * AllocSize->getZExtValue();
      return Base::visitAlloca(I);
    }
  }

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize += (DL ? DL->getTypeAllocSize(Ty) :
                           Ty->getPrimitiveSizeInBits());
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}
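Note a quirk in this snapshot of the inline-cost analyzer: the array branch accumulates Ty->getPrimitiveSizeInBits() (bits) times the element count, while the static branch uses DL->getTypeAllocSize(Ty) (bytes) whenever a DataLayout is available, so AllocatedSize mixes units and should be read as a heuristic rather than an exact byte count.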
Example 2: visitAllocaInst
void Lint::visitAllocaInst(AllocaInst &I) {
  if (isa<ConstantInt>(I.getArraySize()))
    // This isn't undefined behavior, it's just an obvious pessimization.
    Assert(&I.getParent()->getParent()->getEntryBlock() == I.getParent(),
           "Pessimization: Static alloca outside of entry block", &I);

  // TODO: Check for an unusual size (MSB set?)
}
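For context, an alloca only satisfies isStaticAlloca() when its getArraySize() is a ConstantInt and the instruction sits in the function's entry block, which is exactly the combination Lint checks above. A minimal sketch of building such an alloca with IRBuilder, assuming a standard LLVM setup (the function and variable names are illustrative):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Emit `void @f()` whose entry block opens with a 16-element i32 alloca.
void emitStaticAllocaExample(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Function *F = Function::Create(FunctionType::get(Type::getVoidTy(Ctx), false),
                                 Function::ExternalLinkage, "f", &M);
  BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> B(Entry);
  // Constant element count + entry block => AI->isStaticAlloca() is true,
  // so the Lint assertion above would be satisfied.
  AllocaInst *AI = B.CreateAlloca(B.getInt32Ty(), B.getInt32(16), "buf");
  (void)AI;
  B.CreateRetVoid();
}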
Example 3: visitAllocaInst
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  // must be a VLA
  assert(I.isArrayAllocation());
  Value *ArraySize = I.getArraySize();
  Value *Size = ConstantInt::get(ArraySize->getType(),
                                 TD->getTypeAllocSize(I.getAllocatedType()));
  Size = Builder.CreateMul(Size, ArraySize);
  return std::make_pair(Size, Zero);
}
Example 4: runOnFunction
bool NVPTXAllocaHoisting::runOnFunction(Function &function) {
  bool functionModified = false;
  Function::iterator I = function.begin();
  TerminatorInst *firstTerminatorInst = (I++)->getTerminator();

  for (Function::iterator E = function.end(); I != E; ++I) {
    for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) {
      AllocaInst *allocaInst = dyn_cast<AllocaInst>(BI++);
      if (allocaInst && isa<ConstantInt>(allocaInst->getArraySize())) {
        allocaInst->moveBefore(firstTerminatorInst);
        functionModified = true;
      }
    }
  }
  return functionModified;
}
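The design choice is worth spelling out: the pass only hoists allocas whose getArraySize() is a ConstantInt, and moving them in front of the entry block's terminator is precisely what makes them satisfy isStaticAlloca(), so later stages can treat them as fixed-size stack slots rather than dynamic allocations.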
Example 5: translateStaticAlloca
bool IRTranslator::translateStaticAlloca(const AllocaInst &AI) {
  assert(AI.isStaticAlloca() && "only handle static allocas now");

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  unsigned Res = getOrCreateVReg(AI);
  int FI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  MIRBuilder.buildFrameIndex(LLT::pointer(0), Res, FI);
  return true;
}
Example 6: getOrCreateFrameIndex
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  MachineFunction &MF = MIRBuilder.getMF();
  unsigned ElementSize = DL->getTypeStoreSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}
Example 7: visitAllocaInst
SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));
  if (!I.isArrayAllocation())
    return std::make_pair(align(Size, I.getAlignment()), Zero);

  Value *ArraySize = I.getArraySize();
  if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
    APInt NumElems = C->getValue();
    if (!CheckedZextOrTrunc(NumElems))
      return unknown();

    bool Overflow;
    Size = Size.umul_ov(NumElems, Overflow);
    return Overflow ? unknown()
                    : std::make_pair(align(Size, I.getAlignment()), Zero);
  }
  return unknown();
}
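The umul_ov call above is how the visitor guards against Size * NumElems wrapping around. A small self-contained sketch of the same check, assuming only llvm/ADT/APInt.h; the helper name is made up:

#include "llvm/ADT/APInt.h"
using llvm::APInt;

// Hypothetical helper: true if ElemSize * NumElems overflows their bit width.
// Both APInts must already share the same width, which is what the
// CheckedZextOrTrunc call above ensures before the multiply.
static bool allocationSizeOverflows(const APInt &ElemSize, const APInt &NumElems) {
  bool Overflow = false;
  (void)ElemSize.umul_ov(NumElems, Overflow); // unsigned multiply, flags wrap
  return Overflow;
}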
Example 8: performCallSlotOptzn
/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, unsigned cpyAlign,
                                     CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  if (TD == 0) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = TD->getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
//......... remaining code omitted .........
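The rewrite this function legalizes is easier to see at the source level. A hedged sketch, with a hypothetical produce() callee that is assumed to fully initialize its out-parameter:

#include <cstring>

struct S { int payload[4]; };
void produce(S *out); // assumed: writes a complete S through out

// Before the optimization: the call fills a temporary alloca, and the
// memcpy then copies it into the real destination.
void beforeOpt(S *dest) {
  S tmp;                              // the srcAlloca in the code above
  produce(&tmp);                      // call @func(..., src, ...)
  std::memcpy(dest, &tmp, sizeof(S)); // memcpy(dest, src, ...)
}

// After call slot optimization: the call writes straight into dest, and
// both the temporary and the memcpy disappear.
void afterOpt(S *dest) {
  produce(dest);                      // call @func(..., dest, ...)
}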
Example 9: performCallSlotOptzn
/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, unsigned cpyAlign,
                                     CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction",
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
//......... remaining code omitted .........
Example 10: InlineFunction
//......... earlier code omitted .........
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller. First
  // calculate which instruction they should be inserted before. We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
           E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  // Leave lifetime markers for the static alloca's, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
//......... remaining code omitted .........
Example 11: rewrite_omp_call_sites
/*
 * Rewrite OpenMP call sites and their associated kernel functions -- the following pattern:
 *   call void @GOMP_parallel_start(void (i8*)* @_Z20initialize_variablesiPfS_.omp_fn.4, i8* %.omp_data_o.5571, i32 0) nounwind
 *   call void @_Z20initialize_variablesiPfS_.omp_fn.4(i8* %.omp_data_o.5571) nounwind
 *   call void @GOMP_parallel_end() nounwind
 */
void HeteroOMPTransform::rewrite_omp_call_sites(Module &M) {
  SmallVector<Instruction *, 16> toDelete;
  DenseMap<Value *, Value *> ValueMap;

  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
    if (!I->isDeclaration()) {
      for (Function::iterator BBI = I->begin(), BBE = I->end(); BBI != BBE; ++BBI) {
        bool match = false;
        for (BasicBlock::iterator INSNI = BBI->begin(), INSNE = BBI->end(); INSNI != INSNE; ++INSNI) {
          if (isa<CallInst>(INSNI)) {
            CallSite CI(cast<Instruction>(INSNI));
            if (CI.getCalledFunction() != NULL) {
              string called_func_name = CI.getCalledFunction()->getName();
              if (called_func_name == OMP_PARALLEL_START_NAME && CI.arg_size() == 3) {
                // change alloc to malloc_shared
                //   %5 = call i8* @_Z13malloc_sharedm(i64 20)  ; <i8*> [#uses=5]
                //   %6 = bitcast i8* %5 to float*              ; <float*> [#uses=2]
                AllocaInst *AllocCall;
                Value *arg_0 = CI.getArgument(0); // function
                Value *arg_1 = CI.getArgument(1); // context
                Value *loop_ub = NULL;
                Function *function;
                BitCastInst *BCI;
                Function *kernel_function;
                BasicBlock::iterator iI(*INSNI);
                //BasicBlock::iterator iJ = iI+1;
                iI++; iI++;
                //BasicBlock::iterator iK = iI;
                CallInst /**next,*/ *next_next;
                if (arg_0 != NULL && arg_1 != NULL /*&& (next = dyn_cast<CallInst>(*iJ))*/
                    && (next_next = dyn_cast<CallInst>(iI)) && (next_next->getCalledFunction() != NULL)
                    && (next_next->getCalledFunction()->getName() == OMP_PARALLEL_END_NAME)
                    && (BCI = dyn_cast<BitCastInst>(arg_1)) && (AllocCall = dyn_cast<AllocaInst>(BCI->getOperand(0)))
                    && (function = dyn_cast<Function>(arg_0)) && (loop_ub = find_loop_upper_bound (AllocCall))
                    && (kernel_function = convert_to_kernel_function (M, function))) {
                  SmallVector<Value*, 16> Args;
                  Args.push_back(AllocCall->getArraySize());
                  Instruction *MallocCall = CallInst::Create(mallocFnTy, Args, "", AllocCall);
                  CastInst *MallocCast = CastInst::Create(Instruction::BitCast, MallocCall, AllocCall->getType(), "", AllocCall);
                  ValueMap[AllocCall] = MallocCast;
                  //AllocCall->replaceAllUsesWith(MallocCall);
                  // Add offload function
                  Args.clear();
                  Args.push_back(loop_ub);
                  Args.push_back(BCI);
                  Args.push_back(kernel_function);
                  if (offloadFnTy == NULL) {
                    init_offload_type(M, kernel_function);
                  }
                  Instruction *call = CallInst::Create(offloadFnTy, Args, "", INSNI);
                  if (find(toDelete.begin(), toDelete.end(), AllocCall) == toDelete.end()) {
                    toDelete.push_back(AllocCall);
                  }
                  toDelete.push_back(&(*INSNI));
                  match = true;
                }
              }
              else if (called_func_name == OMP_PARALLEL_END_NAME && CI.arg_size() == 0 && match) {
                toDelete.push_back(&(*INSNI));
                match = false;
              }
              else if (match) {
                toDelete.push_back(&(*INSNI));
              }
            }
          }
        }
      }
    }
  }

  /* Replace AllocCalls by MallocCalls */
  for (DenseMap<Value *, Value *>::iterator I = ValueMap.begin(), E = ValueMap.end(); I != E; I++) {
    I->first->replaceAllUsesWith(I->second);
  }

  /* delete the instructions for get_omp_num_thread and get_omp_thread_num */
  while (!toDelete.empty()) {
    Instruction *g = toDelete.back();
    toDelete.pop_back();
    g->eraseFromParent();
  }
}
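For context, the pattern in the header comment is what older GCC releases emit for an OpenMP parallel region: the loop body is outlined into an .omp_fn.N helper and the call site becomes the GOMP_parallel_start / outlined-call / GOMP_parallel_end triple. A hedged illustration of source code that lowers this way (the function is made up to echo the mangled name above):

// With an older GCC and -fopenmp, a loop like this is outlined into an
// _Z20initialize_variables...omp_fn.N function and wrapped in the
// GOMP_parallel_start/GOMP_parallel_end pair that the pass matches.
void initialize_variables(int n, float *a, const float *b, float c) {
  #pragma omp parallel for
  for (int i = 0; i < n; ++i)
    a[i] = b[i] * c;
}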
Example 12: insertBadAllocationSizes
//
// Method: insertBadAllocationSizes()
//
// Description:
//  This method will look for allocations and change their size to be
//  incorrect. It does the following:
//    o) Changes the number of array elements allocated by alloca and malloc.
//
// Return value:
//  true  - The module was modified.
//  false - The module was left unmodified.
//
bool
FaultInjector::insertBadAllocationSizes (Function & F) {
  // Worklist of allocation sites to rewrite
  std::vector<AllocaInst *> WorkList;

  for (Function::iterator fI = F.begin(), fE = F.end(); fI != fE; ++fI) {
    BasicBlock & BB = *fI;
    for (BasicBlock::iterator I = BB.begin(), bE = BB.end(); I != bE; ++I) {
      if (AllocaInst * AI = dyn_cast<AllocaInst>(I)) {
        if (AI->isArrayAllocation()) {
          // Skip if we should not insert a fault.
          if (!doFault()) continue;

          WorkList.push_back(AI);
        }
      }
    }
  }

  while (WorkList.size()) {
    AllocaInst * AI = WorkList.back();
    WorkList.pop_back();

    //
    // Print information about where the fault is being inserted.
    //
    printSourceInfo ("Bad allocation size", AI);

    Instruction * NewAlloc = 0;
    NewAlloc = new AllocaInst (AI->getAllocatedType(),
                               ConstantInt::get(Int32Type,0),
                               AI->getAlignment(),
                               AI->getName(),
                               AI);
    AI->replaceAllUsesWith (NewAlloc);
    AI->eraseFromParent();
    ++BadSizes;
  }

  //
  // Try harder to make bad allocation sizes.
  //
  WorkList.clear();
  for (Function::iterator fI = F.begin(), fE = F.end(); fI != fE; ++fI) {
    BasicBlock & BB = *fI;
    for (BasicBlock::iterator I = BB.begin(), bE = BB.end(); I != bE; ++I) {
      if (AllocaInst * AI = dyn_cast<AllocaInst>(I)) {
        //
        // Determine if this is a data type that we can make smaller.
        //
        if (((TD->getTypeAllocSize(AI->getAllocatedType())) > 4) && doFault()) {
          WorkList.push_back(AI);
        }
      }
    }
  }

  //
  // Replace these allocations with an allocation of an integer and cast the
  // result back into the appropriate type.
  //
  while (WorkList.size()) {
    AllocaInst * AI = WorkList.back();
    WorkList.pop_back();

    Instruction * NewAlloc = 0;
    NewAlloc = new AllocaInst (Int32Type,
                               AI->getArraySize(),
                               AI->getAlignment(),
                               AI->getName(),
                               AI);
    NewAlloc = castTo (NewAlloc, AI->getType(), "", AI);
    AI->replaceAllUsesWith (NewAlloc);
    AI->eraseFromParent();
    ++BadSizes;
  }

  return (BadSizes > 0);
}
Example 13: InlineFunction
//......... earlier code omitted .........
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller. First
  // calculate which instruction they should be inserted before. We insert the
  // instructions at the end of the current alloca list.
  //
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
           E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller if the
      // StaticAllocas pointer is non-null.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // If we are preserving the callgraph, add edges to the stacksave/restore