This article collects and summarizes typical usage examples of the C++ AllocaInst class. If you have been wondering what AllocaInst is for, how to use it, or what working examples look like, the curated class examples here may help.
Below are 15 code examples of the AllocaInst class, ordered by popularity.
Example 1: errs
AllocaInst* Variables::changeLocal(Value* value, PointerType* newType) {
AllocaInst *oldTarget = dyn_cast<AllocaInst>(value);
PointerType* oldPointerType = dyn_cast<PointerType>(oldTarget->getType());
PointerType *oldType = dyn_cast<PointerType>(oldPointerType->getElementType());
AllocaInst *newTarget = NULL;
errs() << "Changing the precision of pointer variable \"" << oldTarget->getName() << "\" from " << *oldType
<< " to " << *newType << ".\n";
if (diffTypes(newType, oldType)) {
newTarget = new AllocaInst(newType, getInt32(1), "", oldTarget);
// We do not reuse getAlignment() from the old alloca; the alignment must match the new element type instead. Investigate further.
unsigned alignment;
switch(newType->getElementType()->getTypeID()) {
case Type::FloatTyID:
alignment = 4;
break;
case Type::DoubleTyID:
alignment = 8;
break;
case Type::X86_FP80TyID:
alignment = 16;
break;
default:
alignment = 0;
}
newTarget->setAlignment(alignment); // alignment chosen from the new element type above
newTarget->takeName(oldTarget);
// iterate over the instructions that use the old AllocaInst
vector<Instruction*> erase;
Value::use_iterator it = oldTarget->use_begin();
#ifdef DEBUG
errs() << "\nOld target: ";
oldTarget->dump();
#endif
for(; it != oldTarget->use_end(); it++) {
#ifdef DEBUG
errs() << "\nA use: ";
it->dump();
errs() << "\n===============================\n";
errs() << "\nTransforming use\n";
#endif
bool is_erased = Transformer::transform(it, newTarget, oldTarget, newType, oldType, alignment);
if (!is_erased) {
erase.push_back(dyn_cast<Instruction>(*it));
}
#ifdef DEBUG
errs() << "\nDone transforming use\n";
#endif
}
// erase the user instructions that transform() did not already erase
for(unsigned int i = 0; i < erase.size(); i++) {
erase[i]->eraseFromParent();
}
// erase old instruction
//oldTarget->eraseFromParent();
#ifdef DEBUG
errs() << "DONE ALL TRANSFORMATION FOR POINTER\n";
#endif
} else {
errs() << "\tNo changes required.\n";
}
return newTarget;
}
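A minimal standalone sketch of the core pattern above: create the replacement AllocaInst ahead of the old one, set its alignment from the new element type, and steal the old name. This assumes a 3.x-era LLVM API, matching the setAlignment(unsigned) call and the AllocaInst(Type*, Twine, Instruction*) constructor used in this example; Transformer::transform is specific to this codebase and is not reproduced.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
using namespace llvm;
int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  Function *F = Function::Create(
      FunctionType::get(Type::getVoidTy(Ctx), false),
      Function::ExternalLinkage, "f", &M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
  // The old variable: a float slot named "x".
  AllocaInst *OldA = B.CreateAlloca(Type::getFloatTy(Ctx), nullptr, "x");
  // The replacement, of a wider type, inserted before the old alloca.
  AllocaInst *NewA = new AllocaInst(Type::getDoubleTy(Ctx), "", OldA);
  NewA->setAlignment(8); // double -> 8, as in the switch above
  NewA->takeName(OldA);  // the new slot is now called "x"
  // If the two types matched, OldA->replaceAllUsesWith(NewA) would finish
  // the job; changeLocal() must rewrite each use individually precisely
  // because the pointee type changed.
  M.dump();
  return 0;
}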
Example 2: while
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Ensure that the alloca array size argument has type intptr_t, so that
// any casting is exposed early.
if (DL) {
Type *IntPtrTy = DL->getIntPtrType(AI.getType());
if (AI.getArraySize()->getType() != IntPtrTy) {
Value *V = Builder->CreateIntCast(AI.getArraySize(),
IntPtrTy, false);
AI.setOperand(0, V);
return &AI;
}
}
// Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
if (AI.isArrayAllocation()) { // Check C != 1
if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
Type *NewTy =
ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
New->setAlignment(AI.getAlignment());
// Scan to the end of the allocation instructions, to skip over a block of
// allocas if possible...also skip interleaved debug info
//
BasicBlock::iterator It = New;
while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
// Now that I is pointing to the first non-allocation-inst in the block,
// insert our getelementptr instruction...
//
Type *IdxTy = DL
? DL->getIntPtrType(AI.getType())
: Type::getInt64Ty(AI.getContext());
Value *NullIdx = Constant::getNullValue(IdxTy);
Value *Idx[2] = { NullIdx, NullIdx };
Instruction *GEP =
GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
InsertNewInstBefore(GEP, *It);
// Now make everything use the getelementptr instead of the original
// allocation.
return ReplaceInstUsesWith(AI, GEP);
} else if (isa<UndefValue>(AI.getArraySize())) {
return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
}
}
if (DL && AI.getAllocatedType()->isSized()) {
// If the alignment is 0 (unspecified), assign it the preferred alignment.
if (AI.getAlignment() == 0)
AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));
// Move all alloca's of zero byte objects to the entry block and merge them
// together. Note that we only do this for alloca's, because malloc should
// allocate and return a unique pointer, even for a zero byte allocation.
if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {
// For a zero sized alloca there is no point in doing an array allocation.
// This is helpful if the array size is a complicated expression not used
// elsewhere.
if (AI.isArrayAllocation()) {
AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
return &AI;
}
// Get the first instruction in the entry block.
BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
if (FirstInst != &AI) {
// If the entry block doesn't start with a zero-size alloca then move
// this one to the start of the entry block. There is no problem with
// dominance as the array size was forced to a constant earlier already.
AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
AI.moveBefore(FirstInst);
return &AI;
}
// If the alignment of the entry block alloca is 0 (unspecified),
// assign it the preferred alignment.
if (EntryAI->getAlignment() == 0)
EntryAI->setAlignment(
DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));
// Replace this zero-sized alloca with the one at the start of the entry
// block after ensuring that the address will be aligned enough for both
// types.
unsigned MaxAlign = std::max(EntryAI->getAlignment(),
AI.getAlignment());
EntryAI->setAlignment(MaxAlign);
if (AI.getType() != EntryAI->getType())
return new BitCastInst(EntryAI, AI.getType());
return ReplaceInstUsesWith(AI, EntryAI);
}
}
}
if (AI.getAlignment()) {
// Check to see if this allocation is only modified by a memcpy/memmove from
// a constant global whose alignment is equal to or exceeds that of the
// allocation. If this is the case, we can change all users to use
//......... some code omitted here .........
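Before moving on, the array-size canonicalization at the top of this visitor is worth isolating. Below is a hedged sketch of that single rewrite as a free function, using the same 3.x-era builder calls as the example; the alloca-block scan and debug-info skipping are omitted.
#include "llvm/IR/IRBuilder.h"
using namespace llvm;
// Turn `%a = alloca i32, i32 8` into `%a = alloca [8 x i32]` plus an
// inbounds GEP to element 0 that stands in for the old pointer.
static Value *canonicalizeArrayAlloca(AllocaInst *AI) {
  ConstantInt *C = cast<ConstantInt>(AI->getArraySize());
  IRBuilder<> B(AI);
  Type *NewTy = ArrayType::get(AI->getAllocatedType(), C->getZExtValue());
  AllocaInst *New = B.CreateAlloca(NewTy, nullptr, AI->getName());
  New->setAlignment(AI->getAlignment());
  Value *Zero = B.getInt64(0);
  Value *Idx[] = { Zero, Zero };
  // The GEP yields an i32* again, so it can replace AI's uses directly.
  Value *GEP = B.CreateInBoundsGEP(New, Idx, New->getName() + ".sub");
  AI->replaceAllUsesWith(GEP);
  AI->eraseFromParent();
  return GEP;
}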
Example 3: handleAlloca
// FIXME: Should try to pick the most likely to be profitable allocas first.
void AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I) {
// Array allocations are probably not worth handling, since an allocation of
// the array type is the canonical form.
if (!I.isStaticAlloca() || I.isArrayAllocation())
return;
IRBuilder<> Builder(&I);
// First try to replace the alloca with a vector
Type *AllocaTy = I.getAllocatedType();
DEBUG(dbgs() << "Trying to promote " << I << '\n');
if (tryPromoteAllocaToVector(&I)) {
DEBUG(dbgs() << " alloca is not a candidate for vectorization.\n");
return;
}
const Function &ContainingFunction = *I.getParent()->getParent();
// Don't promote the alloca to LDS for shader calling conventions as the work
// item ID intrinsics are not supported for these calling conventions.
// Furthermore, not all of the LDS is available in some of the stages.
if (AMDGPU::isShader(ContainingFunction.getCallingConv()))
return;
// FIXME: We should also try to get this value from the reqd_work_group_size
// function attribute if it is available.
unsigned WorkGroupSize = AMDGPU::getMaximumWorkGroupSize(ContainingFunction);
const DataLayout &DL = Mod->getDataLayout();
unsigned Align = I.getAlignment();
if (Align == 0)
Align = DL.getABITypeAlignment(I.getAllocatedType());
// FIXME: This computed padding is likely wrong since it depends on inverse
// usage order.
//
// FIXME: It is also possible that if we're allowed to use all of the memory
// we could end up using more than the maximum due to alignment padding.
uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
NewSize += AllocSize;
if (NewSize > LocalMemLimit) {
DEBUG(dbgs() << " " << AllocSize
<< " bytes of local memory not available to promote\n");
return;
}
CurrentLocalMemUsage = NewSize;
std::vector<Value*> WorkList;
if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
DEBUG(dbgs() << " Do not know how to convert all uses\n");
return;
}
DEBUG(dbgs() << "Promoting alloca to local memory\n");
Function *F = I.getParent()->getParent();
Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
GlobalVariable *GV = new GlobalVariable(
*Mod, GVTy, false, GlobalValue::InternalLinkage,
UndefValue::get(GVTy),
Twine(F->getName()) + Twine('.') + I.getName(),
nullptr,
GlobalVariable::NotThreadLocal,
AMDGPUAS::LOCAL_ADDRESS);
GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
GV->setAlignment(I.getAlignment());
Value *TCntY, *TCntZ;
std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
Value *TIdX = getWorkitemID(Builder, 0);
Value *TIdY = getWorkitemID(Builder, 1);
Value *TIdZ = getWorkitemID(Builder, 2);
Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
Tmp0 = Builder.CreateMul(Tmp0, TIdX);
Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
TID = Builder.CreateAdd(TID, TIdZ);
Value *Indices[] = {
Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
TID
};
Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
I.mutateType(Offset->getType());
I.replaceAllUsesWith(Offset);
I.eraseFromParent();
//......... some code omitted here .........
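The chain of CreateMul/CreateAdd calls above computes a flat work-item index. Written as plain arithmetic (a standalone model of the emitted IR, not the intrinsic calls themselves):
// TID = (TCntY * TCntZ) * TIdX + TCntZ * TIdY + TIdZ
// i.e. the linear index of (TIdX, TIdY, TIdZ) given Y and Z extents of
// TCntY and TCntZ, so every work item addresses a distinct element of
// the [WorkGroupSize x AllocaTy] LDS array created above.
unsigned flatWorkitemId(unsigned TIdX, unsigned TIdY, unsigned TIdZ,
                        unsigned TCntY, unsigned TCntZ) {
  return (TCntY * TCntZ) * TIdX + TCntZ * TIdY + TIdZ;
}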
Example 4: assert
//......... some code omitted here .........
}
VMap[I] = ActualArg;
}
// We want the inliner to prune the code as it copies. We would LOVE to
// have no dead or constant instructions leftover after inlining occurs
// (which can happen, e.g., because an argument was constant), but we'll be
// happy with whatever the cloner can do.
CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
/*ModuleLevelChanges=*/false, Returns, ".i",
&InlinedFunctionInfo, IFI.TD, TheCall);
// Remember the first block that is newly cloned over.
FirstNewBlock = LastBlock; ++FirstNewBlock;
// Update the callgraph if requested.
if (IFI.CG)
UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
// Update inlined instructions' line number information.
fixupLineNumbers(Caller, FirstNewBlock, TheCall);
}
// If there are any alloca instructions in the block that used to be the entry
// block for the callee, move them to the entry block of the caller. First
// calculate which instruction they should be inserted before. We insert the
// instructions at the end of the current alloca list.
//
{
BasicBlock::iterator InsertPoint = Caller->begin()->begin();
for (BasicBlock::iterator I = FirstNewBlock->begin(),
E = FirstNewBlock->end(); I != E; ) {
AllocaInst *AI = dyn_cast<AllocaInst>(I++);
if (AI == 0) continue;
// If the alloca is now dead, remove it. This often occurs due to code
// specialization.
if (AI->use_empty()) {
AI->eraseFromParent();
continue;
}
if (!isa<Constant>(AI->getArraySize()))
continue;
// Keep track of the static allocas that we inline into the caller.
IFI.StaticAllocas.push_back(AI);
// Scan for the block of allocas that we can move over, and move them
// all at once.
while (isa<AllocaInst>(I) &&
isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
++I;
}
// Transfer all of the allocas over in a block. Using splice means
// that the instructions aren't removed from the symbol table, then
// reinserted.
Caller->getEntryBlock().getInstList().splice(InsertPoint,
FirstNewBlock->getInstList(),
AI, I);
}
}
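The splice at the end relinks the alloca range in place rather than copying it. A small std::list model of the container semantics the comment appeals to (no LLVM types involved):
#include <cassert>
#include <iterator>
#include <list>
int main() {
  std::list<int> callerEntry = { 1, 2 };             // caller's existing allocas
  std::list<int> firstNewBlock = { 10, 11, 12, 99 }; // inlined entry block
  auto first = firstNewBlock.begin();                // AI: first static alloca
  auto last = std::next(first, 3);                   // I: first non-alloca
  // Move [first, last) without destroying and recreating the nodes; the
  // LLVM analogue keeps the instructions registered in the symbol table.
  callerEntry.splice(callerEntry.begin(), firstNewBlock, first, last);
  assert(callerEntry.size() == 5 && firstNewBlock.size() == 1);
  return 0;
}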
Example 5: BitCastInst
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
if (auto *I = simplifyAllocaArraySize(*this, AI))
return I;
if (AI.getAllocatedType()->isSized()) {
// If the alignment is 0 (unspecified), assign it the preferred alignment.
if (AI.getAlignment() == 0)
AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));
// Move all alloca's of zero byte objects to the entry block and merge them
// together. Note that we only do this for alloca's, because malloc should
// allocate and return a unique pointer, even for a zero byte allocation.
if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
// For a zero sized alloca there is no point in doing an array allocation.
// This is helpful if the array size is a complicated expression not used
// elsewhere.
if (AI.isArrayAllocation()) {
AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
return &AI;
}
// Get the first instruction in the entry block.
BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
if (FirstInst != &AI) {
// If the entry block doesn't start with a zero-size alloca then move
// this one to the start of the entry block. There is no problem with
// dominance as the array size was forced to a constant earlier already.
AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
AI.moveBefore(FirstInst);
return &AI;
}
// If the alignment of the entry block alloca is 0 (unspecified),
// assign it the preferred alignment.
if (EntryAI->getAlignment() == 0)
EntryAI->setAlignment(
DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
// Replace this zero-sized alloca with the one at the start of the entry
// block after ensuring that the address will be aligned enough for both
// types.
unsigned MaxAlign = std::max(EntryAI->getAlignment(),
AI.getAlignment());
EntryAI->setAlignment(MaxAlign);
if (AI.getType() != EntryAI->getType())
return new BitCastInst(EntryAI, AI.getType());
return replaceInstUsesWith(AI, EntryAI);
}
}
}
if (AI.getAlignment()) {
// Check to see if this allocation is only modified by a memcpy/memmove from
// a constant global whose alignment is equal to or exceeds that of the
// allocation. If this is the case, we can change all users to use
// the constant global instead. This is commonly produced by the CFE by
// constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
// is only subsequently read.
SmallVector<Instruction *, 4> ToDelete;
if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
unsigned SourceAlign = getOrEnforceKnownAlignment(
Copy->getSource(), AI.getAlignment(), DL, &AI, AC, DT);
if (AI.getAlignment() <= SourceAlign) {
DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
eraseInstFromFunction(*ToDelete[i]);
Constant *TheSrc = cast<Constant>(Copy->getSource());
Constant *Cast
= ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
Instruction *NewI = replaceInstUsesWith(AI, Cast);
eraseInstFromFunction(*Copy);
++NumGlobalCopies;
return NewI;
}
}
}
// At last, use the generic allocation site handler to aggressively remove
// unused allocas.
return visitAllocSite(AI);
}
Example 6: CollectRoots
/// runOnFunction - Insert code to maintain the shadow stack.
bool ShadowStackGC::performCustomLowering(Function &F) {
LLVMContext &Context = F.getContext();
// Find calls to llvm.gcroot.
CollectRoots(F);
// If there are no roots in this function, then there is no need to add a
// stack map entry for it.
if (Roots.empty())
return false;
// Build the constant map and figure the type of the shadow stack entry.
Value *FrameMap = GetFrameMap(F);
Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);
// Build the shadow stack entry at the very start of the function.
BasicBlock::iterator IP = F.getEntryBlock().begin();
IRBuilder<> AtEntry(IP->getParent(), IP);
Instruction *StackEntry = AtEntry.CreateAlloca(ConcreteStackEntryTy, 0,
"gc_frame");
while (isa<AllocaInst>(IP)) ++IP;
AtEntry.SetInsertPoint(IP->getParent(), IP);
// Initialize the map pointer and load the current head of the shadow stack.
Instruction *CurrentHead = AtEntry.CreateLoad(Head, "gc_currhead");
Instruction *EntryMapPtr = CreateGEP(Context, AtEntry, StackEntry,
0,1,"gc_frame.map");
AtEntry.CreateStore(FrameMap, EntryMapPtr);
// After all the allocas...
for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
// For each root, find the corresponding slot in the aggregate...
Value *SlotPtr = CreateGEP(Context, AtEntry, StackEntry, 1 + I, "gc_root");
// And use it in lieu of the alloca.
AllocaInst *OriginalAlloca = Roots[I].second;
SlotPtr->takeName(OriginalAlloca);
OriginalAlloca->replaceAllUsesWith(SlotPtr);
}
// Move past the original stores inserted by GCStrategy::InitRoots. This isn't
// really necessary (the collector would never see the intermediate state at
// runtime), but it's nicer not to push the half-initialized entry onto the
// shadow stack.
while (isa<StoreInst>(IP)) ++IP;
AtEntry.SetInsertPoint(IP->getParent(), IP);
// Push the entry onto the shadow stack.
Instruction *EntryNextPtr = CreateGEP(Context, AtEntry,
StackEntry,0,0,"gc_frame.next");
Instruction *NewHeadVal = CreateGEP(Context, AtEntry,
StackEntry, 0, "gc_newhead");
AtEntry.CreateStore(CurrentHead, EntryNextPtr);
AtEntry.CreateStore(NewHeadVal, Head);
// For each instruction that escapes...
EscapeEnumerator EE(F, "gc_cleanup");
while (IRBuilder<> *AtExit = EE.Next()) {
// Pop the entry from the shadow stack. Don't reuse CurrentHead from
// AtEntry, since that would make the value live for the entire function.
Instruction *EntryNextPtr2 = CreateGEP(Context, *AtExit, StackEntry, 0, 0,
"gc_frame.next");
Value *SavedHead = AtExit->CreateLoad(EntryNextPtr2, "gc_savedhead");
AtExit->CreateStore(SavedHead, Head);
}
// Delete the original allocas (which are no longer used) and the intrinsic
// calls (which are no longer valid). Doing this last avoids invalidating
// iterators.
for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
Roots[I].first->eraseFromParent();
Roots[I].second->eraseFromParent();
}
Roots.clear();
return true;
}
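The per-function types this pass synthesizes follow the shadow-stack layout described in LLVM's GC documentation. A rough C++ model of the runtime picture (field names are illustrative; the concrete struct is built by GetConcreteStackEntryType):
#include <cstdint>
struct FrameMap {
  int32_t NumRoots;    // number of gc_root slots in the frame
  int32_t NumMeta;     // number of roots that carry metadata
  const void *Meta[1]; // metadata for the first NumMeta roots
};
struct StackEntry {
  StackEntry *Next;    // gc_frame.next: link to the caller's entry
  const FrameMap *Map; // gc_frame.map: static description of the frame
  void *Roots[1];      // gc_root slots that replace the original allocas
};
// At function entry the pass effectively emits:
//   entry.Map  = &frameMap;  // store FrameMap, EntryMapPtr
//   entry.Next = Head;       // store CurrentHead, EntryNextPtr
//   Head       = &entry;     // store NewHeadVal, Head
// and at every exit:
//   Head = entry.Next;       // pop the frame (store SavedHead, Head)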
Example 7: convertInstruction
static void convertInstruction(Instruction *Inst, ConversionState &State) {
if (SExtInst *Sext = dyn_cast<SExtInst>(Inst)) {
Value *Op = Sext->getOperand(0);
Value *NewInst = NULL;
// If the operand to be extended is illegal, we first need to fill its
// upper bits (which are zero) with its sign bit.
if (shouldConvert(Op)) {
NewInst = getSignExtend(State.getConverted(Op), Op, Sext);
}
// If the converted type of the operand is the same as the converted
// type of the result, we won't actually be changing the type of the
// variable, just its value.
if (getPromotedType(Op->getType()) !=
getPromotedType(Sext->getType())) {
NewInst = new SExtInst(
NewInst ? NewInst : State.getConverted(Op),
getPromotedType(cast<IntegerType>(Sext->getType())),
Sext->getName() + ".sext", Sext);
}
// Now all the bits of the result are correct, but we need to restore
// the bits above its type to zero.
if (shouldConvert(Sext)) {
NewInst = getClearUpper(NewInst, Sext->getType(), Sext);
}
assert(NewInst && "Failed to convert sign extension");
State.recordConverted(Sext, NewInst);
} else if (ZExtInst *Zext = dyn_cast<ZExtInst>(Inst)) {
Value *Op = Zext->getOperand(0);
Value *NewInst = NULL;
// TODO(dschuff): Some of these zexts could be no-ops.
if (shouldConvert(Op)) {
NewInst = getClearUpper(State.getConverted(Op),
Op->getType(),
Zext);
}
// If the converted type of the operand is the same as the converted
// type of the result, we won't actually be changing the type of the
// variable, just its value.
if (getPromotedType(Op->getType()) !=
getPromotedType(Zext->getType())) {
NewInst = CastInst::CreateZExtOrBitCast(
NewInst ? NewInst : State.getConverted(Op),
getPromotedType(cast<IntegerType>(Zext->getType())),
"", Zext);
}
assert(NewInst);
State.recordConverted(Zext, NewInst);
} else if (TruncInst *Trunc = dyn_cast<TruncInst>(Inst)) {
Value *Op = Trunc->getOperand(0);
Value *NewInst = NULL;
// If the converted type of the operand is the same as the converted
// type of the result, we won't actually be changing the type of the
// variable, just its value.
if (getPromotedType(Op->getType()) !=
getPromotedType(Trunc->getType())) {
NewInst = new TruncInst(
State.getConverted(Op),
getPromotedType(cast<IntegerType>(Trunc->getType())),
State.getConverted(Op)->getName() + ".trunc",
Trunc);
}
// Restoring the upper-bits-are-zero invariant effectively truncates the
// value.
if (shouldConvert(Trunc)) {
NewInst = getClearUpper(NewInst ? NewInst : Op,
Trunc->getType(),
Trunc);
}
assert(NewInst);
State.recordConverted(Trunc, NewInst);
} else if (AllocaInst *Alloc = dyn_cast<AllocaInst>(Inst)) {
// Don't handle arrays of illegal types, but we could handle an array
// with size specified as an illegal type, as unlikely as that seems.
if (shouldConvert(Alloc) && Alloc->isArrayAllocation())
report_fatal_error("Can't convert arrays of illegal type");
AllocaInst *NewInst = new AllocaInst(
getPromotedType(Alloc->getAllocatedType()),
State.getConverted(Alloc->getArraySize()),
"", Alloc);
NewInst->setAlignment(Alloc->getAlignment());
State.recordConverted(Alloc, NewInst);
} else if (BitCastInst *BCInst = dyn_cast<BitCastInst>(Inst)) {
// Only handle pointers. Ints can't be cast to/from other ints.
Type *DestType = shouldConvert(BCInst) ?
getPromotedType(BCInst->getDestTy()) : BCInst->getDestTy();
BitCastInst *NewInst = new BitCastInst(
State.getConverted(BCInst->getOperand(0)),
DestType,
"", BCInst);
State.recordConverted(BCInst, NewInst);
} else if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
if (shouldConvert(Load)) {
splitLoad(Load, State);
}
} else if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
if (shouldConvert(Store->getValueOperand())) {
splitStore(Store, State);
}
} else if (isa<CallInst>(Inst)) {
report_fatal_error("can't convert calls with illegal types");
//......... some code omitted here .........
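getPromotedType is not shown in this excerpt; judging from its uses, it rounds an illegal integer width up to the next legal power-of-two width and leaves already-legal types alone. A sketch of that width computation, stated as an assumption about the helper:
// Assumed behaviour: round a bit width up to the next power of two that is
// at least 8, e.g. i24 -> i32, i33 -> i64; legal widths map to themselves.
unsigned promotedBitWidth(unsigned bits) {
  unsigned w = 8;
  while (w < bits)
    w *= 2; // 8, 16, 32, 64, ...
  return w;
}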
Example 8: boxed
static Value *julia_to_native(Type *ty, jl_value_t *jt, Value *jv,
jl_value_t *aty, bool addressOf,
bool byRef, bool inReg,
bool needCopy,
int argn, jl_codectx_t *ctx,
bool *needStackRestore)
{
Type *vt = jv->getType();
// The native type is Any (jl_value_t*), so pass the value boxed.
if (ty == jl_pvalue_llvmt) {
return boxed(jv,ctx);
}
if (ty == vt && !addressOf && !byRef) {
return jv;
}
if (vt != jl_pvalue_llvmt) {
// argument value is unboxed
if (addressOf || (byRef && inReg)) {
if (ty->isPointerTy() && ty->getContainedType(0)==vt) {
// pass the address of an alloca'd thing, not a box
// since those are immutable.
*needStackRestore = true;
Value *slot = builder.CreateAlloca(vt);
builder.CreateStore(jv, slot);
return builder.CreateBitCast(slot, ty);
}
}
else if ((vt->isIntegerTy() && ty->isIntegerTy()) ||
(vt->isFloatingPointTy() && ty->isFloatingPointTy()) ||
(vt->isPointerTy() && ty->isPointerTy())) {
if (vt->getPrimitiveSizeInBits() ==
ty->getPrimitiveSizeInBits()) {
if (!byRef) {
return builder.CreateBitCast(jv, ty);
}
else {
*needStackRestore = true;
Value *mem = builder.CreateAlloca(ty);
builder.CreateStore(jv,builder.CreateBitCast(mem,vt->getPointerTo()));
return mem;
}
}
}
else if (vt->isStructTy()) {
if (!byRef) {
return jv;
}
else {
*needStackRestore = true;
Value *mem = builder.CreateAlloca(vt);
builder.CreateStore(jv,mem);
return mem;
}
}
emit_error("ccall: argument type did not match declaration", ctx);
}
if (jl_is_tuple(jt)) {
return emit_unbox(ty,jv,jt);
}
if (jl_is_cpointer_type(jt) && addressOf) {
assert(ty->isPointerTy());
jl_value_t *ety = jl_tparam0(jt);
if (aty != ety && ety != (jl_value_t*)jl_any_type && jt != (jl_value_t*)jl_voidpointer_type) {
std::stringstream msg;
msg << "ccall argument ";
msg << argn;
emit_typecheck(jv, ety, msg.str(), ctx);
}
if (jl_is_mutable_datatype(ety)) {
// no copy, just reference the data field
return builder.CreateBitCast(jv, ty);
}
else if (jl_is_immutable_datatype(ety) && jt != (jl_value_t*)jl_voidpointer_type) {
// yes copy
Value *nbytes;
if (jl_is_leaf_type(ety))
nbytes = ConstantInt::get(T_int32, jl_datatype_size(ety));
else
nbytes = tbaa_decorate(tbaa_datatype, builder.CreateLoad(
builder.CreateGEP(builder.CreatePointerCast(emit_typeof(jv), T_pint32),
ConstantInt::get(T_size, offsetof(jl_datatype_t,size)/sizeof(int32_t))),
false));
*needStackRestore = true;
AllocaInst *ai = builder.CreateAlloca(T_int8, nbytes);
ai->setAlignment(16);
builder.CreateMemCpy(ai, builder.CreateBitCast(jv, T_pint8), nbytes, 1);
return builder.CreateBitCast(ai, ty);
}
// emit maybe copy
*needStackRestore = true;
Value *jvt = emit_typeof(jv);
BasicBlock *mutableBB = BasicBlock::Create(getGlobalContext(),"is-mutable",ctx->f);
BasicBlock *immutableBB = BasicBlock::Create(getGlobalContext(),"is-immutable",ctx->f);
BasicBlock *afterBB = BasicBlock::Create(getGlobalContext(),"after",ctx->f);
Value *ismutable = builder.CreateTrunc(
tbaa_decorate(tbaa_datatype, builder.CreateLoad(
builder.CreateGEP(builder.CreatePointerCast(jvt, T_pint8),
ConstantInt::get(T_size, offsetof(jl_datatype_t,mutabl))),
//......... some code omitted here .........
Example 9: CS
/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
Value *cpyDest, Value *cpySrc,
uint64_t cpyLen, unsigned cpyAlign,
CallInst *C) {
// The general transformation to keep in mind is
//
// call @func(..., src, ...)
// memcpy(dest, src, ...)
//
// ->
//
// memcpy(dest, src, ...)
// call @func(..., dest, ...)
//
// Since moving the memcpy is technically awkward, we additionally check that
// src only holds uninitialized values at the moment of the call, meaning that
// the memcpy can be discarded rather than moved.
// Deliberately get the source and destination with bitcasts stripped away,
// because we'll need to do type comparisons based on the underlying type.
CallSite CS(C);
// Require that src be an alloca. This simplifies the reasoning considerably.
AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
if (!srcAlloca)
return false;
// Check that all of src is copied to dest.
if (!DL) return false;
ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
if (!srcArraySize)
return false;
uint64_t srcSize = DL->getTypeAllocSize(srcAlloca->getAllocatedType()) *
srcArraySize->getZExtValue();
if (cpyLen < srcSize)
return false;
// Check that accessing the first srcSize bytes of dest will not cause a
// trap. Otherwise the transform is invalid since it might cause a trap
// to occur earlier than it otherwise would.
if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
// The destination is an alloca. Check it is larger than srcSize.
ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
if (!destArraySize)
return false;
uint64_t destSize = DL->getTypeAllocSize(A->getAllocatedType()) *
destArraySize->getZExtValue();
if (destSize < srcSize)
return false;
} else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
// If the destination is an sret parameter then only accesses that are
// outside of the returned struct type can trap.
if (!A->hasStructRetAttr())
return false;
Type *StructTy = cast<PointerType>(A->getType())->getElementType();
if (!StructTy->isSized()) {
// The call may never return and hence the copy-instruction may never
// be executed, and therefore it's not safe to say "the destination
// has at least <cpyLen> bytes", as implied by the copy-instruction.
return false;
}
uint64_t destSize = DL->getTypeAllocSize(StructTy);
if (destSize < srcSize)
return false;
} else {
return false;
}
// Check that dest points to memory that is at least as aligned as src.
unsigned srcAlign = srcAlloca->getAlignment();
if (!srcAlign)
srcAlign = DL->getABITypeAlignment(srcAlloca->getAllocatedType());
bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
// If dest is not aligned enough and we can't increase its alignment then
// bail out.
if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
return false;
// Check that src is not accessed except via the call and the memcpy. This
// guarantees that it holds only undefined values when passed in (so the final
// memcpy can be dropped), that it is not read or written between the call and
// the memcpy, and that writing beyond the end of it is undefined.
SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
srcAlloca->user_end());
while (!srcUseList.empty()) {
User *U = srcUseList.pop_back_val();
if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
for (User *UU : U->users())
srcUseList.push_back(UU);
//......... some code omitted here .........
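At the source level, the rewrite this function validates looks roughly like the following illustrative C++ analogue, where produce() stands in for the call C:
#include <cstring>
struct S { int a[4]; };
void produce(S *out); // writes its result through `out`
// Before: the call fills a local, which is then copied to the destination.
void before(S *dst) {
  S tmp;                             // srcAlloca
  produce(&tmp);                     // the call C
  std::memcpy(dst, &tmp, sizeof(S)); // the memcpy being analyzed
}
// After call slot optimization: tmp held only uninitialized bytes at the
// call, and dst is provably large and aligned enough, so the call writes
// straight into dst; both tmp and the memcpy disappear.
void after(S *dst) {
  produce(dst);
}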
Example 10: initEnv
static int initEnv(Module *mainModule) {
/*
nArgcP = alloc oldArgc->getType()
nArgvV = alloc oldArgv->getType()
store oldArgc nArgcP
store oldArgv nArgvP
klee_init_environment(nArgcP, nArgvP)
nArgc = load nArgcP
nArgv = load nArgvP
oldArgc->replaceAllUsesWith(nArgc)
oldArgv->replaceAllUsesWith(nArgv)
*/
Function *mainFn = mainModule->getFunction(EntryPoint);
if (mainFn->arg_size() < 2) {
klee_error("Cannot handle ""--posix-runtime"" when main() has less than two arguments.\n");
}
Instruction* firstInst = mainFn->begin()->begin();
Value* oldArgc = mainFn->arg_begin();
Value* oldArgv = ++mainFn->arg_begin();
AllocaInst* argcPtr =
new AllocaInst(oldArgc->getType(), "argcPtr", firstInst);
AllocaInst* argvPtr =
new AllocaInst(oldArgv->getType(), "argvPtr", firstInst);
/* Insert void klee_init_env(int* argc, char*** argv) */
std::vector<const Type*> params;
params.push_back(Type::getInt32Ty(getGlobalContext()));
params.push_back(Type::getInt32Ty(getGlobalContext()));
Function* initEnvFn =
cast<Function>(mainModule->getOrInsertFunction("klee_init_env",
Type::getVoidTy(getGlobalContext()),
argcPtr->getType(),
argvPtr->getType(),
NULL));
assert(initEnvFn);
std::vector<Value*> args;
args.push_back(argcPtr);
args.push_back(argvPtr);
#if LLVM_VERSION_CODE >= LLVM_VERSION(3, 0)
Instruction* initEnvCall = CallInst::Create(initEnvFn, args,
"", firstInst);
#else
Instruction* initEnvCall = CallInst::Create(initEnvFn, args.begin(), args.end(),
"", firstInst);
#endif
Value *argc = new LoadInst(argcPtr, "newArgc", firstInst);
Value *argv = new LoadInst(argvPtr, "newArgv", firstInst);
oldArgc->replaceAllUsesWith(argc);
oldArgv->replaceAllUsesWith(argv);
new StoreInst(oldArgc, argcPtr, initEnvCall);
new StoreInst(oldArgv, argvPtr, initEnvCall);
return 0;
}
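In source terms, the IR built by initEnv wraps main's arguments like the following illustrative C++ (shimmed_main is a hypothetical name for the rewritten entry point):
extern "C" void klee_init_env(int *argcPtr, char ***argvPtr);
int shimmed_main(int argc, char **argv) {
  int argcCopy = argc;                 // store oldArgc -> argcPtr
  char **argvCopy = argv;              // store oldArgv -> argvPtr
  klee_init_env(&argcCopy, &argvCopy); // may rewrite both in place
  int newArgc = argcCopy;              // load argcPtr; replaces uses of argc
  char **newArgv = argvCopy;           // load argvPtr; replaces uses of argv
  (void)newArgv;
  return newArgc;
}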
Example 11: while
//
// Method: insertBadAllocationSizes()
//
// Description:
// This method will look for allocations and change their size to be
// incorrect. It does the following:
// o) Changes the number of array elements allocated by alloca and malloc.
//
// Return value:
// true - The module was modified.
// false - The module was left unmodified.
//
bool
FaultInjector::insertBadAllocationSizes (Function & F) {
// Worklist of allocation sites to rewrite
std::vector<AllocaInst * > WorkList;
for (Function::iterator fI = F.begin(), fE = F.end(); fI != fE; ++fI) {
BasicBlock & BB = *fI;
for (BasicBlock::iterator I = BB.begin(), bE = BB.end(); I != bE; ++I) {
if (AllocaInst * AI = dyn_cast<AllocaInst>(I)) {
if (AI->isArrayAllocation()) {
// Skip if we should not insert a fault.
if (!doFault()) continue;
WorkList.push_back(AI);
}
}
}
}
while (WorkList.size()) {
AllocaInst * AI = WorkList.back();
WorkList.pop_back();
//
// Print information about where the fault is being inserted.
//
printSourceInfo ("Bad allocation size", AI);
Instruction * NewAlloc = 0;
NewAlloc = new AllocaInst (AI->getAllocatedType(),
ConstantInt::get(Int32Type,0),
AI->getAlignment(),
AI->getName(),
AI);
AI->replaceAllUsesWith (NewAlloc);
AI->eraseFromParent();
++BadSizes;
}
//
// Try harder to make bad allocation sizes.
//
WorkList.clear();
for (Function::iterator fI = F.begin(), fE = F.end(); fI != fE; ++fI) {
BasicBlock & BB = *fI;
for (BasicBlock::iterator I = BB.begin(), bE = BB.end(); I != bE; ++I) {
if (AllocaInst * AI = dyn_cast<AllocaInst>(I)) {
//
// Determine if this is a data type that we can make smaller.
//
if (((TD->getTypeAllocSize(AI->getAllocatedType())) > 4) && doFault()) {
WorkList.push_back(AI);
}
}
}
}
//
// Replace these allocations with an allocation of an integer and cast the
// result back into the appropriate type.
//
while (WorkList.size()) {
AllocaInst * AI = WorkList.back();
WorkList.pop_back();
Instruction * NewAlloc = 0;
NewAlloc = new AllocaInst (Int32Type,
AI->getArraySize(),
AI->getAlignment(),
AI->getName(),
AI);
NewAlloc = castTo (NewAlloc, AI->getType(), "", AI);
AI->replaceAllUsesWith (NewAlloc);
AI->eraseFromParent();
++BadSizes;
}
return (BadSizes > 0);
}
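Seen from the source level, the planted fault amounts to the following illustrative analogue (not code from the pass): the allocation keeps its type but loses its elements, so the very first access is out of bounds.
#include <alloca.h> // non-portable; for illustration only
void faulty(void) {
  // Before injection: int *buf = (int *)alloca(8 * sizeof(int));
  int *buf = (int *)alloca(0 * sizeof(int)); // zero-element allocation
  buf[0] = 1; // out-of-bounds write that a memory-safety checker should flag
}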
Example 12: assert
//......... some code omitted here .........
CallInst::Create(trackInitInst, Args, "", CI);
toDelete.push_back(CI);
}
}
for(Value::use_iterator User = checkTypeInst->use_begin(); User != checkTypeInst->use_end(); ++User) {
CallInst *CI = dyn_cast<CallInst>(*User);
assert(CI);
if(TS->isTypeSafe(CI->getOperand(4)->stripPointerCasts(), CI->getParent()->getParent())) {
toDelete.push_back(CI);
}
}
for(Value::use_iterator User = trackStoreInst->use_begin(); User != trackStoreInst->use_end(); ++User) {
CallInst *CI = dyn_cast<CallInst>(*User);
assert(CI);
if(TS->isTypeSafe(CI->getOperand(1)->stripPointerCasts(), CI->getParent()->getParent())) {
toDelete.push_back(CI);
}
}
// For allocas that are type-known, assume they are initialized with TOP.
for(Value::use_iterator User = trackUnInitInst->use_begin(); User != trackUnInitInst->use_end(); ) {
CallInst *CI = dyn_cast<CallInst>(*(User++));
assert(CI);
// check if operand is an alloca inst.
if(TS->isTypeSafe(CI->getOperand(1)->stripPointerCasts(), CI->getParent()->getParent())) {
CI->setCalledFunction(trackInitInst);
if(AllocaInst *AI = dyn_cast<AllocaInst>(CI->getOperand(1)->stripPointerCasts())) {
// Initialize the allocation to NULL
std::vector<Value *> Args2;
Args2.push_back(CI->getOperand(1));
Args2.push_back(ConstantInt::get(Int8Ty, 0));
Args2.push_back(CI->getOperand(2));
Args2.push_back(ConstantInt::get(Int32Ty, AI->getAlignment()));
CallInst::Create(memsetF, Args2, "", CI);
}
}
}
if(MallocFunc) {
for(Value::use_iterator User = MallocFunc->use_begin(); User != MallocFunc->use_end(); User ++) {
CallInst *CI = dyn_cast<CallInst>(*User);
if(!CI)
continue;
if(TS->isTypeSafe(CI, CI->getParent()->getParent())){
CastInst *BCI = BitCastInst::CreatePointerCast(CI, VoidPtrTy);
CastInst *Size = CastInst::CreateSExtOrBitCast(CI->getOperand(1), Int64Ty);
Size->insertAfter(CI);
BCI->insertAfter(Size);
std::vector<Value *>Args;
Args.push_back(BCI);
Args.push_back(Size);
Args.push_back(ConstantInt::get(Int32Ty, 0));
CallInst *CINew = CallInst::Create(trackInitInst, Args);
CINew->insertAfter(BCI);
}
}
}
// also do for mallocs/calloc/other allocators???
Example 13: Values
void PromoteMem2Reg::run() {
Function &F = *DT.getRoot()->getParent();
if (AST) PointerAllocaValues.resize(Allocas.size());
AllocaDbgDeclares.resize(Allocas.size());
AllocaInfo Info;
LargeBlockInfo LBI;
for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
AllocaInst *AI = Allocas[AllocaNum];
assert(isAllocaPromotable(AI) &&
"Cannot promote non-promotable alloca!");
assert(AI->getParent()->getParent() == &F &&
"All allocas should be in the same function, which is same as DF!");
removeLifetimeIntrinsicUsers(AI);
if (AI->use_empty()) {
// If there are no uses of the alloca, just delete it now.
if (AST) AST->deleteValue(AI);
AI->eraseFromParent();
// Remove the alloca from the Allocas list, since it has been processed
RemoveFromAllocasList(AllocaNum);
++NumDeadAlloca;
continue;
}
// Calculate the set of read and write-locations for each alloca. This is
// analogous to finding the 'uses' and 'definitions' of each variable.
Info.AnalyzeAlloca(AI);
// If there is only a single store to this value, replace any loads of
// it that are directly dominated by the definition with the value stored.
if (Info.DefiningBlocks.size() == 1) {
RewriteSingleStoreAlloca(AI, Info, LBI);
// Finally, after the scan, check to see if the store is all that is left.
if (Info.UsingBlocks.empty()) {
// Record debuginfo for the store and remove the declaration's
// debuginfo.
if (DbgDeclareInst *DDI = Info.DbgDeclare) {
if (!DIB)
DIB = new DIBuilder(*DDI->getParent()->getParent()->getParent());
ConvertDebugDeclareToDebugValue(DDI, Info.OnlyStore, *DIB);
DDI->eraseFromParent();
}
// Remove the (now dead) store and alloca.
Info.OnlyStore->eraseFromParent();
LBI.deleteValue(Info.OnlyStore);
if (AST) AST->deleteValue(AI);
AI->eraseFromParent();
LBI.deleteValue(AI);
// The alloca has been processed, move on.
RemoveFromAllocasList(AllocaNum);
++NumSingleStore;
continue;
}
}
// If the alloca is only read and written in one basic block, just perform a
// linear sweep over the block to eliminate it.
if (Info.OnlyUsedInOneBlock) {
PromoteSingleBlockAlloca(AI, Info, LBI);
// Finally, after the scan, check to see if the stores are all that is
// left.
if (Info.UsingBlocks.empty()) {
// Remove the (now dead) stores and alloca.
while (!AI->use_empty()) {
StoreInst *SI = cast<StoreInst>(AI->use_back());
// Record debuginfo for the store before removing it.
if (DbgDeclareInst *DDI = Info.DbgDeclare) {
if (!DIB)
DIB = new DIBuilder(*SI->getParent()->getParent()->getParent());
ConvertDebugDeclareToDebugValue(DDI, SI, *DIB);
}
SI->eraseFromParent();
LBI.deleteValue(SI);
}
if (AST) AST->deleteValue(AI);
AI->eraseFromParent();
LBI.deleteValue(AI);
// The alloca has been processed, move on.
RemoveFromAllocasList(AllocaNum);
// The alloca's debuginfo can be removed as well.
if (DbgDeclareInst *DDI = Info.DbgDeclare)
DDI->eraseFromParent();
++NumLocalPromoted;
continue;
//......... some code omitted here .........
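The single-store case handled by RewriteSingleStoreAlloca corresponds to this source-level picture (illustrative):
int consume(int);
// Before promotion: exactly one store to x, and every load of x is
// dominated by that store.
int before(int v) {
  int x;             // %x = alloca i32
  x = v;             // store i32 %v, i32* %x   (the only store)
  return consume(x); // %1 = load i32* %x  ->  replaced by %v
}
// After promotion the alloca, the store, and the load are all gone:
int after(int v) { return consume(v); }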
Example 14: assert
//
// Method: TransformCSSAllocasToMallocs()
//
// Description:
// This method is given the set of DSNodes from the stack safety pass that
// have been marked for promotion. It then finds all alloca instructions
// that have not been marked type-unknown and promotes them to heap
// allocations.
//
void
ConvertUnsafeAllocas::TransformCSSAllocasToMallocs (Module & M,
std::set<DSNode *> & cssAllocaNodes) {
for (Module::iterator FI = M.begin(); FI != M.end(); ++FI) {
//
// Skip functions that have no DSGraph. These are probably functions with
// no function body and hence cannot be analyzed.
//
if (!(budsPass->hasDSGraph (*FI))) continue;
//
// Get the DSGraph for the current function.
//
DSGraph *DSG = budsPass->getDSGraph(*FI);
//
// Search for alloca instructions that need promotion and add them to the
// worklist.
//
std::vector<AllocaInst *> Worklist;
for (Function::iterator BB = FI->begin(); BB != FI->end(); ++BB) {
for (BasicBlock::iterator ii = BB->begin(); ii != BB->end(); ++ii) {
Instruction * I = ii;
if (AllocaInst * AI = dyn_cast<AllocaInst>(I)) {
//
// Get the DSNode for the allocation.
//
DSNode *DSN = DSG->getNodeForValue(AI).getNode();
assert (DSN && "No DSNode for alloca!\n");
//
// If the alloca is type-known, we do not need to promote it, so
// don't bother with it.
//
if (DSN->isNodeCompletelyFolded()) continue;
//
// Determine if the DSNode for the alloca is one of those marked as
// unsafe by the stack safety analysis pass. If not, then we do not
// need to promote it.
//
if (cssAllocaNodes.find(DSN) == cssAllocaNodes.end()) continue;
//
// If the DSNode for this alloca is already listed in the
// unsafeAllocaNode vector, remove it since we are processing it here
//
std::list<DSNode *>::iterator NodeI = find (unsafeAllocaNodes.begin(),
unsafeAllocaNodes.end(),
DSN);
if (NodeI != unsafeAllocaNodes.end()) {
unsafeAllocaNodes.erase(NodeI);
}
//
// This alloca needs to be changed to a malloc. Add it to the
// worklist.
//
Worklist.push_back (AI);
}
}
}
//
// Update the statistics.
//
if (Worklist.size())
ConvAllocas += Worklist.size();
//
// Convert everything in the worklist into a malloc instruction.
//
while (Worklist.size()) {
//
// Grab an alloca from the worklist.
//
AllocaInst * AI = Worklist.back();
Worklist.pop_back();
//
// Get the DSNode for this alloca.
//
DSNode *DSN = DSG->getNodeForValue(AI).getNode();
assert (DSN && "No DSNode for alloca!\n");
//
// Promote the alloca and remove it from the program.
//
promoteAlloca (AI, DSN);
AI->getParent()->getInstList().erase(AI);
//......... some code omitted here .........
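promoteAlloca itself is not shown in this excerpt; its effect is the usual stack-to-heap rewrite, roughly as in this illustrative analogue (the real pass must likewise arrange for the heap memory to be released):
#include <cstdlib>
void use(int *);
void beforePromotion() {
  int buf[16]; // unsafe alloca flagged by the stack safety analysis
  use(buf);
}
void afterPromotion() {
  int *buf = static_cast<int *>(std::malloc(16 * sizeof(int)));
  use(buf);
  std::free(buf); // deallocation the pass has to account for
}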
Example 15: InlineCallIfPossible
/// InlineCallIfPossible - If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR. The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller. If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
InlinedArrayAllocasTy &InlinedArrayAllocas,
int InlineHistory, bool InsertLifetime) {
Function *Callee = CS.getCalledFunction();
Function *Caller = CS.getCaller();
// Try to inline the function. Get the list of static allocas that were
// inlined.
if (!InlineFunction(CS, IFI, InsertLifetime))
return false;
// If the inlined function had a higher stack protection level than the
// calling function, then bump up the caller's stack protection level.
if (Callee->hasFnAttr(Attribute::StackProtectReq))
Caller->addFnAttr(Attribute::StackProtectReq);
else if (Callee->hasFnAttr(Attribute::StackProtect) &&
!Caller->hasFnAttr(Attribute::StackProtectReq))
Caller->addFnAttr(Attribute::StackProtect);
// Look at all of the allocas that we inlined through this call site. If we
// have already inlined other allocas through other calls into this function,
// then we know that they have disjoint lifetimes and that we can merge them.
//
// There are many heuristics possible for merging these allocas, and the
// different options have different tradeoffs. One thing that we *really*
// don't want to hurt is SRoA: once inlining happens, often allocas are no
// longer address taken and so they can be promoted.
//
// Our "solution" for that is to only merge allocas whose outermost type is an
// array type. These are usually not promoted because someone is using a
// variable index into them. These are also often the most important ones to
// merge.
//
// A better solution would be to have real memory lifetime markers in the IR
// and not have the inliner do any merging of allocas at all. This would
// allow the backend to do proper stack slot coloring of all allocas that
// *actually make it to the backend*, which is really what we want.
//
// Because we don't have this information, we do this simple and useful hack.
//
SmallPtrSet<AllocaInst*, 16> UsedAllocas;
// When processing our SCC, check to see if CS was inlined from some other
// call site. For example, if we're processing "A" in this code:
// A() { B() }
// B() { x = alloca ... C() }
// C() { y = alloca ... }
// Assume that C was not inlined into B initially, and so we're processing A
// and decide to inline B into A. Doing this makes an alloca available for
// reuse and makes a callsite (C) available for inlining. When we process
// the C call site we don't want to do any alloca merging between X and Y
// because their scopes are not disjoint. We could make this smarter by
// keeping track of the inline history for each alloca in the
// InlinedArrayAllocas but this isn't likely to be a significant win.
if (InlineHistory != -1) // Only do merging for top-level call sites in SCC.
return true;
// Loop over all the allocas we have so far and see if they can be merged with
// a previously inlined alloca. If not, remember that we had it.
for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size();
AllocaNo != e; ++AllocaNo) {
AllocaInst *AI = IFI.StaticAllocas[AllocaNo];
// Don't bother trying to merge array allocations (they will usually be
// canonicalized to be an allocation *of* an array), or allocations whose
// type is not itself an array (because we're afraid of pessimizing SRoA).
ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
if (ATy == 0 || AI->isArrayAllocation())
continue;
// Get the list of all available allocas for this array type.
std::vector<AllocaInst*> &AllocasForType = InlinedArrayAllocas[ATy];
// Loop over the allocas in AllocasForType to see if we can reuse one. Note
// that we have to be careful not to reuse the same "available" alloca for
// multiple different allocas that we just inlined, we use the 'UsedAllocas'
// set to keep track of which "available" allocas are being used by this
// function. Also, AllocasForType can be empty of course!
bool MergedAwayAlloca = false;
for (unsigned i = 0, e = AllocasForType.size(); i != e; ++i) {
AllocaInst *AvailableAlloca = AllocasForType[i];
// The available alloca has to be in the right function, not in some other
// function in this SCC.
if (AvailableAlloca->getParent() != AI->getParent())
continue;
// If the inlined function already uses this alloca then we can't reuse
// it.
if (!UsedAllocas.insert(AvailableAlloca))
continue;
//......... some code omitted here .........
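The merging condition is easiest to see concretely (illustrative): after a callee carrying an [N x T] alloca is inlined twice into the same caller along paths with disjoint lifetimes, one stack slot can serve both copies.
void fill(char *);
// Conceptually, after inlining B() twice into A():
void A_before() {
  { char bufFromFirstInline[256];  fill(bufFromFirstInline); }
  { char bufFromSecondInline[256]; fill(bufFromSecondInline); }
}
// With disjoint lifetimes the inliner reuses the first array alloca:
void A_after() {
  char sharedBuf[256];
  fill(sharedBuf);
  fill(sharedBuf);
}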