本文整理汇总了C++中CallInst类的典型用法代码示例。如果您正苦于以下问题:C++ CallInst类的具体用法?C++ CallInst怎么用?C++ CallInst使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了CallInst类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: processStore
/// processStore - Try to simplify a simple (non-atomic, non-volatile) store.
/// Two transforms are attempted:
///   1. If the stored value comes from a load fed directly by a call, the
///      load/store pair may really implement call slot forwarding; hand it
///      to performCallSlotOptzn.
///   2. If the stored value is a byte-splattable pattern, try to merge this
///      store with neighbours into a memset via tryMergingIntoMemset.
///
/// \param SI   the store under inspection.
/// \param BBI  the caller's iterator into the block; updated when the store
///             is folded into a memset so the caller's walk stays valid.
/// \returns true if any IR was changed.
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach how to propagate the !nontemporal metadata to
  // memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {
      // The load must be clobbered by a call (and not by a memcpy, which is
      // handled elsewhere) for the call-slot transform to apply.
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        // Walk backwards from just before the store to the call; any
        // instruction that may read or write the store's destination
        // disqualifies the transform.
        for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
             I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        // Instructions may carry no explicit alignment; fall back to the
        // ABI alignment of the accessed type in that case.
        unsigned storeAlign = SI->getAlignment();
        if (!storeAlign)
          storeAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
        unsigned loadAlign = LI->getAlignment();
        if (!loadAlign)
          loadAlign = DL.getABITypeAlignment(LI->getType());

        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            std::min(storeAlign, loadAlign), C);
        if (changed) {
          // The load/store pair is now dead: drop both instructions and
          // purge their cached memory-dependence entries.
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'able a
  // byte at a time like "0" or "-1" or any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

  return false;
}
示例2: SplitBlock
bool LowerEmSetjmp::runOnModule(Module &M) {
TheModule = &M;
Function *Setjmp = TheModule->getFunction("setjmp");
Function *Longjmp = TheModule->getFunction("longjmp");
if (!Setjmp && !Longjmp) return false;
Type *i32 = Type::getInt32Ty(M.getContext());
Type *Void = Type::getVoidTy(M.getContext());
// Add functions
Function *EmSetjmp = NULL;
if (Setjmp) {
SmallVector<Type*, 2> EmSetjmpTypes;
EmSetjmpTypes.push_back(Setjmp->getFunctionType()->getParamType(0));
EmSetjmpTypes.push_back(i32); // extra param that says which setjmp in the function it is
FunctionType *EmSetjmpFunc = FunctionType::get(i32, EmSetjmpTypes, false);
EmSetjmp = Function::Create(EmSetjmpFunc, GlobalValue::ExternalLinkage, "emscripten_setjmp", TheModule);
}
Function *EmLongjmp = Longjmp ? Function::Create(Longjmp->getFunctionType(), GlobalValue::ExternalLinkage, "emscripten_longjmp", TheModule) : NULL;
SmallVector<Type*, 1> IntArgTypes;
IntArgTypes.push_back(i32);
FunctionType *IntIntFunc = FunctionType::get(i32, IntArgTypes, false);
Function *CheckLongjmp = Function::Create(IntIntFunc, GlobalValue::ExternalLinkage, "emscripten_check_longjmp", TheModule); // gets control flow
Function *GetLongjmpResult = Function::Create(IntIntFunc, GlobalValue::ExternalLinkage, "emscripten_get_longjmp_result", TheModule); // gets int value longjmp'd
FunctionType *VoidFunc = FunctionType::get(Void, false);
Function *PrepSetjmp = Function::Create(VoidFunc, GlobalValue::ExternalLinkage, "emscripten_prep_setjmp", TheModule);
Function *CleanupSetjmp = Function::Create(VoidFunc, GlobalValue::ExternalLinkage, "emscripten_cleanup_setjmp", TheModule);
Function *PreInvoke = TheModule->getFunction("emscripten_preinvoke");
if (!PreInvoke) PreInvoke = Function::Create(VoidFunc, GlobalValue::ExternalLinkage, "emscripten_preinvoke", TheModule);
FunctionType *IntFunc = FunctionType::get(i32, false);
Function *PostInvoke = TheModule->getFunction("emscripten_postinvoke");
if (!PostInvoke) PostInvoke = Function::Create(IntFunc, GlobalValue::ExternalLinkage, "emscripten_postinvoke", TheModule);
// Process all callers of setjmp and longjmp. Start with setjmp.
typedef std::vector<PHINode*> Phis;
typedef std::map<Function*, Phis> FunctionPhisMap;
FunctionPhisMap SetjmpOutputPhis;
std::vector<Instruction*> ToErase;
if (Setjmp) {
for (Instruction::user_iterator UI = Setjmp->user_begin(), UE = Setjmp->user_end(); UI != UE; ++UI) {
User *U = *UI;
if (CallInst *CI = dyn_cast<CallInst>(U)) {
BasicBlock *SJBB = CI->getParent();
// The tail is everything right after the call, and will be reached once when setjmp is
// called, and later when longjmp returns to the setjmp
BasicBlock *Tail = SplitBlock(SJBB, CI->getNextNode());
// Add a phi to the tail, which will be the output of setjmp, which indicates if this is the
// first call or a longjmp back. The phi directly uses the right value based on where we
// arrive from
PHINode *SetjmpOutput = PHINode::Create(i32, 2, "", Tail->getFirstNonPHI());
SetjmpOutput->addIncoming(ConstantInt::get(i32, 0), SJBB); // setjmp initial call returns 0
CI->replaceAllUsesWith(SetjmpOutput); // The proper output is now this, not the setjmp call itself
// longjmp returns to the setjmp will add themselves to this phi
Phis& P = SetjmpOutputPhis[SJBB->getParent()];
P.push_back(SetjmpOutput);
// fix call target
SmallVector<Value *, 2> Args;
Args.push_back(CI->getArgOperand(0));
Args.push_back(ConstantInt::get(i32, P.size())); // our index in the function is our place in the array + 1
CallInst::Create(EmSetjmp, Args, "", CI);
ToErase.push_back(CI);
} else {
errs() << **UI << "\n";
report_fatal_error("bad use of setjmp, should only call it");
}
}
}
// Update longjmp FIXME: we could avoid throwing in longjmp as an optimization when longjmping back into the current function perhaps?
if (Longjmp) Longjmp->replaceAllUsesWith(EmLongjmp);
// Update all setjmping functions
for (FunctionPhisMap::iterator I = SetjmpOutputPhis.begin(); I != SetjmpOutputPhis.end(); I++) {
Function *F = I->first;
Phis& P = I->second;
CallInst::Create(PrepSetjmp, "", F->begin()->begin());
// Update each call that can longjmp so it can return to a setjmp where relevant
for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ) {
BasicBlock *BB = BBI++;
for (BasicBlock::iterator Iter = BB->begin(), E = BB->end(); Iter != E; ) {
Instruction *I = Iter++;
CallInst *CI;
//.........这里部分代码省略.........
示例3: switch
//.........这里部分代码省略.........
break;
case Instruction::Store:
{
Value * sval = inst->getOperand(0);
Value * sadd = inst->getOperand(1);
addPtrTo(wrapValue(sadd), wrapValue(sval));
}
break;
case Instruction::GetElementPtr:
{
makeAlias(wrapValue(inst), handle_gep((GEPOperator*) inst));
}
break;
// conversion operations
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::UIToFP:
case Instruction::SIToFP:
case Instruction::BitCast:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
{
Value * itpv = inst->getOperand(0);
makeAlias(wrapValue(inst), wrapValue(itpv));
}
break;
// other operations
case Instruction::Invoke: // invoke is a terminal operation
{
InvokeInst * invoke = (InvokeInst*) inst;
LandingPadInst* lpd = invoke->getLandingPadInst();
parent_func->addLandingPad(invoke, lpd);
Value * cv = invoke->getCalledValue();
vector<Value*> args;
for (unsigned i = 0; i < invoke->getNumArgOperands(); i++) {
args.push_back(invoke->getArgOperand(i));
}
this->handle_invoke_call_inst(invoke, cv, &args, parent_func);
}
break;
case Instruction::Call:
{
CallInst * callinst = (CallInst*) inst;
if (callinst->isInlineAsm()) {
break;
}
Value * cv = callinst->getCalledValue();
vector<Value*> args;
for (unsigned i = 0; i < callinst->getNumArgOperands(); i++) {
args.push_back(callinst->getArgOperand(i));
}
this->handle_invoke_call_inst(callinst, cv, &args, parent_func);
}
break;
case Instruction::PHI:
{
PHINode *phi = (PHINode *) inst;
int nums = phi->getNumIncomingValues();
for (int i = 0; i < nums; i++) {
Value * p = phi->getIncomingValue(i);
makeAlias(wrapValue(inst), wrapValue(p));
}
}
break;
case Instruction::Select:
{
Value *first = ((SelectInst*) inst)->getTrueValue();
Value *second = ((SelectInst*) inst)->getFalseValue();
makeAlias(wrapValue(inst), wrapValue(first));
makeAlias(wrapValue(inst), wrapValue(second));
}
break;
case Instruction::VAArg:
{
parent_func->addVAArg(inst);
DyckVertex* vaarg = wrapValue(inst);
Value * ptrVaarg = inst->getOperand(0);
addPtrTo(wrapValue(ptrVaarg), vaarg);
}
break;
case Instruction::LandingPad: // handled with invoke inst
case Instruction::ICmp:
case Instruction::FCmp:
default:
break;
}
}
示例4: FindAllCleanupSelectors
//.........这里部分代码省略.........
if (!URoR) return CleanupSelectors(CatchAllSels);
}
SmallPtrSet<InvokeInst*, 32> URoRInvokes;
FindAllURoRInvokes(URoRInvokes);
SmallPtrSet<IntrinsicInst*, 32> SelsToConvert;
for (SmallPtrSet<IntrinsicInst*, 32>::iterator
SI = Sels.begin(), SE = Sels.end(); SI != SE; ++SI) {
const BasicBlock *SelBB = (*SI)->getParent();
for (SmallPtrSet<InvokeInst*, 32>::iterator
UI = URoRInvokes.begin(), UE = URoRInvokes.end(); UI != UE; ++UI) {
const BasicBlock *URoRBB = (*UI)->getParent();
if (DT->dominates(SelBB, URoRBB)) {
SelsToConvert.insert(*SI);
break;
}
}
}
bool Changed = false;
if (Sels.size() != SelsToConvert.size()) {
// If we haven't been able to convert all of the clean-up selectors, then
// loop through the slow way to see if they still need to be converted.
if (!ExceptionValueIntrinsic) {
ExceptionValueIntrinsic =
Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_exception);
if (!ExceptionValueIntrinsic)
return CleanupSelectors(CatchAllSels);
}
for (Value::use_iterator
I = ExceptionValueIntrinsic->use_begin(),
E = ExceptionValueIntrinsic->use_end(); I != E; ++I) {
IntrinsicInst *EHPtr = dyn_cast<IntrinsicInst>(*I);
if (!EHPtr || EHPtr->getParent()->getParent() != F) continue;
Changed |= PromoteEHPtrStore(EHPtr);
bool URoRInvoke = false;
SmallPtrSet<IntrinsicInst*, 8> SelCalls;
Changed |= FindSelectorAndURoR(EHPtr, URoRInvoke, SelCalls);
if (URoRInvoke) {
// This EH pointer is being used by an invoke of an URoR instruction and
// an eh.selector intrinsic call. If the eh.selector is a 'clean-up', we
// need to convert it to a 'catch-all'.
for (SmallPtrSet<IntrinsicInst*, 8>::iterator
SI = SelCalls.begin(), SE = SelCalls.end(); SI != SE; ++SI)
if (!HasCatchAllInSelector(*SI))
SelsToConvert.insert(*SI);
}
}
}
if (!SelsToConvert.empty()) {
// Convert all clean-up eh.selectors, which are associated with "invokes" of
// URoR calls, into catch-all eh.selectors.
Changed = true;
for (SmallPtrSet<IntrinsicInst*, 8>::iterator
SI = SelsToConvert.begin(), SE = SelsToConvert.end();
SI != SE; ++SI) {
IntrinsicInst *II = *SI;
// Use the exception object pointer and the personality function
// from the original selector.
CallSite CS(II);
IntrinsicInst::op_iterator I = CS.arg_begin();
IntrinsicInst::op_iterator E = CS.arg_end();
IntrinsicInst::op_iterator B = prior(E);
// Exclude last argument if it is an integer.
if (isa<ConstantInt>(B)) E = B;
// Add exception object pointer (front).
// Add personality function (next).
// Add in any filter IDs (rest).
SmallVector<Value*, 8> Args(I, E);
Args.push_back(EHCatchAllValue->getInitializer()); // Catch-all indicator.
CallInst *NewSelector =
CallInst::Create(SelectorIntrinsic, Args.begin(), Args.end(),
"eh.sel.catch.all", II);
NewSelector->setTailCall(II->isTailCall());
NewSelector->setAttributes(II->getAttributes());
NewSelector->setCallingConv(II->getCallingConv());
II->replaceAllUsesWith(NewSelector);
II->eraseFromParent();
}
}
Changed |= CleanupSelectors(CatchAllSels);
return Changed;
}
示例5: targetData
//
// Method: runOnModule()
//
// Description:
// Entry point for this LLVM pass.
// If a function returns a struct, make it return
// a pointer to the struct.
//
// Inputs:
// M - A reference to the LLVM module to transform
//
// Outputs:
// M - The transformed LLVM module.
//
// Return value:
// true - The module was modified.
// false - The module was not modified.
//
bool StructRet::runOnModule(Module& M) {
const llvm::DataLayout targetData(&M);
std::vector<Function*> worklist;
for (Module::iterator I = M.begin(); I != M.end(); ++I)
if (!I->mayBeOverridden()) {
if(I->hasAddressTaken())
continue;
if(I->getReturnType()->isStructTy()) {
worklist.push_back(I);
}
}
while(!worklist.empty()) {
Function *F = worklist.back();
worklist.pop_back();
Type *NewArgType = F->getReturnType()->getPointerTo();
// Construct the new Type
std::vector<Type*>TP;
TP.push_back(NewArgType);
for (Function::arg_iterator ii = F->arg_begin(), ee = F->arg_end();
ii != ee; ++ii) {
TP.push_back(ii->getType());
}
FunctionType *NFTy = FunctionType::get(F->getReturnType(), TP, F->isVarArg());
// Create the new function body and insert it into the module.
Function *NF = Function::Create(NFTy,
F->getLinkage(),
F->getName(), &M);
ValueToValueMapTy ValueMap;
Function::arg_iterator NI = NF->arg_begin();
NI->setName("ret");
++NI;
for (Function::arg_iterator II = F->arg_begin(); II != F->arg_end(); ++II, ++NI) {
ValueMap[II] = NI;
NI->setName(II->getName());
AttributeSet attrs = F->getAttributes().getParamAttributes(II->getArgNo() + 1);
if (!attrs.isEmpty())
NI->addAttr(attrs);
}
// Perform the cloning.
SmallVector<ReturnInst*,100> Returns;
if (!F->isDeclaration())
CloneFunctionInto(NF, F, ValueMap, false, Returns);
std::vector<Value*> fargs;
for(Function::arg_iterator ai = NF->arg_begin(),
ae= NF->arg_end(); ai != ae; ++ai) {
fargs.push_back(ai);
}
NF->setAttributes(NF->getAttributes().addAttributes(
M.getContext(), 0, F->getAttributes().getRetAttributes()));
NF->setAttributes(NF->getAttributes().addAttributes(
M.getContext(), ~0, F->getAttributes().getFnAttributes()));
for (Function::iterator B = NF->begin(), FE = NF->end(); B != FE; ++B) {
for (BasicBlock::iterator I = B->begin(), BE = B->end(); I != BE;) {
ReturnInst * RI = dyn_cast<ReturnInst>(I++);
if(!RI)
continue;
LoadInst *LI = dyn_cast<LoadInst>(RI->getOperand(0));
assert(LI && "Return should be preceded by a load instruction");
IRBuilder<> Builder(RI);
Builder.CreateMemCpy(fargs.at(0),
LI->getPointerOperand(),
targetData.getTypeStoreSize(LI->getType()),
targetData.getPrefTypeAlignment(LI->getType()));
}
}
for(Value::use_iterator ui = F->use_begin(), ue = F->use_end();
ui != ue; ) {
CallInst *CI = dyn_cast<CallInst>(*ui++);
if(!CI)
continue;
if(CI->getCalledFunction() != F)
continue;
if(CI->hasByValArgument())
continue;
AllocaInst *AllocaNew = new AllocaInst(F->getReturnType(), 0, "", CI);
//.........这里部分代码省略.........
示例6: dbgs
//.........这里部分代码省略.........
// if (inst_it != (*filter_paths)[pair_it->first].end()) {
// continue;
// }
// }
// (*filter_paths)[pair_it->first].push_back(pair_it->second);
// }
// }
dbgs() << "[filter_paths]: contain " << filter_paths->size() << " functions in all.\n";
for (func_bbs_type::iterator fbs_it = filter_paths->begin(); fbs_it != filter_paths->end(); fbs_it++) {
for (std::vector<Instruction*>::iterator bb_it2 = fbs_it->second.begin(); bb_it2 != fbs_it->second.end(); bb_it2++) {
dbgs() << "^^^^^^ " << fbs_it->first->getName() << ": " << (*bb_it2)->getParent()->getName() << '\n';
// to expand functions
call_insts.push_back((*bb_it2));
explore_basicblock_paths(fbs_it->first, (*bb_it2)->getParent(), &(*BB_paths_map)[std::make_pair(fbs_it->first, *bb_it2)]);
dbgs() << "^^^^^^ found " << (*BB_paths_map)[std::make_pair(fbs_it->first, *bb_it2)].size() << " basicblocks.\n";
}
}
llvm::dbgs() << "!!!!!!!! Found " << call_insts.size() << " call instructions.\n";
llvm::dbgs() << "!!!!!!!! Found " << path_basicblocks.size() << " path basicblocks.\n";
// expand functions
for (std::vector<Instruction*>::iterator ci_it = call_insts.begin(); ci_it != call_insts.end(); ci_it++) {
BasicBlock *call_bb = (*ci_it)->getParent();
if (!call_bb) {
continue;
}
for (BasicBlock::iterator inst = call_bb->begin(); inst != call_bb->end(); inst++) {
if (&*inst == *ci_it) {
break;
}
if (isa<CallInst>(&*inst)) {
std::vector<Instruction*>::iterator ci = std::find(path_call_insts.begin(), path_call_insts.end(), &*inst);
if (ci != path_call_insts.end())
continue;
path_call_insts.push_back(&*inst);
}
}
}
llvm::dbgs() << "@@@@@@@@ After search call_insts, found " << path_call_insts.size() << " call instructions.\n";
for (std::vector<BasicBlock*>::iterator p_bb_it = path_basicblocks.begin(); p_bb_it != path_basicblocks.end(); p_bb_it++) {
for (BasicBlock::iterator inst = (*p_bb_it)->begin(); inst != (*p_bb_it)->end(); inst++) {
if (isa<CallInst>(&*inst)) {
std::vector<Instruction*>::iterator ci = std::find(path_call_insts.begin(), path_call_insts.end(), &*inst);
if (ci != path_call_insts.end())
continue;
path_call_insts.push_back(&*inst);
}
}
}
llvm::dbgs() << "@@@@@@@@ After search path_basicblocks, found " << path_call_insts.size() << " call instructions.\n";
for (std::vector<Instruction*>::iterator iit = path_call_insts.begin(); iit != path_call_insts.end(); iit++) {
CallInst *ci = dyn_cast<CallInst>(*iit);
if (!ci)
continue;
Function *ff = ci->getCalledFunction();
if (!ff) {
//ci->dump();
//dbgs() << "\t[called value] " << ci->getOperand(0)->getName() << '\n';
continue;
}
std::vector<Function*>::iterator fit = std::find(otherCalledFuncs->begin(), otherCalledFuncs->end(), ff);
if (fit == otherCalledFuncs->end())
示例7: if
bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
SmallVector<ReturnInst*,16> Returns;
SmallVector<UnwindInst*,16> Unwinds;
SmallVector<InvokeInst*,16> Invokes;
// Look through the terminators of the basic blocks to find invokes, returns
// and unwinds.
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
if (ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator())) {
// Remember all return instructions in case we insert an invoke into this
// function.
Returns.push_back(RI);
} else if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
Invokes.push_back(II);
} else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
Unwinds.push_back(UI);
}
}
// If we don't have any invokes or unwinds, there's nothing to do.
if (Unwinds.empty() && Invokes.empty()) return false;
// Find the eh.selector.*, eh.exception and alloca calls.
//
// Remember any allocas() that aren't in the entry block, as the
// jmpbuf saved SP will need to be updated for them.
//
// We'll use the first eh.selector to determine the right personality
// function to use. For SJLJ, we always use the same personality for the
// whole function, not on a per-selector basis.
// FIXME: That's a bit ugly. Better way?
SmallVector<CallInst*,16> EH_Selectors;
SmallVector<CallInst*,16> EH_Exceptions;
SmallVector<Instruction*,16> JmpbufUpdatePoints;
// Note: Skip the entry block since there's nothing there that interests
// us. eh.selector and eh.exception shouldn't ever be there, and we
// want to disregard any allocas that are there.
for (Function::iterator BB = F.begin(), E = F.end(); ++BB != E;) {
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
if (CallInst *CI = dyn_cast<CallInst>(I)) {
if (CI->getCalledFunction() == SelectorFn) {
if (!PersonalityFn) PersonalityFn = CI->getArgOperand(1);
EH_Selectors.push_back(CI);
} else if (CI->getCalledFunction() == ExceptionFn) {
EH_Exceptions.push_back(CI);
} else if (CI->getCalledFunction() == StackRestoreFn) {
JmpbufUpdatePoints.push_back(CI);
}
} else if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
JmpbufUpdatePoints.push_back(AI);
}
}
}
// If we don't have any eh.selector calls, we can't determine the personality
// function. Without a personality function, we can't process exceptions.
if (!PersonalityFn) return false;
NumInvokes += Invokes.size();
NumUnwinds += Unwinds.size();
if (!Invokes.empty()) {
// We have invokes, so we need to add register/unregister calls to get
// this function onto the global unwind stack.
//
// First thing we need to do is scan the whole function for values that are
// live across unwind edges. Each value that is live across an unwind edge
// we spill into a stack location, guaranteeing that there is nothing live
// across the unwind edge. This process also splits all critical edges
// coming out of invoke's.
splitLiveRangesAcrossInvokes(Invokes);
BasicBlock *EntryBB = F.begin();
// Create an alloca for the incoming jump buffer ptr and the new jump buffer
// that needs to be restored on all exits from the function. This is an
// alloca because the value needs to be added to the global context list.
unsigned Align = 4; // FIXME: Should be a TLI check?
AllocaInst *FunctionContext =
new AllocaInst(FunctionContextTy, 0, Align,
"fcn_context", F.begin()->begin());
Value *Idxs[2];
const Type *Int32Ty = Type::getInt32Ty(F.getContext());
Value *Zero = ConstantInt::get(Int32Ty, 0);
// We need to also keep around a reference to the call_site field
Idxs[0] = Zero;
Idxs[1] = ConstantInt::get(Int32Ty, 1);
CallSite = GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
"call_site",
EntryBB->getTerminator());
// The exception selector comes back in context->data[1]
Idxs[1] = ConstantInt::get(Int32Ty, 2);
Value *FCData = GetElementPtrInst::Create(FunctionContext, Idxs, Idxs+2,
"fc_data",
EntryBB->getTerminator());
Idxs[1] = ConstantInt::get(Int32Ty, 1);
Value *SelectorAddr = GetElementPtrInst::Create(FCData, Idxs, Idxs+2,
"exc_selector_gep",
EntryBB->getTerminator());
// The exception value comes back in context->data[0]
Idxs[1] = Zero;
//.........这里部分代码省略.........
示例8: addInst
/// addInst - Add a value to the dependence graph, creating its value node
/// (VarNode/MemNode) and, for instructions, an operation node (OpNode or
/// CallNode) wired to the nodes of its operands. Recurses over operands.
///
/// \param v  the Value to add; only values accepted by isValidInst() are
///           processed.
/// \returns the value node created or found for \p v, or NULL when \p v is
///          not a valid instruction for this graph.
GraphNode* Graph::addInst(Value *v) {
  GraphNode *Op, *Var, *Operand;
  CallInst* CI = dyn_cast<CallInst> (v);
  // Calls returning void produce no value, hence no value node.
  bool hasVarNode = true;

  if (isValidInst(v)) { //If is a data manipulator instruction
    Var = this->findNode(v);

    /*
     * If Var is NULL, the value hasn't been processed yet, so we must process it
     *
     * However, if Var is a Pointer, maybe the memory node already exists but the
     * operation node aren't in the graph, yet. Thus we must process it.
     */
    if (Var == NULL || (Var != NULL && findOpNode(v) == NULL)) { //If it has not processed yet
      //If Var isn't NULL, we won't create another node for it
      if (Var == NULL) {
        if (CI) {
          hasVarNode = !CI->getType()->isVoidTy();
        }

        if (hasVarNode) {
          if (StoreInst* SI = dyn_cast<StoreInst>(v))
            Var = addInst(SI->getOperand(1)); // We do this here because we want to represent the store instructions as a flow of information of a data to a memory node
          else if ((!isa<Constant> (v)) && isMemoryPointer(v)) {
            // Memory pointers get a MemNode keyed by alias-set id (or 0
            // when alias sets are disabled) so aliasing pointers share one
            // node.
            Var = new MemNode(
                USE_ALIAS_SETS ? AS->getValueSetKey(v) : 0, AS);
            memNodes[USE_ALIAS_SETS ? AS->getValueSetKey(v) : 0]
                = Var;
          } else {
            Var = new VarNode(v);
            varNodes[v] = Var;
          }
          nodes.insert(Var);
        }
      }

      if (isa<Instruction> (v)) {
        // Create the operation node: calls get a dedicated CallNode,
        // everything else an OpNode tagged with the opcode.
        if (CI) {
          Op = new CallNode(CI);
          callNodes[CI] = Op;
        } else {
          Op = new OpNode(dyn_cast<Instruction> (v)->getOpcode(), v);
        }
        opNodes[v] = Op;
        nodes.insert(Op);
        if (hasVarNode)
          Op->connect(Var);

        //Connect the operands to the OpNode
        for (unsigned int i = 0; i < cast<User> (v)->getNumOperands(); i++) {
          if (isa<StoreInst> (v) && i == 1)
            continue; // We do this here because we want to represent the store instructions as a flow of information of a data to a memory node
          Value *v1 = cast<User> (v)->getOperand(i);
          Operand = this->addInst(v1);
          if (Operand != NULL)
            Operand->connect(Op);
        }
      }
    }
    return Var;
  }
  return NULL;
}
示例9: error
/// insertConsume - Insert a sync_consume(channel) call in thread \p vthread
/// that receives the value produced by \p u, and (for register dependences)
/// cast the received word back to u's type and rewrite v-side users to read
/// it.
///
/// \param u        producer instruction (in the new, partitioned code).
/// \param v        consumer instruction.
/// \param dtype    dependence kind; only REG gets a cast + use-rewrite here.
/// \param channel  communication channel id to consume from.
/// \param uthread  producer thread index (unused in this path).
/// \param vthread  consumer thread index; insertion happens in this thread.
void DSWP::insertConsume(Instruction *u, Instruction *v, DType dtype,
    int channel, int uthread, int vthread) {
  Instruction *oldu = dyn_cast<Instruction>(newToOld[u]);
  // Prefer the pre-recorded placeholder position in the consuming thread;
  // fall back to the cloned counterpart of the original instruction.
  Instruction *insPos = placeEquivalents[vthread][oldu];
  if (insPos == NULL) {
    insPos = dyn_cast<Instruction>(instMap[vthread][oldu]);
    if (insPos == NULL) {
      error("can't insert nowhere");
    }
  }

  // call sync_consume(channel)
  Function *fun = module->getFunction("sync_consume");
  vector<Value *> args;
  args.push_back(ConstantInt::get(Type::getInt32Ty(*context), channel));
  CallInst *call = CallInst::Create(fun, args, "c" + itoa(channel), insPos);

  if (dtype == REG) {
    // sync_consume returns a raw word; cast it back to the value's type.
    CastInst *cast;
    string name = call->getName().str() + "_val";

    if (u->getType()->isIntegerTy()) {
      cast = new TruncInst(call, u->getType(), name);
    }
    else if (u->getType()->isFloatingPointTy()) {
      // NOTE(review): the guard tests isFloatTy() (32-bit float) while the
      // message says "double" — one of the two looks inverted; confirm
      // which floating-point width is actually unsupported.
      if (u->getType()->isFloatTy())
        error("cannot deal with double");

      cast = new BitCastInst(call, u->getType(), name);
    }
    else if (u->getType()->isPointerTy()){
      cast = new IntToPtrInst(call, u->getType(), name);
    } else {
      error("what's the hell type");
    }

    cast->insertBefore(insPos);

    // replace the uses
    for (Instruction::use_iterator ui = oldu->use_begin(),
           ue = oldu->use_end();
         ui != ue; ++ui) {
      Instruction *user = dyn_cast<Instruction>(*ui);

      if (user == NULL) {
        error("used by a non-instruction?");
      }

      // make sure it's in the same function...
      if (user->getParent()->getParent() != v->getParent()->getParent()) {
        continue;
      }

      // call replaceUses so that it handles phi nodes
      map<Value *, Value *> reps;
      reps[oldu] = cast;
      replaceUses(user, reps);
    }
  } /* TODO: need to handle true memory dependences more than just syncing?
  else if (dtype == DTRUE) {	//READ after WRITE
    error("check mem dep!!");

    if (!isa<LoadInst>(v)) {
      error("not true dependency");
    }
    BitCastInst *cast = new BitCastInst(
        call, v->getType(), call->getName().str() + "_ptr");
    cast->insertBefore(v);

    // replace the v with 'cast' in v's thread:
    // (other thread with be dealed using dependence)
    for (Instruction::use_iterator ui = v->use_begin(), ue = v->use_end();
         ui != ue; ui++) {
      Instruction *user = dyn_cast<Instruction>(*ui);

      if (user == NULL) {
        error("how could it be NULL");
      }

      // int userthread = this->getNewInstAssigned(user);
      if (user->getParent()->getParent() != v->getParent()->getParent()) {
        continue;
      }

      for (unsigned i = 0; i < user->getNumOperands(); i++) {
        Value * op = user->getOperand(i);
        if (op == v) {
          user->setOperand(i, cast);
        }
      }
    }
  } */ else {
    // nothing to do
  }
}
示例10: assert
// The fractional part of a float is enough to accurately represent up to
// a 24-bit signed integer.
Value* AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
BinaryOperator &I,
Value *Num, Value *Den,
bool IsDiv, bool IsSigned) const {
assert(Num->getType()->isIntegerTy(32));
const DataLayout &DL = Mod->getDataLayout();
unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
if (LHSSignBits < 9)
return nullptr;
unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
if (RHSSignBits < 9)
return nullptr;
unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
unsigned DivBits = 32 - SignBits;
if (IsSigned)
++DivBits;
Type *Ty = Num->getType();
Type *I32Ty = Builder.getInt32Ty();
Type *F32Ty = Builder.getFloatTy();
ConstantInt *One = Builder.getInt32(1);
Value *JQ = One;
if (IsSigned) {
// char|short jq = ia ^ ib;
JQ = Builder.CreateXor(Num, Den);
// jq = jq >> (bitsize - 2)
JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));
// jq = jq | 0x1
JQ = Builder.CreateOr(JQ, One);
}
// int ia = (int)LHS;
Value *IA = Num;
// int ib, (int)RHS;
Value *IB = Den;
// float fa = (float)ia;
Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
: Builder.CreateUIToFP(IA, F32Ty);
// float fb = (float)ib;
Value *FB = IsSigned ? Builder.CreateSIToFP(IB,F32Ty)
: Builder.CreateUIToFP(IB,F32Ty);
Value *RCP = Builder.CreateFDiv(ConstantFP::get(F32Ty, 1.0), FB);
Value *FQM = Builder.CreateFMul(FA, RCP);
// fq = trunc(fqm);
CallInst* FQ = Builder.CreateIntrinsic(Intrinsic::trunc, { FQM });
FQ->copyFastMathFlags(Builder.getFastMathFlags());
// float fqneg = -fq;
Value *FQNeg = Builder.CreateFNeg(FQ);
// float fr = mad(fqneg, fb, fa);
Value *FR = Builder.CreateIntrinsic(Intrinsic::amdgcn_fmad_ftz,
{ FQNeg, FB, FA }, FQ);
// int iq = (int)fq;
Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
: Builder.CreateFPToUI(FQ, I32Ty);
// fr = fabs(fr);
FR = Builder.CreateIntrinsic(Intrinsic::fabs, { FR }, FQ);
// fb = fabs(fb);
FB = Builder.CreateIntrinsic(Intrinsic::fabs, { FB }, FQ);
// int cv = fr >= fb;
Value *CV = Builder.CreateFCmpOGE(FR, FB);
// jq = (cv ? jq : 0);
JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));
// dst = iq + jq;
Value *Div = Builder.CreateAdd(IQ, JQ);
Value *Res = Div;
if (!IsDiv) {
// Rem needs compensation, it's easier to recompute it
Value *Rem = Builder.CreateMul(Div, Den);
Res = Builder.CreateSub(Num, Rem);
}
// Truncate to number of bits this divide really is.
if (IsSigned) {
Res = Builder.CreateTrunc(Res, Builder.getIntNTy(DivBits));
Res = Builder.CreateSExt(Res, Ty);
} else {
ConstantInt *TruncMask = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
//.........这里部分代码省略.........
示例11: LLVMGetCalledFunction
/// LLVMGetCalledFunction - C-API accessor returning the callee operand of a
/// call instruction.
///
/// \param I  an LLVMValueRef that must wrap a CallInst.
/// \returns the called value (note: despite the name, this may be any
///          callable Value — e.g. a function pointer or a bitcast of a
///          Function — not necessarily a Function).
LLVMValueRef LLVMGetCalledFunction(LLVMValueRef I)
{
  // Use LLVM's checked cast<> instead of a C-style pointer cast: cast<>
  // asserts (in asserts-enabled builds) that I really is a CallInst rather
  // than silently reinterpreting an unrelated Value.
  CallInst *CI = cast<CallInst>(unwrap(I));
  return wrap(CI->getCalledValue());
}
示例12: performLocalRetainMotion
/// performLocalRetainMotion - Scan forward from the specified retain, moving it
/// later in the function if possible, over instructions that provably can't
/// release the object. If we get to a release of the object, zap both.
///
/// NOTE: this handles both objc_retain and swift_retain.
///
/// performLocalRetainMotion - Try to sink \p Retain down its own basic block
/// looking for a release of provably the same object; if such a pair is
/// found, both calls are deleted.  Motion stops at any instruction that
/// might affect the reference count in a way we cannot reason about.
///
/// \param Retain the retain call (swift_retain or objc_retain) being moved.
/// \param BB     the basic block containing \p Retain.
/// \param RC     RC-identity analysis used to strip casts etc. and find the
///               root object a pointer refers to.
/// \returns true if the IR was changed (a retain/release pair was removed).
static bool performLocalRetainMotion(CallInst &Retain, BasicBlock &BB,
SwiftRCIdentity *RC) {
// FIXME: Call classifier should identify the object for us. Too bad C++
// doesn't have nice Swift-style enums.
Value *RetainedObject = RC->getSwiftRCIdentityRoot(Retain.getArgOperand(0));
// Start scanning just after the retain; stop at the block's terminator.
BasicBlock::iterator BBI = Retain.getIterator(),
BBE = BB.getTerminator()->getIterator();
// Only used to decide which statistic counter to bump on success.
bool isObjCRetain = Retain.getCalledFunction()->getName() == "objc_retain";
bool MadeProgress = false; // presumably updated by the omitted tail — TODO confirm
// Scan until we get to the end of the block.
for (++BBI; BBI != BBE; ++BBI) {
Instruction &CurInst = *BBI;
// Classify the instruction. This switch does a "break" when the instruction
// can be skipped and is interesting, and a "continue" when it is a retain
// of the same pointer.
switch (classifyInstruction(CurInst)) {
// These instructions should not reach here based on the pass ordering.
// i.e. LLVMARCOpt -> LLVMContractOpt.
case RT_RetainN:
case RT_UnknownRetainN:
case RT_BridgeRetainN:
case RT_ReleaseN:
case RT_UnknownReleaseN:
case RT_BridgeReleaseN:
llvm_unreachable("These are only created by LLVMARCContract !");
case RT_NoMemoryAccessed:
case RT_AllocObject:
case RT_CheckUnowned:
// Skip over random instructions that don't touch memory. They don't need
// protection by retain/release.
break;
case RT_FixLifetime: // This only stops release motion. Retains can move over it.
break;
case RT_Retain:
case RT_UnknownRetain:
case RT_BridgeRetain:
case RT_RetainUnowned:
case RT_ObjCRetain: { // swift_retain(obj)
//CallInst &ThisRetain = cast<CallInst>(CurInst);
//Value *ThisRetainedObject = ThisRetain.getArgOperand(0);
// If we see a retain of the same object, we can skip over it, but we
// can't count it as progress. Just pushing a retain(x) past a retain(y)
// doesn't change the program.
continue;
}
case RT_UnknownRelease:
case RT_BridgeRelease:
case RT_ObjCRelease:
case RT_Release: {
// If we get to a release that is provably to this object, then we can zap
// it and the retain.
CallInst &ThisRelease = cast<CallInst>(CurInst);
Value *ThisReleasedObject = ThisRelease.getArgOperand(0);
ThisReleasedObject = RC->getSwiftRCIdentityRoot(ThisReleasedObject);
if (ThisReleasedObject == RetainedObject) {
// Matching pair: delete both calls and report the change.
Retain.eraseFromParent();
ThisRelease.eraseFromParent();
// Bump the statistic matching the flavor of the removed retain.
if (isObjCRetain) {
++NumObjCRetainReleasePairs;
} else {
++NumRetainReleasePairs;
}
return true;
}
// Otherwise, if this is some other pointer, we can only ignore it if we
// can prove that the two objects don't alias.
// Retain.dump(); ThisRelease.dump(); BB.getParent()->dump();
goto OutOfLoop;
}
case RT_Unknown:
// Loads cannot affect the retain.
if (isa<LoadInst>(CurInst))
continue;
// Load, store, memcpy etc can't do a release.
if (isa<LoadInst>(CurInst) || isa<StoreInst>(CurInst) ||
isa<MemIntrinsic>(CurInst))
break;
// CurInst->dump(); BBI->dump();
// Otherwise, we get to something unknown/unhandled. Bail out for now.
goto OutOfLoop;
//......... remainder of this function (including the OutOfLoop label) is omitted in this excerpt .........
示例13: performStoreOnlyObjectElimination
/// performStoreOnlyObjectElimination - Scan the graph of uses of the specified
/// object allocation. If the object does not escape and is only stored to
/// (this happens because GVN and other optimizations hoists forward substitutes
/// all stores to the object to eliminate all loads from it), then zap the
/// object and all accesses related to it.
///
/// \param Allocation the object allocation call whose use graph is explored.
/// \param BBI        iterator in the caller's block walk; presumably adjusted
///                   by the omitted deletion code when instructions are
///                   erased — TODO confirm against the full source.
/// \returns false when the object may escape, be loaded from, or has a
///          destructor with side effects; the successful deletion path lies
///          in the portion of the function omitted from this excerpt.
static bool performStoreOnlyObjectElimination(CallInst &Allocation,
BasicBlock::iterator &BBI) {
DtorKind DtorInfo = analyzeDestructor(Allocation.getArgOperand(0));
// We can't delete the object if its destructor has side effects.
if (DtorInfo != DtorKind::NoSideEffects)
return false;
// Do a depth first search exploring all of the uses of the object pointer,
// following through casts, pointer adjustments etc. If we find any loads or
// any escape sites of the object, we give up. If we succeed in walking the
// entire graph of uses, we can remove the resultant set.
SmallSetVector<Instruction*, 16> InvolvedInstructions;
SmallVector<Instruction*, 16> Worklist;
Worklist.push_back(&Allocation);
// Stores - Keep track of all of the store instructions we see.
SmallVector<StoreInst*, 16> Stores;
while (!Worklist.empty()) {
Instruction *I = Worklist.pop_back_val();
// Insert the instruction into our InvolvedInstructions set. If we have
// already seen it, then don't reprocess all of the uses.
// (SmallSetVector::insert returns false for an element already present.)
if (!InvolvedInstructions.insert(I)) continue;
// Okay, this is the first time we've seen this instruction, proceed.
switch (classifyInstruction(*I)) {
// These instructions should not reach here based on the pass ordering.
// i.e. LLVMARCOpt -> LLVMContractOpt.
case RT_RetainN:
case RT_UnknownRetainN:
case RT_BridgeRetainN:
case RT_ReleaseN:
case RT_UnknownReleaseN:
case RT_BridgeReleaseN:
llvm_unreachable("These are only created by LLVMARCContract !");
case RT_AllocObject:
// If this is a different swift_allocObject than we started with, then
// there is some computation feeding into a size or alignment computation
// that we have to keep... unless we can delete *that* entire object as
// well.
break;
case RT_NoMemoryAccessed:
// If no memory is accessed, then something is being done with the
// pointer: maybe it is bitcast or GEP'd. Since there are no side effects,
// it is perfectly fine to delete this instruction if all uses of the
// instruction are also eliminable.
if (I->mayHaveSideEffects() || isa<TerminatorInst>(I))
return false;
break;
case RT_Release:
case RT_Retain:
case RT_FixLifetime:
case RT_CheckUnowned:
// It is perfectly fine to eliminate various retains and releases of this
// object: we are zapping all accesses or none.
break;
// If this is an unknown instruction, we have more interesting things to
// consider.
case RT_Unknown:
case RT_ObjCRelease:
case RT_ObjCRetain:
case RT_UnknownRetain:
case RT_UnknownRelease:
case RT_BridgeRetain:
case RT_BridgeRelease:
case RT_RetainUnowned:
// Otherwise, this really is some unhandled instruction. Bail out.
return false;
}
// Okay, if we got here, the instruction can be eaten so-long as all of its
// uses can be. Scan through the uses and add them to the worklist for
// recursive processing.
for (auto UI = I->user_begin(), E = I->user_end(); UI != E; ++UI) {
Instruction *User = cast<Instruction>(*UI);
// Handle stores as a special case here: we want to make sure that the
// object is being stored *to*, not itself being stored (which would be an
// escape point). Since stores themselves don't have any uses, we can
// short-cut the classification scheme above.
if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
// If this is a store *to* the object, we can zap it.
if (UI.getUse().getOperandNo() == StoreInst::getPointerOperandIndex()) {
InvolvedInstructions.insert(SI);
continue;
}
// Otherwise, using the object as a source (or size) is an escape.
return false;
//......... remainder of this function is omitted in this excerpt .........
示例14: LowerUnwindsAndResumes
/// LowerUnwinds - Turn unwind instructions into calls to _Unwind_Resume,
/// rethrowing any previously caught exception. This will crash horribly
/// at runtime if there is no such exception: using unwind to throw a new
/// exception is currently not supported.
bool DwarfEHPrepare::LowerUnwindsAndResumes() {
SmallVector<Instruction*, 16> ResumeInsts;
for (Function::iterator fi = F->begin(), fe = F->end(); fi != fe; ++fi) {
for (BasicBlock::iterator bi = fi->begin(), be = fi->end(); bi != be; ++bi){
if (isa<UnwindInst>(bi))
ResumeInsts.push_back(bi);
else if (CallInst *call = dyn_cast<CallInst>(bi))
if (Function *fn = dyn_cast<Function>(call->getCalledValue()))
if (fn->getName() == "llvm.eh.resume")
ResumeInsts.push_back(bi);
}
}
if (ResumeInsts.empty()) return false;
// Find the rewind function if we didn't already.
if (!RewindFunction) {
LLVMContext &Ctx = ResumeInsts[0]->getContext();
std::vector<const Type*>
Params(1, Type::getInt8PtrTy(Ctx));
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
Params, false);
const char *RewindName = TLI->getLibcallName(RTLIB::UNWIND_RESUME);
RewindFunction = F->getParent()->getOrInsertFunction(RewindName, FTy);
}
bool Changed = false;
for (SmallVectorImpl<Instruction*>::iterator
I = ResumeInsts.begin(), E = ResumeInsts.end(); I != E; ++I) {
Instruction *RI = *I;
// Replace the resuming instruction with a call to _Unwind_Resume (or the
// appropriate target equivalent).
llvm::Value *ExnValue;
if (isa<UnwindInst>(RI))
ExnValue = CreateExceptionValueCall(RI->getParent());
else
ExnValue = cast<CallInst>(RI)->getArgOperand(0);
// Create the call...
CallInst *CI = CallInst::Create(RewindFunction, ExnValue, "", RI);
CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));
// ...followed by an UnreachableInst, if it was an unwind.
// Calls to llvm.eh.resume are typically already followed by this.
if (isa<UnwindInst>(RI))
new UnreachableInst(RI->getContext(), RI);
if (isa<UnwindInst>(RI))
++NumUnwindsLowered;
else
++NumResumesLowered;
// Nuke the resume instruction.
RI->eraseFromParent();
Changed = true;
}
return Changed;
}
示例15: processBasicBlock
/// Rewrite the instructions of \p BB:
///  - nop casts become plain bitcasts (unless a client type is involved,
///    which is reported and left alone),
///  - llvm.bswap intrinsics are lowered to logic operations,
///  - cheerp_deallocate intrinsics are dropped,
/// then chains of bitcasts-of-bitcasts are collapsed to the innermost source.
/// \returns true if anything was modified.
bool ReplaceNopCastsAndByteSwaps::processBasicBlock(BasicBlock& BB)
{
  bool modified = false;
  /**
   * First pass: replace nopCasts with bitcasts and bswap intrinsics with logic operations
   */
  BasicBlock::iterator cur = BB.begin();
  while ( cur != BB.end() )
  {
    // Advance before any rewrite so replacing/erasing the current
    // instruction cannot invalidate our position.
    Instruction * I = &*cur;
    ++cur;
    if ( isNopCast(I) )
    {
      assert( isa<CallInst>(I) );
      CallInst * nopCast = cast<CallInst>(I);
      // Client types cannot be rewritten: report them and leave the cast be.
      if ( TypeSupport::isClientType( nopCast->getType()) )
      {
        llvm::errs() << "Cast of client type: " << *nopCast << "\n";
        continue;
      }
      if ( TypeSupport::isClientType( nopCast->getArgOperand(0)->getType()) )
      {
        llvm::errs() << "Cast of client type: " << *nopCast->getArgOperand(0) << "\n";
        continue;
      }
      ReplaceInstWithInst( nopCast, BitCastInst::Create( Instruction::CastOps::BitCast, nopCast->getArgOperand(0), nopCast->getType() ) );
      modified = true;
    }
    else if( IntrinsicInst* intr = dyn_cast<IntrinsicInst>(I) )
    {
      switch ( intr->getIntrinsicID() )
      {
        case Intrinsic::bswap:
          // Lower to shifts/masks/ors via the intrinsic lowering helper.
          IL->LowerIntrinsicCall(intr);
          modified = true;
          break;
        case Intrinsic::cheerp_deallocate:
          intr->eraseFromParent();
          modified = true;
          break;
        default:
          break;
      }
    }
  }
  /**
   * Second pass: collapse bitcasts of bitcasts.
   *
   * Note: this might leave some dead instruction around, but we don't care since bitcasts are inlined anyway
   */
  for ( BasicBlock::iterator it = BB.begin(); it != BB.end(); ++it )
  {
    if ( !isa<BitCastInst>(&*it) )
      continue;
    // Repeatedly hop over inner bitcasts until the operand is no longer one.
    while ( BitCastInst * inner = dyn_cast<BitCastInst>(it->getOperand(0) ) )
    {
      it->setOperand(0, inner->getOperand(0) );
      modified = true;
    }
  }
  return modified;
}