This page collects typical usage examples of the C++ method LoadInst::isVolatile. If you have been wondering what exactly LoadInst::isVolatile does, how to call it, or what it looks like in real code, the curated examples below should help. You can also look further into usage examples of LoadInst, the class this method belongs to.
Five code examples of LoadInst::isVolatile are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better C++ examples.
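Before the examples, a quick orientation: LoadInst::isVolatile() returns true when the load carries the volatile flag, which tells a transformation that the access must not be deleted, duplicated, or reordered. A minimal sketch of the usual query pattern (the function name and the modern header paths are mine, not taken from the examples below):

#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Count the loads in F that a transformation would have to leave alone
// because they carry the volatile flag. Purely illustrative.
static unsigned countVolatileLoads(Function &F) {
  unsigned Count = 0;
  for (BasicBlock &BB : F)
    for (Instruction &I : BB)
      if (LoadInst *LI = dyn_cast<LoadInst>(&I))
        if (LI->isVolatile()) // volatile loads must not be removed or reordered
          ++Count;
  return Count;
}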
Example 1: visitLoadInst
void FuncTransform::visitLoadInst(LoadInst &LI) {
  //
  // Record the use of the pool handle for the pointer being dereferenced.
  //
  if (Value *PH = getPoolHandle(LI.getOperand(0)))
    AddPoolUse(LI, PH, PoolUses);

  //
  // If this is a volatile load, then record a use of the pool handle for the
  // loaded value, even if it is never used.
  //
  if (LI.isVolatile()) {
    if (Value *PH = getPoolHandle(&LI))
      AddPoolUse(LI, PH, PoolUses);
  }

  visitInstruction(LI);
}
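Example 1 is not called by hand: FuncTransform derives from LLVM's InstVisitor, and visitLoadInst is reached through that dispatch; getPoolHandle and AddPoolUse are helpers specific to the pool-allocation project it comes from. A stripped-down sketch of the same visitor pattern, with a made-up visitor class and none of the pool bookkeeping:

#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Minimal visitor: only loads are handled; everything else falls through to
// the default no-op visitInstruction provided by InstVisitor.
struct VolatileLoadCounter : InstVisitor<VolatileLoadCounter> {
  unsigned NumVolatile = 0;

  void visitLoadInst(LoadInst &LI) {
    if (LI.isVolatile())
      ++NumVolatile;
  }
};

// Usage:
//   VolatileLoadCounter VLC;
//   VLC.visit(F);   // F is a Function&; fills in VLC.NumVolatile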
Example 2: canVectorizeInst
// Not an instruction handled below to turn into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(Inst);
    // Currently only handle the case where the pointer operand is a GEP,
    // so check for that case.
    return isa<GetElementPtrInst>(LI->getPointerOperand()) &&
           !LI->isVolatile();
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value, plus
    // since it should be canonical form, the User should be a GEP.
    StoreInst *SI = cast<StoreInst>(Inst);
    return (SI->getPointerOperand() == User) &&
           isa<GetElementPtrInst>(User) && !SI->isVolatile();
  }
  default:
    return false;
  }
}
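Example 2 is only a predicate; in the AMDGPU promote-alloca pass it comes from, it is applied to every user of a candidate alloca before the alloca is turned into a vector. A hedged sketch of that driving loop (allUsersAreVectorizable is a made-up name; only canVectorizeInst above is from the example):

// Returns true only if every user of the alloca is something that
// canVectorizeInst (Example 2) accepts.
static bool allUsersAreVectorizable(AllocaInst *Alloca) {
  for (User *U : Alloca->users()) {
    Instruction *Inst = dyn_cast<Instruction>(U);
    if (!Inst || !canVectorizeInst(Inst, U))
      return false;
  }
  return true;
}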
Example 3: Ranges
//......... part of the code is omitted here .........
Instruction *alloc = NULL;
Value *globalPtr = NULL;

// create temporary alloca space to communicate to/from.
alloc = makeAlloca(int8Ty, "agg.tmp", insertBefore,
                   Range.End-Range.Start, Alignment);

// Generate the old and new base pointers before we output
// anything else.
{
  Type* iPtrTy = TD->getIntPtrType(alloc->getType());
  Type* iNewBaseTy = TD->getIntPtrType(alloc->getType());
  oldBaseI = builder.CreatePtrToInt(StartPtr, iPtrTy, "agg.tmp.oldb.i");
  newBaseI = builder.CreatePtrToInt(alloc, iNewBaseTy, "agg.tmp.newb.i");
}

// If storing, do the stores we had into our alloca'd region.
if( isStore ) {
  for (SmallVector<Instruction*, 16>::const_iterator
           SI = Range.TheStores.begin(),
           SE = Range.TheStores.end(); SI != SE; ++SI) {
    StoreInst* oldStore = cast<StoreInst>(*SI);

    if( DebugThis ) {
      errs() << "have store in range:";
      oldStore->dump();
    }

    Value* ptrToAlloc = rebasePointer(oldStore->getPointerOperand(),
                                      StartPtr, alloc, "agg.tmp",
                                      &builder, *TD, oldBaseI, newBaseI);
    // Old store must not be volatile or atomic... or we shouldn't have put
    // it in ranges
    assert(!(oldStore->isVolatile() || oldStore->isAtomic()));

    StoreInst* newStore =
      builder.CreateStore(oldStore->getValueOperand(), ptrToAlloc);
    newStore->setAlignment(oldStore->getAlignment());
    newStore->takeName(oldStore);
  }
}

// cast the pointer that was load/stored to i8 if necessary.
if( StartPtr->getType()->getPointerElementType() == int8Ty ) {
  globalPtr = StartPtr;
} else {
  globalPtr = builder.CreatePointerCast(StartPtr, globalInt8PtrTy, "agg.cast");
}

// Get a Constant* for the length.
Constant* len = ConstantInt::get(sizeTy, Range.End-Range.Start, false);

// Now add the memcpy instruction
unsigned addrSpaceDst,addrSpaceSrc;
addrSpaceDst = addrSpaceSrc = 0;
if( isStore ) addrSpaceDst = globalSpace;
if( isLoad ) addrSpaceSrc = globalSpace;

Type *types[3];
types[0] = PointerType::get(int8Ty, addrSpaceDst);
types[1] = PointerType::get(int8Ty, addrSpaceSrc);
types[2] = sizeTy;

Function *func = Intrinsic::getDeclaration(M, Intrinsic::memcpy, types);

Value* args[5]; // dst src len alignment isvolatile
if( isStore ) {
Example 4: runOnFunction
bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
  SmallVector<LoadInst *, 4> aggrLoads;
  SmallVector<MemTransferInst *, 4> aggrMemcpys;
  SmallVector<MemSetInst *, 4> aggrMemsets;

  DataLayout *TD = &getAnalysis<DataLayout>();
  LLVMContext &Context = F.getParent()->getContext();

  //
  // Collect all the aggrLoads, aggrMemcpys and aggrMemsets.
  //
  //const BasicBlock *firstBB = &F.front(); // first BB in F
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    //BasicBlock *bb = BI;
    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
         ++II) {
      if (LoadInst *load = dyn_cast<LoadInst>(II)) {
        if (load->hasOneUse() == false) continue;
        if (TD->getTypeStoreSize(load->getType()) < MaxAggrCopySize) continue;

        User *use = *(load->use_begin());
        if (StoreInst *store = dyn_cast<StoreInst>(use)) {
          if (store->getOperand(0) != load) //getValueOperand
            continue;
          aggrLoads.push_back(load);
        }
      } else if (MemTransferInst *intr = dyn_cast<MemTransferInst>(II)) {
        Value *len = intr->getLength();
        // If the number of elements being copied is greater
        // than MaxAggrCopySize, lower it to a loop
        if (ConstantInt *len_int = dyn_cast<ConstantInt>(len)) {
          if (len_int->getZExtValue() >= MaxAggrCopySize) {
            aggrMemcpys.push_back(intr);
          }
        } else {
          // turn variable length memcpy/memmov into loop
          aggrMemcpys.push_back(intr);
        }
      } else if (MemSetInst *memsetintr = dyn_cast<MemSetInst>(II)) {
        Value *len = memsetintr->getLength();
        if (ConstantInt *len_int = dyn_cast<ConstantInt>(len)) {
          if (len_int->getZExtValue() >= MaxAggrCopySize) {
            aggrMemsets.push_back(memsetintr);
          }
        } else {
          // turn variable length memset into loop
          aggrMemsets.push_back(memsetintr);
        }
      }
    }
  }

  if ((aggrLoads.size() == 0) && (aggrMemcpys.size() == 0) &&
      (aggrMemsets.size() == 0))
    return false;

  //
  // Do the transformation of an aggr load/copy/set to a loop
  //
  for (unsigned i = 0, e = aggrLoads.size(); i != e; ++i) {
    LoadInst *load = aggrLoads[i];
    StoreInst *store = dyn_cast<StoreInst>(*load->use_begin());
    Value *srcAddr = load->getOperand(0);
    Value *dstAddr = store->getOperand(1);
    unsigned numLoads = TD->getTypeStoreSize(load->getType());
    Value *len = ConstantInt::get(Type::getInt32Ty(Context), numLoads);

    convertTransferToLoop(store, srcAddr, dstAddr, len, load->isVolatile(),
                          store->isVolatile(), Context, F);

    store->eraseFromParent();
    load->eraseFromParent();
  }

  for (unsigned i = 0, e = aggrMemcpys.size(); i != e; ++i) {
    MemTransferInst *cpy = aggrMemcpys[i];
    Value *len = cpy->getLength();
    // llvm 2.7 version of memcpy does not have volatile
    // operand yet. So always making it non-volatile
    // optimistically, so that we don't see unnecessary
    // st.volatile in ptx
    convertTransferToLoop(cpy, cpy->getSource(), cpy->getDest(), len, false,
                          false, Context, F);
    cpy->eraseFromParent();
  }

  for (unsigned i = 0, e = aggrMemsets.size(); i != e; ++i) {
    MemSetInst *memsetinst = aggrMemsets[i];
    Value *len = memsetinst->getLength();
    Value *val = memsetinst->getValue();
    convertMemSetToLoop(memsetinst, memsetinst->getDest(), len, val, Context,
                        F);
    memsetinst->eraseFromParent();
  }

  return true;
}
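Example 4 hands each matched load/store pair to convertTransferToLoop, whose body is not shown here. The general shape of such a lowering, an explicit byte-copy loop built with IRBuilder, looks roughly like the sketch below. It is a simplified illustration rather than the NVPTX pass's actual helper: it ignores alignment, address spaces, and the volatile flags the pass forwards, the header paths follow the current LLVM tree, and the single-argument CreateGEP/CreateLoad calls assume the older typed-pointer IR that matches the era of the example.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Lower "copy Len bytes from Src to Dst" into an explicit loop:
//   for (i = 0; i != Len; ++i) Dst[i] = Src[i];
// Src and Dst are expected to be i8*, and Len is assumed to be non-zero,
// which the pass guarantees for the sizes it collects.
static void lowerCopyToLoop(Value *Src, Value *Dst, Value *Len,
                            Instruction *InsertBefore, LLVMContext &Ctx) {
  BasicBlock *Orig = InsertBefore->getParent();
  Function *F = Orig->getParent();

  // Split control flow: Orig -> copy.loop -> copy.done (rest of Orig).
  BasicBlock *LoopBB = BasicBlock::Create(Ctx, "copy.loop", F);
  BasicBlock *RestBB = Orig->splitBasicBlock(InsertBefore, "copy.done");
  Orig->getTerminator()->setSuccessor(0, LoopBB);

  IRBuilder<> B(LoopBB);
  PHINode *Idx = B.CreatePHI(Len->getType(), 2, "idx");
  Idx->addIncoming(ConstantInt::get(Len->getType(), 0), Orig);

  // One byte per iteration.
  Value *SrcPtr = B.CreateGEP(Src, Idx);
  Value *DstPtr = B.CreateGEP(Dst, Idx);
  B.CreateStore(B.CreateLoad(SrcPtr), DstPtr);

  Value *Next = B.CreateAdd(Idx, ConstantInt::get(Len->getType(), 1), "idx.next");
  Idx->addIncoming(Next, LoopBB);
  B.CreateCondBr(B.CreateICmpNE(Next, Len), LoopBB, RestBB);
}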
Example 5: futamurize
Function * futamurize( const Function * orig_func, DenseMap<const Value*, Value*> &argmap, std::set<const unsigned char *> &constant_addresses_set )
{
  LLVMContext &context = getGlobalContext();

  // Make a copy of the function, removing constant arguments
  Function * specialized_func = CloneFunction( orig_func, argmap );
  specialized_func->setName( orig_func->getNameStr() + "_1" );

  // add it to our module
  LLVM_Module->getFunctionList().push_back( specialized_func );
  printf("\nspecialized_func = %p <%s>\n", specialized_func, specialized_func->getName().data());
  //~ specialized_func->dump();

  // Optimize it
  FunctionPassManager PM( LLVM_Module );
  createStandardFunctionPasses( &PM, 3 );
  PM.add(createScalarReplAggregatesPass()); // Break up aggregate allocas
  PM.add(createInstructionCombiningPass()); // Cleanup for scalarrepl.
  PM.add(createJumpThreadingPass());        // Thread jumps.
  PM.add(createCFGSimplificationPass());    // Merge & remove BBs
  PM.add(createInstructionCombiningPass()); // Combine silly seq's
  PM.add(createTailCallEliminationPass());  // Eliminate tail calls
  PM.add(createCFGSimplificationPass());    // Merge & remove BBs
  PM.add(createReassociatePass());          // Reassociate expressions
  PM.add(createLoopRotatePass());           // Rotate Loop
  PM.add(createLICMPass());                 // Hoist loop invariants
  PM.add(createLoopUnswitchPass( false ));
  PM.add(createInstructionCombiningPass());
  PM.add(createIndVarSimplifyPass());       // Canonicalize indvars
  PM.add(createLoopDeletionPass());         // Delete dead loops
  PM.add(createLoopUnroll2Pass());          // Unroll small loops
  PM.add(createInstructionCombiningPass()); // Clean up after the unroller
  PM.add(createGVNPass());                  // Remove redundancies
  PM.add(createMemCpyOptPass());            // Remove memcpy / form memset
  PM.add(createSCCPPass());                 // Constant prop with SCCP
  PM.add(createPromoteMemoryToRegisterPass());
  PM.add(createConstantPropagationPass());
  PM.add(createDeadStoreEliminationPass());
  PM.add(createAggressiveDCEPass());
  PM.add(new MemoryDependenceAnalysis());
  //~ PM.add(createAAEvalPass());

  const PassInfo * pinfo = Pass::lookupPassInfo( "print-alias-sets" );
  if( !pinfo ) { printf( "print-alias-sets not found\n" ); exit(-1); }
  PM.add( pinfo->createPass() );

  FunctionPassManager PM_Inline( LLVM_Module );
  PM_Inline.add(createSingleFunctionInliningPass());

  bool Changed = false;
  int iterations = 2;
  int inline_iterations = 6;

  do
  {
    Changed = false;

    // first do some optimizations
    PM.doInitialization();
    PM.run( *specialized_func );
    PM.doFinalization();

    // Load from Constant Memory detection
    const TargetData *TD = LLVM_EE->getTargetData();

    for (inst_iterator I = inst_begin(specialized_func), E = inst_end(specialized_func); I != E; ++I)
    {
      Instruction * inst = (Instruction *) &*I;

      // get all Load instructions
      LoadInst * load = dyn_cast<LoadInst>( inst );
      if( !load ) continue;
      if( load->isVolatile() ) continue;
      if (load->use_empty()) continue; // Don't muck with dead instructions...

      // get the address loaded by load instruction
      Value *ptr_value = load->getPointerOperand();

      // we're only interested in constant addresses
      ConstantExpr * ptr_constant_expr = dyn_cast<ConstantExpr>( ptr_value );
      if( !ptr_constant_expr ) continue;
      ptr_constant_expr->dump();

      // compute real address of constant pointer expression
      Constant * ptr_constant = ConstantFoldConstantExpression( ptr_constant_expr, TD );
      if( !ptr_constant ) continue;
      ptr_constant->dump();

      // convert to int constant
      ConstantInt *int_constant = dyn_cast<ConstantInt>( ConstantExpr::getPtrToInt( ptr_constant, Type::getInt64Ty( context )));
      if( !int_constant ) continue;
      int_constant->dump();

      // get data size
      int data_length = TD->getTypeAllocSize( load->getType() );
      ptr_value->getType()->dump();
//......... part of the code is omitted here .........
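The listing is cut off before the actual replacement step, but the surrounding comments describe the intent: once the folded address is known to lie inside constant_addresses_set, the load's result can be replaced by the value it would read, and the load removed. A generic sketch of folding a load whose value is already known, independent of this example's omitted bookkeeping (foldLoadToConstant and KnownValue are hypothetical stand-ins):

// Rewire every user of Load to the already-known constant, then drop the
// now-dead load. KnownValue must have the same type as the load's result.
static void foldLoadToConstant(LoadInst *Load, Constant *KnownValue) {
  Load->replaceAllUsesWith(KnownValue);
  Load->eraseFromParent();
}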