

C++ BasicBlock::iterator Class: Code Examples

This article collects typical usage examples of the C++ BasicBlock::iterator class. If you have been wondering what exactly BasicBlock::iterator does, how to use it, or what it looks like in real code, the curated class examples here may help.


The following presents 15 code examples of the iterator class, sorted by popularity by default.
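
Before diving in, here is a minimal sketch of the core idiom for orientation: a BasicBlock::iterator walks the instruction list of a single basic block and dereferences to an Instruction. This sketch is written against the 3.x-era LLVM C++ API that most of the examples below use; the function name is ours, not part of any project cited here.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Visit every instruction in a block, front to back.
void printEachInstruction(BasicBlock &BB) {
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
    Instruction &Inst = *I;   // the iterator dereferences to an Instruction
    errs() << Inst << "\n";
  }
}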

Example 1: SimplifyDivRemOfSelect

/// SimplifyDivRemOfSelect - Try to fold a divide or remainder of a select
/// instruction.
bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
  SelectInst *SI = cast<SelectInst>(I.getOperand(1));
  
  // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
  int NonNullOperand = -1;
  if (Constant *ST = dyn_cast<Constant>(SI->getOperand(1)))
    if (ST->isNullValue())
      NonNullOperand = 2;
  // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
  if (Constant *ST = dyn_cast<Constant>(SI->getOperand(2)))
    if (ST->isNullValue())
      NonNullOperand = 1;
  
  if (NonNullOperand == -1)
    return false;
  
  Value *SelectCond = SI->getOperand(0);
  
  // Change the div/rem to use 'Y' instead of the select.
  I.setOperand(1, SI->getOperand(NonNullOperand));
  
  // Okay, we know we can replace the operand of the div/rem with 'Y' with no
  // problem.  However, the select, or the condition of the select may have
  // multiple uses.  Based on our knowledge that the operand must be non-zero,
  // propagate the known value for the select into other uses of it, and
  // propagate a known value of the condition into its other users.
  
  // If the select and condition only have a single use, don't bother with
  // this; exit early.
  if (SI->use_empty() && SelectCond->hasOneUse())
    return true;
  
  // Scan the current block backward, looking for other uses of SI.
  BasicBlock::iterator BBI = &I, BBFront = I.getParent()->begin();
  
  while (BBI != BBFront) {
    --BBI;
    // If we found a call to a function, we can't assume it will return, so
    // information from below it cannot be propagated above it.
    if (isa<CallInst>(BBI) && !isa<IntrinsicInst>(BBI))
      break;
    
    // Replace uses of the select or its condition with the known values.
    for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
         I != E; ++I) {
      if (*I == SI) {
        *I = SI->getOperand(NonNullOperand);
        Worklist.Add(BBI);
      } else if (*I == SelectCond) {
        *I = NonNullOperand == 1 ? ConstantInt::getTrue(BBI->getContext()) :
                                   ConstantInt::getFalse(BBI->getContext());
        Worklist.Add(BBI);
      }
    }
    
    // Once we are past the instruction, quit looking for it.
    if (&*BBI == SI)
      SI = 0;
    if (&*BBI == SelectCond)
      SelectCond = 0;
    
    // If we ran out of things to eliminate, break out of the loop.
    if (SelectCond == 0 && SI == 0)
      break;
    
  }
  return true;
}
Developer: 2014-class, Project: freerouter, Lines: 70, Source: InstCombineMulDivRem.cpp
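
The backward scan over BBI above is one of the most common BasicBlock::iterator patterns. Isolated as a sketch (same 3.x-era API, where the iterator is constructible from an Instruction*; the visit callback is a hypothetical placeholder):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Walk backward from Start to the front of its block, visiting each earlier
// instruction exactly once. Decrement first, then inspect.
void scanBackward(Instruction &Start, void (*visit)(Instruction &)) {
  BasicBlock::iterator BBI(&Start);                        // positioned at Start
  BasicBlock::iterator Front = Start.getParent()->begin();
  while (BBI != Front) {
    --BBI;              // step to the previous instruction
    visit(*BBI);
  }
}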

Example 2: SimplifyStoreAtEndOfBlock

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop)
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI(OtherBB->getTerminator());
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case: there is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI==OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }
//... (rest of the code omitted) ...
Developer: d0k, Project: llvm, Lines: 101, Source: InstCombineLoadStoreAlloca.cpp
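
Note how this example builds the iterator straight from an Instruction* (BasicBlock::iterator BBI(OtherBB->getTerminator())) and converts back with &*I. Older releases allowed these conversions implicitly; from roughly LLVM 3.8 on they must be explicit. A sketch of both directions under that assumption:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Pointer -> iterator (explicit in newer LLVM).
BasicBlock::iterator toIterator(Instruction *I) {
  return I->getIterator();
}

// Iterator -> pointer: dereference to Instruction&, then take its address.
Instruction *toPointer(BasicBlock::iterator It) {
  return &*It;
}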

Example 3: ArrObfuscate

void ArrayObfs::ArrObfuscate ( Function *F )
{

	// Iterate the whole Function
	Function *f = F;
	for ( Function::iterator bb = f->begin(); bb != f->end(); ++bb )
	{
		for ( BasicBlock::iterator inst = bb->begin(); inst != bb->end(); )
		{
			if ( inst->getOpcode() == 29 )		// getelementptr
			{
				//errs() << "INST : " << *inst << "\n";

				GetElementPtrInst *Ary = dyn_cast<GetElementPtrInst>(&*inst);
				Value *ptrVal = Ary->getOperand(0);
				Type *type = ptrVal->getType();

				unsigned numOfOprand = Ary->getNumOperands();
				unsigned lastOprand = numOfOprand - 1;

				// Check Type Array

				if ( PointerType *ptrType = dyn_cast<PointerType>( type ) )
				{
					Type *elementType = ptrType->getElementType();
					if ( elementType->isArrayTy() )
					{
						// Skip if Index is a Variable
						if ( dyn_cast<ConstantInt>( Ary->getOperand( lastOprand ) ) )
						{

				//////////////////////////////////////////////////////////////////////////////

				// Do Real Stuff
				Value *oprand = Ary->getOperand( lastOprand );
				Value *basePtr = Ary->getOperand( 0 );
				APInt offset = dyn_cast<ConstantInt>(oprand)->getValue();
				Value *prevPtr = basePtr;

				// Enter a Loop to Perform Random Obfuscation
				unsigned cnt = 100;

				// Prolog: Clone the Original Inst
				unsigned ObfsIdx =  cryptoutils->get_uint64_t() & 0xffff;
				Value *newOprand = ConstantInt::get( oprand->getType(), ObfsIdx );
				Instruction *gep = inst->clone();
				gep->setOperand( lastOprand, newOprand );
				gep->setOperand( 0, prevPtr );
				gep->insertBefore( inst );
				prevPtr = gep;
				offset = offset - ObfsIdx;

				// Create a Global Variable to Avoid Optimization
				Module *M = f->getParent();
				Constant *initGV = ConstantInt::get( prevPtr->getType(), 0 );
				GlobalVariable *gv = new GlobalVariable( *M, prevPtr->getType(), false, GlobalValue::CommonLinkage, initGV );

				while ( cnt-- )
				{
					// Iteratively Generate Obfuscated Code
					switch( cryptoutils->get_uint64_t() & 7 )
					{
					// Random Indexing Obfuscation
					case 0 :
					case 1 :
					case 2 :
						{
						//errs() << "=> Random Index \n";

						// Create New Instruction
						//   Create Obfuscated New Oprand in ConstantInt Type
						unsigned ObfsIdx =  cryptoutils->get_uint64_t() & 0xffff;
						Value *newOprand = ConstantInt::get( oprand->getType(), ObfsIdx );

						//   Create GetElementPtrInst Instruction
						GetElementPtrInst *gep = GetElementPtrInst::Create( prevPtr, newOprand, "", inst );

						//Set prevPtr
						prevPtr = gep;

						//errs() << "Created : " << *prevPtr << "\n";

						offset = offset - ObfsIdx;
						break;
						}

					// Ptr Dereference
					case 3 :
					case 4 :
						{
						//errs() << "=> Ptr Dereference \n";

						Module *M = f->getParent();
						Value *ONE = ConstantInt::get( Type::getInt32Ty( M->getContext() ), 1 );
						Value *tmp = new AllocaInst( prevPtr->getType(), ONE, "", inst );

						new StoreInst( prevPtr, tmp, inst );

						prevPtr = new LoadInst( tmp, "", inst );

//... (rest of the code omitted) ...
Developer: HighW4y2H3ll, Project: PJ_O-LLVM, Lines: 101, Source: ArrayObfs.cpp
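
This example's inner loop deliberately leaves the increment out of the for-header (for ( BasicBlock::iterator inst = bb->begin(); inst != bb->end(); )), presumably because the omitted tail of the function erases or replaces the instruction under the iterator. The safe general pattern for that, as a sketch (shouldErase is a hypothetical predicate):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Erase matching instructions without invalidating the loop iterator:
// advance past the instruction *before* erasing it.
void eraseMatching(BasicBlock &BB, bool (*shouldErase)(Instruction &)) {
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ) {
    Instruction *Inst = &*I++;   // grab the pointer, then advance
    if (shouldErase(*Inst) && Inst->use_empty())
      Inst->eraseFromParent();   // safe: I no longer points at Inst
  }
}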

Example 4: runOnModule

bool GenericToNVVM::runOnModule(Module &M) {
  // Create a clone of each global variable that has the default address space.
  // The clone is created with the global address space specifier, and the pair
  // of original global variable and its clone is placed in the GVMap for later
  // use.

  for (Module::global_iterator I = M.global_begin(), E = M.global_end();
       I != E;) {
    GlobalVariable *GV = &*I++;
    if (GV->getType()->getAddressSpace() == llvm::ADDRESS_SPACE_GENERIC &&
        !llvm::isTexture(*GV) && !llvm::isSurface(*GV) &&
        !llvm::isSampler(*GV) && !GV->getName().startswith("llvm.")) {
      GlobalVariable *NewGV = new GlobalVariable(
          M, GV->getValueType(), GV->isConstant(),
          GV->getLinkage(),
          GV->hasInitializer() ? GV->getInitializer() : nullptr,
          "", GV, GV->getThreadLocalMode(), llvm::ADDRESS_SPACE_GLOBAL);
      NewGV->copyAttributesFrom(GV);
      GVMap[GV] = NewGV;
    }
  }

  // Return immediately, if every global variable has a specific address space
  // specifier.
  if (GVMap.empty()) {
    return false;
  }

  // Walk through the instructions in function definitions, and replace any use
  // of original global variables in GVMap with a use of the corresponding
  // copies in GVMap.  If necessary, promote constants to instructions.
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
    if (I->isDeclaration()) {
      continue;
    }
    IRBuilder<> Builder(I->getEntryBlock().getFirstNonPHIOrDbg());
    for (Function::iterator BBI = I->begin(), BBE = I->end(); BBI != BBE;
         ++BBI) {
      for (BasicBlock::iterator II = BBI->begin(), IE = BBI->end(); II != IE;
           ++II) {
        for (unsigned i = 0, e = II->getNumOperands(); i < e; ++i) {
          Value *Operand = II->getOperand(i);
          if (isa<Constant>(Operand)) {
            II->setOperand(
                i, remapConstant(&M, &*I, cast<Constant>(Operand), Builder));
          }
        }
      }
    }
    ConstantToValueMap.clear();
  }

  // Copy GVMap over to a standard value map.
  ValueToValueMapTy VM;
  for (auto I = GVMap.begin(), E = GVMap.end(); I != E; ++I)
    VM[I->first] = I->second;

  // Walk through the metadata section and update the debug information
  // associated with the global variables in the default address space.
  for (NamedMDNode &I : M.named_metadata()) {
    remapNamedMDNode(VM, &I);
  }

  // Walk through the global variable initializers, and replace any use of
  // original global variables in GVMap with a use of the corresponding copies
  // in GVMap.  The copies need to be bitcast to the original global variable
  // types, as we cannot use cvta in global variable initializers.
  for (GVMapTy::iterator I = GVMap.begin(), E = GVMap.end(); I != E;) {
    GlobalVariable *GV = I->first;
    GlobalVariable *NewGV = I->second;

    // Remove GV from the map so that it can be RAUWed.  Note that
    // DenseMap::erase() won't invalidate any iterators but this one.
    auto Next = std::next(I);
    GVMap.erase(I);
    I = Next;

    Constant *BitCastNewGV = ConstantExpr::getPointerCast(NewGV, GV->getType());
    // At this point, the remaining uses of GV should be found only in global
    // variable initializers, as other uses have already been removed
    // while walking through the instructions in function definitions.
    GV->replaceAllUsesWith(BitCastNewGV);
    std::string Name = GV->getName();
    GV->eraseFromParent();
    NewGV->setName(Name);
  }
  assert(GVMap.empty() && "Expected it to be empty by now");

  return true;
}
Developer: CSI-LLVM, Project: llvm, Lines: 90, Source: NVPTXGenericToNVVM.cpp

Example 5: Ranges

/// tryAggregating - When scanning forward over instructions, we look for
/// other loads or stores that could be aggregated with this one.
/// Returns the last instruction added (if one was added) since we might have
/// removed some loads or stores and that might invalidate an iterator.
Instruction *AggregateGlobalOpsOpt::tryAggregating(Instruction *StartInst, Value *StartPtr,
    bool DebugThis) {
  if (TD == 0) return 0;

  Module* M = StartInst->getParent()->getParent()->getParent();
  LLVMContext& Context = StartInst->getContext();

  Type* int8Ty = Type::getInt8Ty(Context);
  Type* sizeTy = Type::getInt64Ty(Context);
  Type* globalInt8PtrTy = int8Ty->getPointerTo(globalSpace);
  bool isLoad = isa<LoadInst>(StartInst);
  bool isStore = isa<StoreInst>(StartInst);
  Instruction *lastAddedInsn = NULL;
  Instruction *LastLoadOrStore = NULL;
 
  SmallVector<Instruction*, 8> toRemove;

  // Okay, so we now have a single global load/store. Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemOpRanges Ranges(*TD);
 
  // Put the first store in since we want to preserve the order.
  Ranges.addInst(0, StartInst);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {

    if( isGlobalLoadOrStore(BI, globalSpace, isLoad, isStore) ) {
      // OK!
    } else {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory())
        break;
      if (isStore && BI->mayReadFromMemory())
        break;
      continue;
    }

    if ( isStore && isa<StoreInst>(BI) ) {
      StoreInst *NextStore = cast<StoreInst>(BI);
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
        break;

      Ranges.addStore(Offset, NextStore);
      LastLoadOrStore = NextStore;
    } else {
      LoadInst *NextLoad = cast<LoadInst>(BI);
      if (!NextLoad->isSimple()) break;

      // Check to see if this load is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextLoad->getPointerOperand(), Offset, *TD))
        break;

      Ranges.addLoad(Offset, NextLoad);
      LastLoadOrStore = NextLoad;
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (!Ranges.moreThanOneOp())
    return 0;

  // Divide the instructions between StartInst and LastLoadOrStore into
  // addressing, memops, and uses of memops (uses of loads)
  reorderAddressingMemopsUses(StartInst, LastLoadOrStore, DebugThis);

  Instruction* insertBefore = StartInst;
  IRBuilder<> builder(insertBefore);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memcpy's for anything big enough to be worthwhile.
  for (MemOpRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemOpRange &Range = *I;
    Value* oldBaseI = NULL;
    Value* newBaseI = NULL;

    if (Range.TheStores.size() == 1) continue; // Don't bother if there's only one thing...

    builder.SetInsertPoint(insertBefore);

    // Otherwise, we do want to transform this!  Create a new memcpy.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

//... (rest of the code omitted) ...
Developer: hildeth, Project: chapel, Lines: 101, Source: llvmAggregateGlobalOps.cpp
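
The loop above starts a BasicBlock::iterator at an arbitrary instruction and scans forward until the block terminator. The same shape in isolation (an era-matched sketch: TerminatorInst existed through LLVM 7, and the iterator converts to a pointer inside isa<>; newer releases would test BI->isTerminator() instead):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Count the instructions strictly between Start and its block's terminator.
unsigned countFollowers(Instruction *Start) {
  unsigned N = 0;
  BasicBlock::iterator BI(Start);
  for (++BI; !isa<TerminatorInst>(BI); ++BI)   // stop at the terminator
    ++N;
  return N;
}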

Example 6: performCustomLowering

/// performCustomLowering - Insert code to maintain the shadow stack.
bool ShadowStackGC::performCustomLowering(Function &F) {
  LLVMContext &Context = F.getContext();
  
  // Find calls to llvm.gcroot.
  CollectRoots(F);

  // If there are no roots in this function, then there is no need to add a
  // stack map entry for it.
  if (Roots.empty())
    return false;

  // Build the constant map and figure the type of the shadow stack entry.
  Value *FrameMap = GetFrameMap(F);
  Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);

  // Build the shadow stack entry at the very start of the function.
  BasicBlock::iterator IP = F.getEntryBlock().begin();
  IRBuilder<> AtEntry(IP->getParent(), IP);

  Instruction *StackEntry   = AtEntry.CreateAlloca(ConcreteStackEntryTy, 0,
                                                   "gc_frame");

  while (isa<AllocaInst>(IP)) ++IP;
  AtEntry.SetInsertPoint(IP->getParent(), IP);

  // Initialize the map pointer and load the current head of the shadow stack.
  Instruction *CurrentHead  = AtEntry.CreateLoad(Head, "gc_currhead");
  Instruction *EntryMapPtr  = CreateGEP(Context, AtEntry, StackEntry,
                                        0,1,"gc_frame.map");
  AtEntry.CreateStore(FrameMap, EntryMapPtr);

  // After all the allocas...
  for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
    // For each root, find the corresponding slot in the aggregate...
    Value *SlotPtr = CreateGEP(Context, AtEntry, StackEntry, 1 + I, "gc_root");

    // And use it in lieu of the alloca.
    AllocaInst *OriginalAlloca = Roots[I].second;
    SlotPtr->takeName(OriginalAlloca);
    OriginalAlloca->replaceAllUsesWith(SlotPtr);
  }

  // Move past the original stores inserted by GCStrategy::InitRoots. This isn't
  // really necessary (the collector would never see the intermediate state at
  // runtime), but it's nicer not to push the half-initialized entry onto the
  // shadow stack.
  while (isa<StoreInst>(IP)) ++IP;
  AtEntry.SetInsertPoint(IP->getParent(), IP);

  // Push the entry onto the shadow stack.
  Instruction *EntryNextPtr = CreateGEP(Context, AtEntry,
                                        StackEntry,0,0,"gc_frame.next");
  Instruction *NewHeadVal   = CreateGEP(Context, AtEntry, 
                                        StackEntry, 0, "gc_newhead");
  AtEntry.CreateStore(CurrentHead, EntryNextPtr);
  AtEntry.CreateStore(NewHeadVal, Head);

  // For each instruction that escapes...
  EscapeEnumerator EE(F, "gc_cleanup");
  while (IRBuilder<> *AtExit = EE.Next()) {
    // Pop the entry from the shadow stack. Don't reuse CurrentHead from
    // AtEntry, since that would make the value live for the entire function.
    Instruction *EntryNextPtr2 = CreateGEP(Context, *AtExit, StackEntry, 0, 0,
                                           "gc_frame.next");
    Value *SavedHead = AtExit->CreateLoad(EntryNextPtr2, "gc_savedhead");
                       AtExit->CreateStore(SavedHead, Head);
  }

  // Delete the original allocas (which are no longer used) and the intrinsic
  // calls (which are no longer valid). Doing this last avoids invalidating
  // iterators.
  for (unsigned I = 0, E = Roots.size(); I != E; ++I) {
    Roots[I].first->eraseFromParent();
    Roots[I].second->eraseFromParent();
  }

  Roots.clear();
  return true;
}
Developer: 7heaven, Project: softart, Lines: 80, Source: ShadowStackGC.cpp
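
Twice in this example the insertion iterator is advanced past a run of instructions (allocas, then stores) before re-pointing the IRBuilder. That repositioning step, isolated as a sketch against the same era of the API:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Move a builder's insertion point past the leading allocas of the entry
// block. The terminator is never an alloca, so the loop always stops.
void skipEntryAllocas(Function &F, IRBuilder<> &B) {
  BasicBlock::iterator IP = F.getEntryBlock().begin();
  while (isa<AllocaInst>(IP))
    ++IP;
  B.SetInsertPoint(IP->getParent(), IP);
}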

Example 7: EraseInstFromFunction

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  // If the RHS is an alloca with two uses, the other one being a
  // llvm.dbg.declare, zapify the store and the declare, making the
  // alloca dead.  We must do this to prevent declares from affecting
  // codegen.
  if (!SI.isVolatile()) {
    if (Ptr->hasOneUse()) {
      if (isa<AllocaInst>(Ptr)) 
        return EraseInstFromFunction(SI);
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
        if (isa<AllocaInst>(GEP->getOperand(0))) {
          if (GEP->getOperand(0)->hasOneUse())
            return EraseInstFromFunction(SI);
          if (DbgDeclareInst *DI = hasOneUsePlusDeclare(GEP->getOperand(0))) {
            EraseInstFromFunction(*DI);
            return EraseInstFromFunction(SI);
          }
        }
      }
    }
    if (DbgDeclareInst *DI = hasOneUsePlusDeclare(Ptr)) {
      EraseInstFromFunction(*DI);
      return EraseInstFromFunction(SI);
    }
  }

  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()));
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      TD->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }    
    
    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (!PrevSI->isVolatile() && equivalentAddressValues(PrevSI->getOperand(1),
                                                          SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }
    
    // If this is a load, we have to stop.  However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          !SI.isVolatile())
        return EraseInstFromFunction(SI);
      
      // Otherwise, this is a load from some other location.  Stores before it
      // may not be dead.
      break;
    }
    
    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }
  
  
  if (SI.isVolatile()) return 0;  // Don't hack volatile stores.

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
//... (rest of the code omitted) ...
Developer: dmlap, Project: llvm-js-backend, Lines: 101, Source: InstCombineLoadStoreAlloca.cpp

Example 8: InjectCoverageAtBlock

void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
                                                    bool UseCalls) {
  // Don't insert coverage for unreachable blocks: we will never call
  // __sanitizer_cov() for them, so counting them in
  // NumberOfInstrumentedBlocks() might complicate calculation of code coverage
  // percentage. Also, unreachable instructions frequently have no debug
  // locations.
  if (isa<UnreachableInst>(BB.getTerminator()))
    return;
  BasicBlock::iterator IP = BB.getFirstInsertionPt();

  bool IsEntryBB = &BB == &F.getEntryBlock();
  DebugLoc EntryLoc;
  if (IsEntryBB) {
    if (auto SP = getDISubprogram(&F))
      EntryLoc = DebugLoc::get(SP->getScopeLine(), 0, SP);
    // Keep static allocas and llvm.localescape calls in the entry block.  Even
    // if we aren't splitting the block, it's nice for allocas to be before
    // calls.
    IP = PrepareToSplitEntryBlock(BB, IP);
  } else {
    EntryLoc = IP->getDebugLoc();
  }

  IRBuilder<> IRB(&*IP);
  IRB.SetCurrentDebugLocation(EntryLoc);
  Value *GuardP = IRB.CreateAdd(
      IRB.CreatePointerCast(GuardArray, IntptrTy),
      ConstantInt::get(IntptrTy, (1 + NumberOfInstrumentedBlocks()) * 4));
  Type *Int32PtrTy = PointerType::getUnqual(IRB.getInt32Ty());
  GuardP = IRB.CreateIntToPtr(GuardP, Int32PtrTy);
  if (Options.TracePC) {
    IRB.CreateCall(SanCovTracePC);
  } else if (Options.TraceBB) {
    IRB.CreateCall(IsEntryBB ? SanCovTraceEnter : SanCovTraceBB, GuardP);
  } else if (UseCalls) {
    IRB.CreateCall(SanCovWithCheckFunction, GuardP);
  } else {
    LoadInst *Load = IRB.CreateLoad(GuardP);
    Load->setAtomic(Monotonic);
    Load->setAlignment(4);
    SetNoSanitizeMetadata(Load);
    Value *Cmp = IRB.CreateICmpSGE(Constant::getNullValue(Load->getType()), Load);
    Instruction *Ins = SplitBlockAndInsertIfThen(
        Cmp, &*IP, false, MDBuilder(*C).createBranchWeights(1, 100000));
    IRB.SetInsertPoint(Ins);
    IRB.SetCurrentDebugLocation(EntryLoc);
    // __sanitizer_cov gets the PC of the instruction using GET_CALLER_PC.
    IRB.CreateCall(SanCovFunction, GuardP);
    IRB.CreateCall(EmptyAsm, {}); // Avoids callback merge.
  }

  if (Options.Use8bitCounters) {
    IRB.SetInsertPoint(&*IP);
    Value *P = IRB.CreateAdd(
        IRB.CreatePointerCast(EightBitCounterArray, IntptrTy),
        ConstantInt::get(IntptrTy, NumberOfInstrumentedBlocks() - 1));
    P = IRB.CreateIntToPtr(P, IRB.getInt8PtrTy());
    LoadInst *LI = IRB.CreateLoad(P);
    Value *Inc = IRB.CreateAdd(LI, ConstantInt::get(IRB.getInt8Ty(), 1));
    StoreInst *SI = IRB.CreateStore(Inc, P);
    SetNoSanitizeMetadata(LI);
    SetNoSanitizeMetadata(SI);
  }
}
Developer: chrisdiamand, Project: llvm, Lines: 65, Source: SanitizerCoverage.cpp
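
getFirstInsertionPt(), used at the top of this example, returns an iterator past any PHI nodes (and landing pads) to the first position where ordinary instructions may be inserted; note also the explicit &*IP conversion for the newer API. A sketch (the inserted add is purely illustrative and assumes V has integer type):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Insert an illustrative instruction at the first legal insertion point.
void insertAtTop(BasicBlock &BB, Value *V) {
  BasicBlock::iterator IP = BB.getFirstInsertionPt();
  IRBuilder<> IRB(&*IP);            // &*IP: iterator -> Instruction*
  IRB.CreateAdd(V, V, "marker");    // hypothetical payload
}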

Example 9: assert

/// SplitBlockPredecessors - This method transforms BB by introducing a new
/// basic block into the function, and moving some of the predecessors of BB to
/// be predecessors of the new block.  The new predecessors are indicated by the
/// Preds array, which has NumPreds elements in it.  The new block is given a
/// suffix of 'Suffix'.
///
/// This currently updates the LLVM IR, AliasAnalysis, DominatorTree,
/// DominanceFrontier, LoopInfo, and LCCSA but no other analyses.
/// In particular, it does not preserve LoopSimplify (because it's
/// complicated to handle the case where one of the edges being split
/// is an exit of a loop with other exits).
///
BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB, 
                                         BasicBlock *const *Preds,
                                         unsigned NumPreds, const char *Suffix,
                                         Pass *P) {
  // Create new basic block, insert right before the original block.
  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), BB->getName()+Suffix,
                                         BB->getParent(), BB);
  
  // The new block unconditionally branches to the old block.
  BranchInst *BI = BranchInst::Create(BB, NewBB);
  
  LoopInfo *LI = P ? P->getAnalysisIfAvailable<LoopInfo>() : 0;
  Loop *L = LI ? LI->getLoopFor(BB) : 0;
  bool PreserveLCSSA = P->mustPreserveAnalysisID(LCSSAID);

  // Move the edges from Preds to point to NewBB instead of BB.
  // While here, if we need to preserve loop analyses, collect
  // some information about how this split will affect loops.
  bool HasLoopExit = false;
  bool IsLoopEntry = !!L;
  bool SplitMakesNewLoopHeader = false;
  for (unsigned i = 0; i != NumPreds; ++i) {
    // This is slightly more strict than necessary; the minimum requirement
    // is that there be no more than one indirectbr branching to BB. And
    // all BlockAddress uses would need to be updated.
    assert(!isa<IndirectBrInst>(Preds[i]->getTerminator()) &&
           "Cannot split an edge from an IndirectBrInst");

    Preds[i]->getTerminator()->replaceUsesOfWith(BB, NewBB);

    if (LI) {
      // If we need to preserve LCSSA, determine if any of
      // the preds is a loop exit.
      if (PreserveLCSSA)
        if (Loop *PL = LI->getLoopFor(Preds[i]))
          if (!PL->contains(BB))
            HasLoopExit = true;
      // If we need to preserve LoopInfo, note whether any of the
      // preds crosses an interesting loop boundary.
      if (L) {
        if (L->contains(Preds[i]))
          IsLoopEntry = false;
        else
          SplitMakesNewLoopHeader = true;
      }
    }
  }

  // Update dominator tree and dominator frontier if available.
  DominatorTree *DT = P ? P->getAnalysisIfAvailable<DominatorTree>() : 0;
  if (DT)
    DT->splitBlock(NewBB);
  if (DominanceFrontier *DF = P ? P->getAnalysisIfAvailable<DominanceFrontier>():0)
    DF->splitBlock(NewBB);

  // Insert a new PHI node into NewBB for every PHI node in BB and that new PHI
  // node becomes an incoming value for BB's phi node.  However, if the Preds
  // list is empty, we need to insert dummy entries into the PHI nodes in BB to
  // account for the newly created predecessor.
  if (NumPreds == 0) {
    // Insert dummy values as the incoming value.
    for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++I)
      cast<PHINode>(I)->addIncoming(UndefValue::get(I->getType()), NewBB);
    return NewBB;
  }

  AliasAnalysis *AA = P ? P->getAnalysisIfAvailable<AliasAnalysis>() : 0;

  if (L) {
    if (IsLoopEntry) {
      // Add the new block to the nearest enclosing loop (and not an
      // adjacent loop). To find this, examine each of the predecessors and
      // determine which loops enclose them, and select the most-nested loop
      // which contains the loop containing the block being split.
      Loop *InnermostPredLoop = 0;
      for (unsigned i = 0; i != NumPreds; ++i)
        if (Loop *PredLoop = LI->getLoopFor(Preds[i])) {
          // Seek a loop which actually contains the block being split (to
          // avoid adjacent loops).
          while (PredLoop && !PredLoop->contains(BB))
            PredLoop = PredLoop->getParentLoop();
          // Select the most-nested of these loops which contains the block.
          if (PredLoop &&
              PredLoop->contains(BB) &&
              (!InnermostPredLoop ||
               InnermostPredLoop->getLoopDepth() < PredLoop->getLoopDepth()))
            InnermostPredLoop = PredLoop;
        }
//... (rest of the code omitted) ...
Developer: aaasz, Project: SHP, Lines: 101, Source: BasicBlockUtils.cpp
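
The PHI loop in this example (for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++I)) works because PHI nodes are always grouped at the very top of a block, so iteration can stop at the first non-PHI. The same loop shape as a standalone, era-matched sketch:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Count the PHI nodes at the top of a block; stop at the first non-PHI.
unsigned countPHIs(BasicBlock *BB) {
  unsigned N = 0;
  for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++I)
    ++N;
  return N;
}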

Example 10: RemoveBlockIfDead

/// RemoveBlockIfDead - If the specified block is dead, remove it, update loop
/// information, and remove any dead successors it has.
///
void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
                                     std::vector<Instruction*> &Worklist,
                                     Loop *L) {
  if (pred_begin(BB) != pred_end(BB)) {
    // This block isn't dead, since an edge to BB was just removed, see if there
    // are any easy simplifications we can do now.
    if (BasicBlock *Pred = BB->getSinglePredecessor()) {
      // If it has one pred, fold phi nodes in BB.
      while (isa<PHINode>(BB->begin()))
        ReplaceUsesOfWith(BB->begin(),
                          cast<PHINode>(BB->begin())->getIncomingValue(0),
                          Worklist, L, LPM);

      // If this is the header of a loop and the only pred is the latch, we now
      // have an unreachable loop.
      if (Loop *L = LI->getLoopFor(BB))
        if (loopHeader == BB && L->contains(Pred)) {
          // Remove the branch from the latch to the header block, this makes
          // the header dead, which will make the latch dead (because the header
          // dominates the latch).
          LPM->deleteSimpleAnalysisValue(Pred->getTerminator(), L);
          Pred->getTerminator()->eraseFromParent();
          new UnreachableInst(BB->getContext(), Pred);

          // The loop is now broken, remove it from LI.
          RemoveLoopFromHierarchy(L);

          // Reprocess the header, which now IS dead.
          RemoveBlockIfDead(BB, Worklist, L);
          return;
        }

      // If pred ends in a uncond branch, add uncond branch to worklist so that
      // the two blocks will get merged.
      if (BranchInst *BI = dyn_cast<BranchInst>(Pred->getTerminator()))
        if (BI->isUnconditional())
          Worklist.push_back(BI);
    }
    return;
  }

  DEBUG(dbgs() << "Nuking dead block: " << *BB);

  // Remove the instructions in the basic block from the worklist.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    RemoveFromWorklist(I, Worklist);

    // Anything that uses the instructions in this basic block should have its
    // uses replaced with undefs.
    // If I is not void type then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust itself.
    if (!I->getType()->isVoidTy())
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
  }

  // If this is the edge to the header block for a loop, remove the loop and
  // promote all subloops.
  if (Loop *BBLoop = LI->getLoopFor(BB)) {
    if (BBLoop->getLoopLatch() == BB) {
      RemoveLoopFromHierarchy(BBLoop);
      if (currentLoop == BBLoop) {
        currentLoop = 0;
        redoLoop = false;
      }
    }
  }

  // Remove the block from the loop info, which removes it from any loops it
  // was in.
  LI->removeBlock(BB);


  // Remove phi node entries in successors for this block.
  TerminatorInst *TI = BB->getTerminator();
  SmallVector<BasicBlock*, 4> Succs;
  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
    Succs.push_back(TI->getSuccessor(i));
    TI->getSuccessor(i)->removePredecessor(BB);
  }

  // Unique the successors, remove anything with multiple uses.
  array_pod_sort(Succs.begin(), Succs.end());
  Succs.erase(std::unique(Succs.begin(), Succs.end()), Succs.end());

  // Remove the basic block, including all of the instructions contained in it.
  LPM->deleteSimpleAnalysisValue(BB, L);
  BB->eraseFromParent();
  // Remove successor blocks here that are not dead, so that we know we only
  // have dead blocks in this list.  Nondead blocks have a way of becoming dead,
  // then getting removed before we revisit them, which is badness.
  //
  for (unsigned i = 0; i != Succs.size(); ++i)
    if (pred_begin(Succs[i]) != pred_end(Succs[i])) {
      // One exception is loop headers.  If this block was the preheader for a
      // loop, then we DO want to visit the loop so the loop gets deleted.
      // We know that if the successor is a loop header, that this loop had to
      // be the preheader: the case where this was the latch block was handled
//... (rest of the code omitted) ...
Developer: Abocer, Project: android-4.2_r1, Lines: 101, Source: LoopUnswitch.cpp

Example 11: shouldSpeculateInstrs

/// Determine whether the instructions in this range may be safely and cheaply
/// speculated. This is not an important enough situation to develop complex
/// heuristics. We handle a single arithmetic instruction along with any type
/// conversions.
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
                                  BasicBlock::iterator End, Loop *L) {
  bool seenIncrement = false;
  bool MultiExitLoop = false;

  if (!L->getExitingBlock())
    MultiExitLoop = true;

  for (BasicBlock::iterator I = Begin; I != End; ++I) {

    if (!isSafeToSpeculativelyExecute(I))
      return false;

    if (isa<DbgInfoIntrinsic>(I))
      continue;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::GetElementPtr:
      // GEPs are cheap if all indices are constant.
      if (!cast<GEPOperator>(I)->hasAllConstantIndices())
        return false;
      // fall-thru to increment case
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr: {
      Value *IVOpnd = !isa<Constant>(I->getOperand(0))
                          ? I->getOperand(0)
                          : !isa<Constant>(I->getOperand(1))
                                ? I->getOperand(1)
                                : nullptr;
      if (!IVOpnd)
        return false;

      // If increment operand is used outside of the loop, this speculation
      // could cause extra live range interference.
      if (MultiExitLoop) {
        for (User *UseI : IVOpnd->users()) {
          auto *UserInst = cast<Instruction>(UseI);
          if (!L->contains(UserInst))
            return false;
        }
      }

      if (seenIncrement)
        return false;
      seenIncrement = true;
      break;
    }
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // ignore type conversions
      break;
    }
  }
  return true;
}
Developer: AmesianX, Project: llvm-othergen, Lines: 68, Source: LoopRotation.cpp
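
This example also shows that a pair of BasicBlock::iterators describes a half-open range [Begin, End), which lets a helper inspect any slice of a block rather than the whole thing. A sketch using the same convention:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Count the memory-writing instructions in the half-open range [Begin, End).
unsigned countWrites(BasicBlock::iterator Begin, BasicBlock::iterator End) {
  unsigned N = 0;
  for (BasicBlock::iterator I = Begin; I != End; ++I)
    if (I->mayWriteToMemory())
      ++N;
  return N;
}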

Example 12: IsTrivialUnswitchCondition

/// IsTrivialUnswitchCondition - Check to see if this unswitch condition is
/// trivial: that is, that the condition controls whether or not the loop does
/// anything at all.  If this is a trivial condition, unswitching produces no
/// code duplications (equivalently, it produces a simpler loop and a new empty
/// loop, which gets deleted).
///
/// If this is a trivial condition, return true, otherwise return false.  When
/// returning true, this sets Cond and Val to the condition that controls the
/// trivial condition: when Cond dynamically equals Val, the loop is known to
/// exit.  Finally, this sets LoopExit to the BB that the loop exits to when
/// Cond == Val.
///
bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
                                       BasicBlock **LoopExit) {
  BasicBlock *Header = currentLoop->getHeader();
  TerminatorInst *HeaderTerm = Header->getTerminator();
  LLVMContext &Context = Header->getContext();

  BasicBlock *LoopExitBB = 0;
  if (BranchInst *BI = dyn_cast<BranchInst>(HeaderTerm)) {
    // If the header block doesn't end with a conditional branch on Cond, we
    // can't handle it.
    if (!BI->isConditional() || BI->getCondition() != Cond)
      return false;

    // Check to see if a successor of the branch is guaranteed to
    // exit through a unique exit block without having any
    // side-effects.  If so, determine the value of Cond that causes it to do
    // this.
    if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop,
                                             BI->getSuccessor(0)))) {
      if (Val) *Val = ConstantInt::getTrue(Context);
    } else if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop,
                                                    BI->getSuccessor(1)))) {
      if (Val) *Val = ConstantInt::getFalse(Context);
    }
  } else if (SwitchInst *SI = dyn_cast<SwitchInst>(HeaderTerm)) {
    // If this isn't a switch on Cond, we can't handle it.
    if (SI->getCondition() != Cond) return false;

    // Check to see if a successor of the switch is guaranteed to go to the
    // latch block or exit through a one exit block without having any
    // side-effects.  If so, determine the value of Cond that causes it to do
    // this.
    // Note that we can't trivially unswitch on the default case or
    // on already unswitched cases.
    for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
         i != e; ++i) {
      BasicBlock* LoopExitCandidate;
      if ((LoopExitCandidate = isTrivialLoopExitBlock(currentLoop,
                                               i.getCaseSuccessor()))) {
        // Okay, we found a trivial case, remember the value that is trivial.
        ConstantInt* CaseVal = i.getCaseValue();

        // Check that it was not unswitched before, since already-unswitched
        // trivial values look trivial too.
        if (BranchesInfo.isUnswitched(SI, CaseVal))
          continue;
        LoopExitBB = LoopExitCandidate;
        if (Val) *Val = CaseVal;
        break;
      }
    }
  }

  // If we didn't find a single unique LoopExit block, or if the loop exit block
  // contains phi nodes, this isn't trivial.
  if (!LoopExitBB || isa<PHINode>(LoopExitBB->begin()))
    return false;   // Can't handle this.

  if (LoopExit) *LoopExit = LoopExitBB;

  // We already know that nothing uses any scalar values defined inside of this
  // loop.  As such, we just have to check to see if this loop will execute any
  // side-effecting instructions (e.g. stores, calls, volatile loads) in the
  // part of the loop that the code *would* execute.  We already checked the
  // tail, check the header now.
  for (BasicBlock::iterator I = Header->begin(), E = Header->end(); I != E; ++I)
    if (I->mayHaveSideEffects())
      return false;
  return true;
}
Developer: Abocer, Project: android-4.2_r1, Lines: 82, Source: LoopUnswitch.cpp

Example 13: EraseInstFromFunction

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
    Value *Val = SI.getOperand(0);
    Value *Ptr = SI.getOperand(1);

    // Try to canonicalize the stored type.
    if (combineStoreToValueType(*this, SI))
        return EraseInstFromFunction(SI);

    // Attempt to improve the alignment.
    unsigned KnownAlign = getOrEnforceKnownAlignment(
                              Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, AC, DT);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign =
        StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
        SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
        SI.setAlignment(EffectiveStoreAlign);

    // Try to canonicalize the stored type.
    if (unpackStoreToAggregate(*this, SI))
        return EraseInstFromFunction(SI);

    // Replace GEP indices if possible.
    if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
        Worklist.Add(NewGEPI);
        return &SI;
    }

    // Don't hack volatile/atomic stores.
    // FIXME: Some bits are legal for atomic stores; needs refactoring.
    if (!SI.isSimple()) return nullptr;

    // If the RHS is an alloca with a single use, zapify the store, making the
    // alloca dead.
    if (Ptr->hasOneUse()) {
        if (isa<AllocaInst>(Ptr))
            return EraseInstFromFunction(SI);
        if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
            if (isa<AllocaInst>(GEP->getOperand(0))) {
                if (GEP->getOperand(0)->hasOneUse())
                    return EraseInstFromFunction(SI);
            }
        }
    }

    // Do really simple DSE, to catch cases where there are several consecutive
    // stores to the same location, separated by a few arithmetic operations. This
    // situation often occurs with bitfield accesses.
    BasicBlock::iterator BBI = &SI;
    for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
            --ScanInsts) {
        --BBI;
        // Don't count debug info directives, lest they affect codegen,
        // and we skip pointer-to-pointer bitcasts, which are NOPs.
        if (isa<DbgInfoIntrinsic>(BBI) ||
                (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
            ScanInsts++;
            continue;
        }

        if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
            // Prev store isn't volatile, and stores to the same location?
            if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                    SI.getOperand(1))) {
                ++NumDeadStore;
                ++BBI;
                EraseInstFromFunction(*PrevSI);
                continue;
            }
            break;
        }

        // If this is a load, we have to stop.  However, if the loaded value is from
        // the pointer we're loading and is producing the pointer we're storing,
        // then *this* store is dead (X = load P; store X -> P).
        if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
            if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
                    LI->isSimple())
                return EraseInstFromFunction(SI);

            // Otherwise, this is a load from some other location.  Stores before it
            // may not be dead.
            break;
        }

        // Don't skip over loads or things that can modify memory.
        if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
            break;
    }

    // store X, null    -> turns into 'unreachable' in SimplifyCFG
    if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
        if (!isa<UndefValue>(Val)) {
            SI.setOperand(0, UndefValue::get(Val->getType()));
            if (Instruction *U = dyn_cast<Instruction>(Val))
                Worklist.Add(U);  // Dropped a use.
        }
        return nullptr;  // Do not modify these!
//... (rest of the code omitted) ...
Developer: IanLee1521, Project: ares, Lines: 101, Source: InstCombineLoadStoreAlloca.cpp

Example 14: mightUseCTR

bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        // Inline ASM is okay, unless it clobbers the ctr register.
        InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
        for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
          InlineAsm::ConstraintInfo &C = CIV[i];
          if (C.Type != InlineAsm::isInput)
            for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
              if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
                return true;
        }

        continue;
      }

      if (!TM)
        return true;
      const TargetLowering *TLI = TM->getTargetLowering();

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
                       !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
 // let's return it to _setjmp state
#  pragma pop_macro("setjmp")
#  undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
          case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:     Opcode = ISD::FROUND;     break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc::Func Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

//... (rest of the code omitted) ...
Developer: 0xDEC0DE8, Project: mcsema, Lines: 101, Source: PPCCTRLoops.cpp

Example 15: getOperandName

string getOperandName(BasicBlock::iterator inst)
{
	return (inst->getOperand(1))->getName().str();
}
Developer: rakeshgeci, Project: mop-llvm, Lines: 4, Source: GCSE.cpp


Note: The BasicBlock::iterator class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers, and copyright in the code remains with the original authors. Refer to each project's license before redistributing or reusing the code; do not reproduce without permission.