

C++ SmallPtrSet Class Code Examples

This article collects typical usage examples of the C++ SmallPtrSet class. If you are wondering what the SmallPtrSet class is for, how to use it, or what real-world SmallPtrSet code looks like, the curated class code examples below should help.


Fifteen SmallPtrSet code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
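
Before diving into the examples, here is a minimal, self-contained sketch of the core SmallPtrSet API that nearly all of them rely on: the inline-size template parameter, insert, count, erase, and clear. This is illustrative usage written for this article, not code taken from any project below.

#include "llvm/ADT/SmallPtrSet.h"

void basicUsage(int *A, int *B) {
  // The second template parameter is the number of pointers kept in
  // inline storage; the set only heap-allocates once it outgrows it.
  llvm::SmallPtrSet<int *, 8> Seen;

  Seen.insert(A);        // add a pointer
  if (Seen.count(A))     // membership test: returns 0 or 1
    Seen.insert(B);
  Seen.erase(A);         // remove a single pointer
  Seen.clear();          // drop all elements, reuse inline storage
}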

Example 1: runOnMachineFunction

bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (DisablePeephole)
    return false;

  TM  = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT  = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;

  bool Changed = false;

  SmallPtrSet<MachineInstr*, 8> LocalMIs;
  SmallSet<unsigned, 4> ImmDefRegs;
  DenseMap<unsigned, MachineInstr*> ImmDefMIs;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;

    bool SeenMoveImm = false;
    LocalMIs.clear();
    ImmDefRegs.clear();
    ImmDefMIs.clear();

    bool First = true;
    MachineBasicBlock::iterator PMII;
    for (MachineBasicBlock::iterator
           MII = I->begin(), MIE = I->end(); MII != MIE; ) {
      MachineInstr *MI = &*MII;
      LocalMIs.insert(MI);

      if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
          MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
          MI->hasUnmodeledSideEffects()) {
        ++MII;
        continue;
      }

      if (MI->isBitcast()) {
        if (optimizeBitcastInstr(MI, MBB)) {
          // MI is deleted.
          LocalMIs.erase(MI);
          Changed = true;
          MII = First ? I->begin() : llvm::next(PMII);
          continue;
        }
      } else if (MI->isCompare()) {
        if (optimizeCmpInstr(MI, MBB)) {
          // MI is deleted.
          LocalMIs.erase(MI);
          Changed = true;
          MII = First ? I->begin() : llvm::next(PMII);
          continue;
        }
      }

      if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
        SeenMoveImm = true;
      } else {
        Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
        if (SeenMoveImm)
          Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
      }

      First = false;
      PMII = MII;
      ++MII;
    }
  }

  return Changed;
}
Author: sandssss, Project: llvm, Lines: 70, Source: PeepholeOptimizer.cpp
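
Example 1 shows two habits worth copying: a single LocalMIs set is kept alive for the whole function and clear()ed once per basic block, so its inline storage is reused, and an instruction is erase()d from the set the moment it is deleted, so the set never holds a dangling pointer. A minimal sketch of that pattern, using a hypothetical Instr type in place of MachineInstr:

#include "llvm/ADT/SmallPtrSet.h"
#include <vector>

struct Instr { bool Dead = false; };   // stand-in for MachineInstr

void processBlocks(std::vector<std::vector<Instr *>> &Blocks) {
  llvm::SmallPtrSet<Instr *, 8> LocalMIs;
  for (auto &Block : Blocks) {
    LocalMIs.clear();          // reuse the set's storage per block
    for (Instr *I : Block) {
      LocalMIs.insert(I);
      if (I->Dead) {
        LocalMIs.erase(I);     // drop the pointer before deleting I,
                               // as Example 1 does after optimization
      }
    }
  }
}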

Example 2: BBSet

void StackColoring::calculateLocalLiveness() {
  // Perform a standard reverse dataflow computation to solve for
  // global liveness.  The BEGIN set here is equivalent to KILL in the standard
  // formulation, and END is equivalent to GEN.  The result of this computation
  // is a map from blocks to bitvectors where the bitvectors represent which
  // allocas are live in/out of that block.
  SmallPtrSet<const MachineBasicBlock*, 8> BBSet(BasicBlockNumbering.begin(),
                                                 BasicBlockNumbering.end());
  unsigned NumSSMIters = 0;
  bool changed = true;
  while (changed) {
    changed = false;
    ++NumSSMIters;

    SmallPtrSet<const MachineBasicBlock*, 8> NextBBSet;

    for (const MachineBasicBlock *BB : BasicBlockNumbering) {
      if (!BBSet.count(BB)) continue;

      // Use an iterator to avoid repeated lookups.
      LivenessMap::iterator BI = BlockLiveness.find(BB);
      assert(BI != BlockLiveness.end() && "Block not found");
      BlockLifetimeInfo &BlockInfo = BI->second;

      BitVector LocalLiveIn;
      BitVector LocalLiveOut;

      // Forward propagation from begins to ends.
      for (MachineBasicBlock::const_pred_iterator PI = BB->pred_begin(),
           PE = BB->pred_end(); PI != PE; ++PI) {
        LivenessMap::const_iterator I = BlockLiveness.find(*PI);
        assert(I != BlockLiveness.end() && "Predecessor not found");
        LocalLiveIn |= I->second.LiveOut;
      }
      LocalLiveIn |= BlockInfo.End;
      LocalLiveIn.reset(BlockInfo.Begin);

      // Reverse propagation from ends to begins.
      for (MachineBasicBlock::const_succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
        LivenessMap::const_iterator I = BlockLiveness.find(*SI);
        assert(I != BlockLiveness.end() && "Successor not found");
        LocalLiveOut |= I->second.LiveIn;
      }
      LocalLiveOut |= BlockInfo.Begin;
      LocalLiveOut.reset(BlockInfo.End);

      LocalLiveIn |= LocalLiveOut;
      LocalLiveOut |= LocalLiveIn;

      // After adopting the live bits, we need to turn off the bits which
      // are deactivated in this block.
      LocalLiveOut.reset(BlockInfo.End);
      LocalLiveIn.reset(BlockInfo.Begin);

      // If we have both BEGIN and END markers in the same basic block then
      // we know that the BEGIN marker comes after the END, because we already
      // handle the case where the BEGIN comes before the END when collecting
      // the markers (and building the BEGIN/END vectors).
      // Want to enable the LIVE_IN and LIVE_OUT of slots that have both
      // BEGIN and END because it means that the value lives before and after
      // this basic block.
      BitVector LocalEndBegin = BlockInfo.End;
      LocalEndBegin &= BlockInfo.Begin;
      LocalLiveIn |= LocalEndBegin;
      LocalLiveOut |= LocalEndBegin;

      if (LocalLiveIn.test(BlockInfo.LiveIn)) {
        changed = true;
        BlockInfo.LiveIn |= LocalLiveIn;

        NextBBSet.insert(BB->pred_begin(), BB->pred_end());
      }

      if (LocalLiveOut.test(BlockInfo.LiveOut)) {
        changed = true;
        BlockInfo.LiveOut |= LocalLiveOut;

        NextBBSet.insert(BB->succ_begin(), BB->succ_end());
      }
    }

    BBSet = std::move(NextBBSet);
  }// while changed.
}
Author: AnachroNia, Project: llvm, Lines: 85, Source: StackColoring.cpp
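
Two less common SmallPtrSet capabilities appear in Example 2: BBSet is constructed directly from an iterator range, and at the end of each dataflow round the working set is replaced wholesale with std::move. A small skeleton of that two-set iteration, assuming plain int* elements instead of MachineBasicBlock*:

#include "llvm/ADT/SmallPtrSet.h"
#include <utility>
#include <vector>

void twoSetDataflow(const std::vector<int *> &All) {
  // Seed the work set from a range, as BBSet is seeded above.
  llvm::SmallPtrSet<int *, 8> Work(All.begin(), All.end());
  while (!Work.empty()) {
    llvm::SmallPtrSet<int *, 8> Next;
    for (int *P : Work) {
      (void)P;
      // ... apply the transfer function to P; whenever a neighbor's
      // state changes, reschedule it with Next.insert(Neighbor) ...
    }
    Work = std::move(Next);   // cheap hand-off to the next round
  }
}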

Example 3: ComputeLiveInBlocks

/// At this point, we're committed to promoting the alloca using IDF's, and the
/// standard SSA construction algorithm.  Determine which blocks need phi nodes
/// and see if we can optimize out some work by avoiding insertion of dead phi
/// nodes.
void PromoteMem2Reg::DetermineInsertionPoint(AllocaInst *AI, unsigned AllocaNum,
                                             AllocaInfo &Info) {
  // Unique the set of defining blocks for efficient lookup.
  SmallPtrSet<BasicBlock *, 32> DefBlocks;
  DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());

  // Determine which blocks the value is live in.  These are blocks which lead
  // to uses.
  SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
  ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);

  // Use a priority queue keyed on dominator tree level so that inserted nodes
  // are handled from the bottom of the dominator tree upwards.
  typedef std::pair<DomTreeNode *, unsigned> DomTreeNodePair;
  typedef std::priority_queue<DomTreeNodePair, SmallVector<DomTreeNodePair, 32>,
                              less_second> IDFPriorityQueue;
  IDFPriorityQueue PQ;

  for (SmallPtrSet<BasicBlock *, 32>::const_iterator I = DefBlocks.begin(),
                                                     E = DefBlocks.end();
       I != E; ++I) {
    if (DomTreeNode *Node = DT.getNode(*I))
      PQ.push(std::make_pair(Node, DomLevels[Node]));
  }

  SmallVector<std::pair<unsigned, BasicBlock *>, 32> DFBlocks;
  SmallPtrSet<DomTreeNode *, 32> Visited;
  SmallVector<DomTreeNode *, 32> Worklist;
  while (!PQ.empty()) {
    DomTreeNodePair RootPair = PQ.top();
    PQ.pop();
    DomTreeNode *Root = RootPair.first;
    unsigned RootLevel = RootPair.second;

    // Walk all dominator tree children of Root, inspecting their CFG edges with
    // targets elsewhere on the dominator tree. Only targets whose level is at
    // most Root's level are added to the iterated dominance frontier of the
    // definition set.

    Worklist.clear();
    Worklist.push_back(Root);

    while (!Worklist.empty()) {
      DomTreeNode *Node = Worklist.pop_back_val();
      BasicBlock *BB = Node->getBlock();

      for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE;
           ++SI) {
        DomTreeNode *SuccNode = DT.getNode(*SI);

        // Quickly skip all CFG edges that are also dominator tree edges instead
        // of catching them below.
        if (SuccNode->getIDom() == Node)
          continue;

        unsigned SuccLevel = DomLevels[SuccNode];
        if (SuccLevel > RootLevel)
          continue;

        if (!Visited.insert(SuccNode))
          continue;

        BasicBlock *SuccBB = SuccNode->getBlock();
        if (!LiveInBlocks.count(SuccBB))
          continue;

        DFBlocks.push_back(std::make_pair(BBNumbers[SuccBB], SuccBB));
        if (!DefBlocks.count(SuccBB))
          PQ.push(std::make_pair(SuccNode, SuccLevel));
      }

      for (DomTreeNode::iterator CI = Node->begin(), CE = Node->end(); CI != CE;
           ++CI) {
        if (!Visited.count(*CI))
          Worklist.push_back(*CI);
      }
    }
  }

  if (DFBlocks.size() > 1)
    std::sort(DFBlocks.begin(), DFBlocks.end());

  unsigned CurrentVersion = 0;
  for (unsigned i = 0, e = DFBlocks.size(); i != e; ++i)
    QueuePhiNode(DFBlocks[i].second, AllocaNum, CurrentVersion);
}
Author: rui314, Project: llvm, Lines: 90, Source: PromoteMemoryToRegister.cpp
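
Example 3 seeds DefBlocks with one bulk insert(begin, end) call, and it is no accident that DFBlocks is sorted before use: SmallPtrSet iteration order depends on pointer values and is not deterministic, so any order-sensitive result harvested from a set walk needs an explicit sort, as done here with the BBNumbers keys. A hedged sketch of that pattern (numberOf is a hypothetical numbering function):

#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
#include <utility>
#include <vector>

std::vector<std::pair<unsigned, int *>>
stableHarvest(const std::vector<int *> &Defs, unsigned (*numberOf)(int *)) {
  llvm::SmallPtrSet<int *, 32> DefSet;
  DefSet.insert(Defs.begin(), Defs.end());  // bulk insert, like DefBlocks

  std::vector<std::pair<unsigned, int *>> Out;
  for (int *P : DefSet)                     // unspecified order here
    Out.push_back({numberOf(P), P});
  std::sort(Out.begin(), Out.end());        // restore determinism
  return Out;
}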

Example 4: optimizeExtInstr

/// optimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify the
/// source, and if the source value is preserved as a sub-register of the
/// result, then replace all reachable uses of the source with the subreg of the
/// result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this changes
/// the code. Since this code does not currently share EXTRACTs, just ignore all
/// debug uses.
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  unsigned SrcReg, DstReg, SubIdx;
  if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
    return false;

  if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  if (MRI->hasOneNonDBGUse(SrcReg))
    // No other uses.
    return false;

  // Ensure DstReg can get a register class that actually supports
  // sub-registers. Don't change the class until we commit.
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
  DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
  if (!DstRC)
    return false;

  // The ext instr may be operating on a sub-register of SrcReg as well.
  // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
  // register.
  // If UseSrcSubIdx is Set, SubIdx also applies to SrcReg, and only uses of
  // SrcReg:SubIdx should be replaced.
  bool UseSrcSubIdx = TM->getRegisterInfo()->
    getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != 0;

  // The source has other uses. See if we can replace the other uses with use of
  // the result of the extension.
  SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI)
    ReachedBBs.insert(UI->getParent());

  // Uses that are in the same BB as uses of the result of the instruction.
  SmallVector<MachineOperand*, 8> Uses;

  // Uses that the result of the instruction can reach.
  SmallVector<MachineOperand*, 8> ExtendedUses;

  bool ExtendLife = true;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(SrcReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    MachineInstr *UseMI = &*UI;
    if (UseMI == MI)
      continue;

    if (UseMI->isPHI()) {
      ExtendLife = false;
      continue;
    }

    // Only accept uses of SrcReg:SubIdx.
    if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
      continue;

    // It's an error to translate this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
    //
    // into this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1027 = COPY %reg1025:4
    //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
    //
    // The problem here is that SUBREG_TO_REG is there to assert that an
    // implicit zext occurs. It doesn't insert a zext instruction. If we allow
    // the COPY here, it will give us the value after the <sext>, not the
    // original value of %reg1024 before <sext>.
    if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
      continue;

    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (UseMBB == MBB) {
      // Local uses that come after the extension.
      if (!LocalMIs.count(UseMI))
        Uses.push_back(&UseMO);
    } else if (ReachedBBs.count(UseMBB)) {
      // Non-local uses where the result of the extension is used. Always
      // replace these unless it's a PHI.
      Uses.push_back(&UseMO);
//......... the rest of this function is omitted .........
Author: 7heaven, Project: softart, Lines: 101, Source: PeepholeOptimizer.cpp

Example 5: InlineCallIfPossible

/// InlineCallIfPossible - If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR.  The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other  functions inlined into the caller.  If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
                                 InlinedArrayAllocasTy &InlinedArrayAllocas,
                                 int InlineHistory) {
  Function *Callee = CS.getCalledFunction();
  Function *Caller = CS.getCaller();

  // Try to inline the function.  Get the list of static allocas that were
  // inlined.
  if (!InlineFunction(CS, IFI))
    return false;

  // If the inlined function had a higher stack protection level than the
  // calling function, then bump up the caller's stack protection level.
  if (Callee->hasFnAttr(Attribute::StackProtectReq))
    Caller->addFnAttr(Attribute::StackProtectReq);
  else if (Callee->hasFnAttr(Attribute::StackProtect) &&
           !Caller->hasFnAttr(Attribute::StackProtectReq))
    Caller->addFnAttr(Attribute::StackProtect);

  // Look at all of the allocas that we inlined through this call site.  If we
  // have already inlined other allocas through other calls into this function,
  // then we know that they have disjoint lifetimes and that we can merge them.
  //
  // There are many heuristics possible for merging these allocas, and the
  // different options have different tradeoffs.  One thing that we *really*
  // don't want to hurt is SRoA: once inlining happens, often allocas are no
  // longer address taken and so they can be promoted.
  //
  // Our "solution" for that is to only merge allocas whose outermost type is an
  // array type.  These are usually not promoted because someone is using a
  // variable index into them.  These are also often the most important ones to
  // merge.
  //
  // A better solution would be to have real memory lifetime markers in the IR
  // and not have the inliner do any merging of allocas at all.  This would
  // allow the backend to do proper stack slot coloring of all allocas that
  // *actually make it to the backend*, which is really what we want.
  //
  // Because we don't have this information, we do this simple and useful hack.
  //
  SmallPtrSet<AllocaInst*, 16> UsedAllocas;
  
  // When processing our SCC, check to see if CS was inlined from some other
  // call site.  For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A.  Doing this makes an alloca available for
  // reuse and makes a callsite (C) available for inlining.  When we process
  // the C call site we don't want to do any alloca merging between X and Y
  // because their scopes are not disjoint.  We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas but this isn't likely to be a significant win.
  if (InlineHistory != -1)  // Only do merging for top-level call sites in SCC.
    return true;
  
  // Loop over all the allocas we have so far and see if they can be merged with
  // a previously inlined alloca.  If not, remember that we had it.
  for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size();
       AllocaNo != e; ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];
    
    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    const ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (ATy == 0 || AI->isArrayAllocation())
      continue;
    
    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst*> &AllocasForType = InlinedArrayAllocas[ATy];
    
    // Loop over the allocas in AllocasForType to see if we can reuse one.  Note
    // that we have to be careful not to reuse the same "available" alloca for
    // multiple different allocas that we just inlined, we use the 'UsedAllocas'
    // set to keep track of which "available" allocas are being used by this
    // function.  Also, AllocasForType can be empty of course!
    bool MergedAwayAlloca = false;
    for (unsigned i = 0, e = AllocasForType.size(); i != e; ++i) {
      AllocaInst *AvailableAlloca = AllocasForType[i];
      
      // The available alloca has to be in the right function, not in some other
      // function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;
      
      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca))
        continue;
      
//......... the rest of this function is omitted .........
Author: dmlap, Project: llvm-js-backend, Lines: 101, Source: Inliner.cpp

Example 6: GetUnderlyingObject

/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;
  
  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallPtrSet<Value*, 16> DeadStackObjects;
  
  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      DeadStackObjects.insert(AI);
  
  // Treat byval arguments the same, stores to them are dead at the end of the
  // function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValAttr())
      DeadStackObjects.insert(AI);
  
  // Scan the basic block backwards
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;
    
    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts
      Value *Pointer = GetUnderlyingObject(getStoredPointerOperand(BBI));

      // Stores to stack values are valid candidates for removal.
      if (DeadStackObjects.count(Pointer)) {
        Instruction *Dead = BBI++;
        
        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Object: " << *Pointer << '\n');
        
        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }
    
    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }
    
    if (AllocaInst *A = dyn_cast<AllocaInst>(BBI)) {
      DeadStackObjects.erase(A);
      continue;
    }
    
    if (CallSite CS = cast<Value>(BBI)) {
      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;
      
      unsigned NumModRef = 0, NumOther = 0;
      
      // If the call might load from any of our allocas, then any store above
      // the call is live.
      SmallVector<Value*, 8> LiveAllocas;
      for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
           E = DeadStackObjects.end(); I != E; ++I) {
        // If we detect that our AA is imprecise, it's not worth it to scan the
        // rest of the DeadPointers set.  Just assume that the AA will return
        // ModRef for everything, and go ahead and bail out.
        if (NumModRef >= 16 && NumOther == 0)
          return MadeChange;

        // See if the call site touches it.
        AliasAnalysis::ModRefResult A = 
          AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));
        
        if (A == AliasAnalysis::ModRef)
          ++NumModRef;
        else
          ++NumOther;
        
        if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
          LiveAllocas.push_back(*I);
      }
      
      for (SmallVector<Value*, 8>::iterator I = LiveAllocas.begin(),
           E = LiveAllocas.end(); I != E; ++I)
        DeadStackObjects.erase(*I);
      
//......... the rest of this function is omitted .........
Author: colgur, Project: llvm, Lines: 101, Source: DeadStoreElimination.cpp
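
Note how Example 6 handles a call that may read some of the dead stack objects: the still-live allocas are first collected into a separate SmallVector (LiveAllocas) and only erased from DeadStackObjects after the scan, because erasing from a SmallPtrSet while iterating it invalidates the iterators. A minimal sketch of that two-phase pattern, with a hypothetical stillLive predicate standing in for the alias query:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"

void pruneDead(llvm::SmallPtrSet<int *, 16> &Dead, bool (*stillLive)(int *)) {
  llvm::SmallVector<int *, 8> Live;
  for (int *P : Dead)        // phase 1: read-only scan of the set
    if (stillLive(P))
      Live.push_back(P);
  for (int *P : Live)        // phase 2: mutate only after the scan
    Dead.erase(P);
}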

Example 7: while

/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor.  If there are more complex condition (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (Value::const_use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      const Instruction *User = cast<Instruction>(*UI);
      if (User->getParent() != DestBB || !isa<PHINode>(User))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (User->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(User))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block.  If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}
Author: jyasskin, Project: llvm-mirror, Lines: 68, Source: CodeGenPrepare.cpp

Example 8: runOnSCC

bool PruneEH::runOnSCC(CallGraphSCC &SCC) {
  SmallPtrSet<CallGraphNode *, 8> SCCNodes;
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  bool MadeChange = false;

  // Fill SCCNodes with the elements of the SCC.  Used for quickly
  // looking up whether a given CallGraphNode is in this SCC.
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
    SCCNodes.insert(*I);

  // First pass, scan all of the functions in the SCC, simplifying them
  // according to what we know.
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I)
    if (Function *F = (*I)->getFunction())
      MadeChange |= SimplifyFunction(F);

  // Next, check to see if any callees might throw or if there are any external
  // functions in this SCC: if so, we cannot prune any functions in this SCC.
  // Definitions that are weak and not declared non-throwing might be 
  // overridden at linktime with something that throws, so assume that.
  // If this SCC includes the unwind instruction, we KNOW it throws, so
  // obviously the SCC might throw.
  //
  bool SCCMightUnwind = false, SCCMightReturn = false;
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); 
       (!SCCMightUnwind || !SCCMightReturn) && I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (!F) {
      SCCMightUnwind = true;
      SCCMightReturn = true;
    } else if (F->isDeclaration() || F->mayBeOverridden()) {
      SCCMightUnwind |= !F->doesNotThrow();
      SCCMightReturn |= !F->doesNotReturn();
    } else {
      bool CheckUnwind = !SCCMightUnwind && !F->doesNotThrow();
      bool CheckReturn = !SCCMightReturn && !F->doesNotReturn();
      // Determine if we should scan for InlineAsm in a naked function as it
      // is the only way to return without a ReturnInst.  Only do this for
      // no-inline functions as functions which may be inlined cannot
      // meaningfully return via assembly.
      bool CheckReturnViaAsm = CheckReturn &&
                               F->hasFnAttribute(Attribute::Naked) &&
                               F->hasFnAttribute(Attribute::NoInline);

      if (!CheckUnwind && !CheckReturn)
        continue;

      for (const BasicBlock &BB : *F) {
        const TerminatorInst *TI = BB.getTerminator();
        if (CheckUnwind && TI->mayThrow()) {
          SCCMightUnwind = true;
        } else if (CheckReturn && isa<ReturnInst>(TI)) {
          SCCMightReturn = true;
        }

        for (const Instruction &I : BB) {
          if ((!CheckUnwind || SCCMightUnwind) &&
              (!CheckReturnViaAsm || SCCMightReturn))
            break;

          // Check to see if this function performs an unwind or calls an
          // unwinding function.
          if (CheckUnwind && !SCCMightUnwind && I.mayThrow()) {
            bool InstMightUnwind = true;
            if (const auto *CI = dyn_cast<CallInst>(&I)) {
              if (Function *Callee = CI->getCalledFunction()) {
                CallGraphNode *CalleeNode = CG[Callee];
                // If the callee is outside our current SCC then we may throw
                // because it might.  If it is inside, do nothing.
                if (SCCNodes.count(CalleeNode) > 0)
                  InstMightUnwind = false;
              }
            }
            SCCMightUnwind |= InstMightUnwind;
          }
          if (CheckReturnViaAsm && !SCCMightReturn)
            if (auto ICS = ImmutableCallSite(&I))
              if (const auto *IA = dyn_cast<InlineAsm>(ICS.getCalledValue()))
                if (IA->hasSideEffects())
                  SCCMightReturn = true;
        }

        if (SCCMightUnwind && SCCMightReturn)
          break;
      }
    }
  }

  // If the SCC doesn't unwind or doesn't throw, note this fact.
  if (!SCCMightUnwind || !SCCMightReturn)
    for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
      AttrBuilder NewAttributes;

      if (!SCCMightUnwind)
        NewAttributes.addAttribute(Attribute::NoUnwind);
      if (!SCCMightReturn)
        NewAttributes.addAttribute(Attribute::NoReturn);

      Function *F = (*I)->getFunction();
      const AttributeSet &PAL = F->getAttributes().getFnAttributes();
//......... the rest of this function is omitted .........
Author: cadets, Project: llvm, Lines: 101, Source: PruneEH.cpp

Example 9: runOnModule

bool AMDGPUAlwaysInline::runOnModule(Module &M) {
  AMDGPUAS AMDGPUAS = AMDGPU::getAMDGPUAS(M);

  std::vector<GlobalAlias*> AliasesToRemove;

  SmallPtrSet<Function *, 8> FuncsToAlwaysInline;
  SmallPtrSet<Function *, 8> FuncsToNoInline;

  for (GlobalAlias &A : M.aliases()) {
    if (Function* F = dyn_cast<Function>(A.getAliasee())) {
      A.replaceAllUsesWith(F);
      AliasesToRemove.push_back(&A);
    }

    // FIXME: If the aliasee isn't a function, it's some kind of constant expr
    // cast that won't be inlined through.
  }

  if (GlobalOpt) {
    for (GlobalAlias* A : AliasesToRemove) {
      A->eraseFromParent();
    }
  }

  // Always force inlining of any function that uses an LDS global address. This
  // is something of a workaround because we don't have a way of supporting LDS
  // objects defined in functions. LDS is always allocated by a kernel, and it
  // is difficult to manage LDS usage if a function may be used by multiple
  // kernels.
  //
  // OpenCL doesn't allow declaring LDS in non-kernels, so in practice this
  // should only appear when IPO passes manage to move LDS defined in a kernel
  // into a single user function.

  for (GlobalVariable &GV : M.globals()) {
    // TODO: Region address
    unsigned AS = GV.getType()->getAddressSpace();
    if (AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS.REGION_ADDRESS)
      continue;

    recursivelyVisitUsers(GV, FuncsToAlwaysInline);
  }

  if (!AMDGPUTargetMachine::EnableFunctionCalls || StressCalls) {
    auto IncompatAttr
      = StressCalls ? Attribute::AlwaysInline : Attribute::NoInline;

    for (Function &F : M) {
      if (!F.isDeclaration() && !F.use_empty() &&
          !F.hasFnAttribute(IncompatAttr)) {
        if (StressCalls) {
          if (!FuncsToAlwaysInline.count(&F))
            FuncsToNoInline.insert(&F);
        } else
          FuncsToAlwaysInline.insert(&F);
      }
    }
  }

  for (Function *F : FuncsToAlwaysInline)
    F->addFnAttr(Attribute::AlwaysInline);

  for (Function *F : FuncsToNoInline)
    F->addFnAttr(Attribute::NoInline);

  return !FuncsToAlwaysInline.empty() || !FuncsToNoInline.empty();
}
Author: bkaradzic, Project: SwiftShader, Lines: 67, Source: AMDGPUAlwaysInlinePass.cpp

Example 10: ReduxDesc

bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurrenceKind Kind,
        Loop *TheLoop, bool HasFunNoNaNAttr,
        RecurrenceDescriptor &RedDes) {
    if (Phi->getNumIncomingValues() != 2)
        return false;

    // Reduction variables are only found in the loop header block.
    if (Phi->getParent() != TheLoop->getHeader())
        return false;

    // Obtain the reduction start value from the value that comes from the loop
    // preheader.
    Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader());

    // ExitInstruction is the single value which is used outside the loop.
    // We only allow for a single reduction value to be used outside the loop.
    // This includes users of the reduction variables (which form a cycle
    // that ends in the phi node).
    Instruction *ExitInstruction = nullptr;
    // Indicates that we found a reduction operation in our scan.
    bool FoundReduxOp = false;

    // We start with the PHI node and scan for all of the users of this
    // instruction. All users must be instructions that can be used as reduction
    // variables (such as ADD). We must have a single out-of-block user. The cycle
    // must include the original PHI.
    bool FoundStartPHI = false;

    // To recognize min/max patterns formed by an icmp select sequence, we
    // store the number of instructions we have seen from the recognized
    // min/max pattern, to make sure we see exactly those two instructions.
    unsigned NumCmpSelectPatternInst = 0;
    InstDesc ReduxDesc(false, nullptr);

    // Data used for determining if the recurrence has been type-promoted.
    Type *RecurrenceType = Phi->getType();
    SmallPtrSet<Instruction *, 4> CastInsts;
    Instruction *Start = Phi;
    bool IsSigned = false;

    SmallPtrSet<Instruction *, 8> VisitedInsts;
    SmallVector<Instruction *, 8> Worklist;

    // Return early if the recurrence kind does not match the type of Phi. If the
    // recurrence kind is arithmetic, we attempt to look through AND operations
    // resulting from the type promotion performed by InstCombine.  Vector
    // operations are not limited to the legal integer widths, so we may be able
    // to evaluate the reduction in the narrower width.
    if (RecurrenceType->isFloatingPointTy()) {
        if (!isFloatingPointRecurrenceKind(Kind))
            return false;
    } else {
        if (!isIntegerRecurrenceKind(Kind))
            return false;
        if (isArithmeticRecurrenceKind(Kind))
            Start = lookThroughAnd(Phi, RecurrenceType, VisitedInsts, CastInsts);
    }

    Worklist.push_back(Start);
    VisitedInsts.insert(Start);

    // A value in the reduction can be used:
    //  - By the reduction:
    //      - Reduction operation:
    //        - One use of reduction value (safe).
    //        - Multiple use of reduction value (not safe).
    //      - PHI:
    //        - All uses of the PHI must be the reduction (safe).
    //        - Otherwise, not safe.
    //  - By one instruction outside of the loop (safe).
    //  - By further instructions outside of the loop (not safe).
    //  - By an instruction that is not part of the reduction (not safe).
    //    This is either:
    //      * An instruction type other than PHI or the reduction operation.
    //      * A PHI in the header other than the initial PHI.
    while (!Worklist.empty()) {
        Instruction *Cur = Worklist.back();
        Worklist.pop_back();

        // No Users.
        // If the instruction has no users then this is a broken chain and can't be
        // a reduction variable.
        if (Cur->use_empty())
            return false;

        bool IsAPhi = isa<PHINode>(Cur);

        // A header PHI use other than the original PHI.
        if (Cur != Phi && IsAPhi && Cur->getParent() == Phi->getParent())
            return false;

        // Reductions of instructions such as Div and Sub are only possible if
        // the LHS is the reduction variable.
        if (!Cur->isCommutative() && !IsAPhi && !isa<SelectInst>(Cur) &&
                !isa<ICmpInst>(Cur) && !isa<FCmpInst>(Cur) &&
                !VisitedInsts.count(dyn_cast<Instruction>(Cur->getOperand(0))))
            return false;

        // Any reduction instruction must be of one of the allowed kinds. We ignore
        // the starting value (the Phi or an AND instruction if the Phi has been
//......... the rest of this function is omitted .........
Author: shepmaster, Project: llvm, Lines: 101, Source: LoopUtils.cpp
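
Example 10 pairs a SmallVector worklist with a SmallPtrSet visited set, the standard way to walk a use graph without revisiting nodes or looping forever: seed both with the start node, then only push neighbors whose insertion into the set actually succeeds. A self-contained sketch over a toy Node type (it uses the current insert().second API; see the remark after Example 14):

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"

struct Node { llvm::SmallVector<Node *, 4> Succs; };

void visitReachable(Node *Start) {
  llvm::SmallVector<Node *, 8> Worklist;
  llvm::SmallPtrSet<Node *, 8> Visited;
  Worklist.push_back(Start);
  Visited.insert(Start);
  while (!Worklist.empty()) {
    Node *Cur = Worklist.pop_back_val();
    // ... process Cur ...
    for (Node *S : Cur->Succs)
      if (Visited.insert(S).second)   // true only on first insertion
        Worklist.push_back(S);
  }
}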

Example 11: GetUnderlyingObject

/// findValueImpl - Implementation helper for findValue.
Value *Lint::findValueImpl(Value *V, bool OffsetOk,
                           SmallPtrSetImpl<Value *> &Visited) const {
  // Detect self-referential values.
  if (!Visited.insert(V).second)
    return UndefValue::get(V->getType());

  // TODO: Look through sext or zext cast, when the result is known to
  // be interpreted as signed or unsigned, respectively.
  // TODO: Look through eliminable cast pairs.
  // TODO: Look through calls with unique return values.
  // TODO: Look through vector insert/extract/shuffle.
  V = OffsetOk ? GetUnderlyingObject(V, *DL) : V->stripPointerCasts();
  if (LoadInst *L = dyn_cast<LoadInst>(V)) {
    BasicBlock::iterator BBI = L->getIterator();
    BasicBlock *BB = L->getParent();
    SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
    for (;;) {
      if (!VisitedBlocks.insert(BB).second)
        break;
      if (Value *U =
          FindAvailableLoadedValue(L, BB, BBI, DefMaxInstsToScan, AA))
        return findValueImpl(U, OffsetOk, Visited);
      if (BBI != BB->begin()) break;
      BB = BB->getUniquePredecessor();
      if (!BB) break;
      BBI = BB->end();
    }
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    if (Value *W = PN->hasConstantValue())
      if (W != V)
        return findValueImpl(W, OffsetOk, Visited);
  } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
    if (CI->isNoopCast(*DL))
      return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
  } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
    if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
                                     Ex->getIndices()))
      if (W != V)
        return findValueImpl(W, OffsetOk, Visited);
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // Same as above, but for ConstantExpr instead of Instruction.
    if (Instruction::isCast(CE->getOpcode())) {
      if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
                               CE->getOperand(0)->getType(), CE->getType(),
                               DL->getIntPtrType(V->getType())))
        return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
    } else if (CE->getOpcode() == Instruction::ExtractValue) {
      ArrayRef<unsigned> Indices = CE->getIndices();
      if (Value *W = FindInsertedValue(CE->getOperand(0), Indices))
        if (W != V)
          return findValueImpl(W, OffsetOk, Visited);
    }
  }

  // As a last resort, try SimplifyInstruction or constant folding.
  if (Instruction *Inst = dyn_cast<Instruction>(V)) {
    if (Value *W = SimplifyInstruction(Inst, *DL, TLI, DT, AC))
      return findValueImpl(W, OffsetOk, Visited);
  } else if (auto *C = dyn_cast<Constant>(V)) {
    if (Value *W = ConstantFoldConstant(C, *DL, TLI))
      if (W && W != V)
        return findValueImpl(W, OffsetOk, Visited);
  }

  return V;
}
Author: marsupial, Project: llvm, Lines: 67, Source: Lint.cpp
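
Example 11's signature is worth noting: it takes SmallPtrSetImpl<Value *> & rather than SmallPtrSet<Value *, N> &. SmallPtrSetImpl is the size-erased base class, so one function can accept sets of any inline size, and the !Visited.insert(V).second check is the idiomatic recursion guard. A brief sketch of the parameter idiom, using int* elements:

#include "llvm/ADT/SmallPtrSet.h"

// Taking the size-erased base class accepts any inline size.
bool recordOnce(llvm::SmallPtrSetImpl<int *> &Visited, int *V) {
  // insert() returns {iterator, bool}; .second is true iff V was new.
  return Visited.insert(V).second;
}

void caller() {
  llvm::SmallPtrSet<int *, 4> Small;   // callers pick the inline size
  llvm::SmallPtrSet<int *, 64> Big;
  int X = 0;
  recordOnce(Small, &X);
  recordOnce(Big, &X);   // the same function serves both
}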

Example 12: analyzeGlobalAux

static bool analyzeGlobalAux(const Value *V, GlobalStatus &GS,
                             SmallPtrSet<const PHINode *, 16> &PhiUsers) {
  for (const Use &U : V->uses()) {
    const User *UR = U.getUser();
    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(UR)) {
      GS.HasNonInstructionUser = true;

      // If the result of the constantexpr isn't pointer type, then we won't
      // know to expect it in various places.  Just reject early.
      if (!isa<PointerType>(CE->getType()))
        return true;

      if (analyzeGlobalAux(CE, GS, PhiUsers))
        return true;
    } else if (const Instruction *I = dyn_cast<Instruction>(UR)) {
      if (!GS.HasMultipleAccessingFunctions) {
        if (I->getParent() && I->getParent()->getParent()) {
          const Function *F = I->getParent()->getParent();

          if (!GS.AccessingFunction)
            GS.AccessingFunction = F;
          else if (GS.AccessingFunction != F)
            GS.HasMultipleAccessingFunctions = true;
        }
      }
      if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
        GS.IsLoaded = true;
        // Don't hack on volatile loads.
        if (LI->isVolatile())
          return true;
        GS.Ordering = strongerOrdering(GS.Ordering, LI->getOrdering());
      } else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
        // Don't allow a store OF the address, only stores TO the address.
        if (SI->getOperand(0) == V)
          return true;

        // Don't hack on volatile stores.
        if (SI->isVolatile())
          return true;

        GS.Ordering = strongerOrdering(GS.Ordering, SI->getOrdering());

        // If this is a direct store to the global (i.e., the global is a scalar
        // value, not an aggregate), keep more specific information about
        // stores.
        if (GS.StoredType != GlobalStatus::Stored) {
          if (const GlobalVariable *GV =
                  dyn_cast<GlobalVariable>(SI->getOperand(1))) {
            Value *StoredVal = SI->getOperand(0);

            if (Constant *C = dyn_cast<Constant>(StoredVal)) {
              if (C->isThreadDependent()) {
                // The stored value changes between threads; don't track it.
                return true;
              }
            }

            if (StoredVal == GV->getInitializer()) {
              if (GS.StoredType < GlobalStatus::InitializerStored)
                GS.StoredType = GlobalStatus::InitializerStored;
            } else if (isa<LoadInst>(StoredVal) &&
                       cast<LoadInst>(StoredVal)->getOperand(0) == GV) {
              if (GS.StoredType < GlobalStatus::InitializerStored)
                GS.StoredType = GlobalStatus::InitializerStored;
            } else if (GS.StoredType < GlobalStatus::StoredOnce) {
              GS.StoredType = GlobalStatus::StoredOnce;
              GS.StoredOnceValue = StoredVal;
            } else if (GS.StoredType == GlobalStatus::StoredOnce &&
                       GS.StoredOnceValue == StoredVal) {
              // noop.
            } else {
              GS.StoredType = GlobalStatus::Stored;
            }
          } else {
            GS.StoredType = GlobalStatus::Stored;
          }
        }
      } else if (isa<BitCastInst>(I)) {
        if (analyzeGlobalAux(I, GS, PhiUsers))
          return true;
      } else if (isa<GetElementPtrInst>(I)) {
        if (analyzeGlobalAux(I, GS, PhiUsers))
          return true;
      } else if (isa<SelectInst>(I)) {
        if (analyzeGlobalAux(I, GS, PhiUsers))
          return true;
      } else if (const PHINode *PN = dyn_cast<PHINode>(I)) {
        // PHI nodes we can check just like select or GEP instructions, but we
        // have to be careful about infinite recursion.
        if (PhiUsers.insert(PN)) // Not already visited.
          if (analyzeGlobalAux(I, GS, PhiUsers))
            return true;
      } else if (isa<CmpInst>(I)) {
        GS.IsCompared = true;
      } else if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
        if (MTI->isVolatile())
          return true;
        if (MTI->getArgOperand(0) == V)
          GS.StoredType = GlobalStatus::Stored;
        if (MTI->getArgOperand(1) == V)
//......... the rest of this function is omitted .........
Author: NoahSquare, Project: caver, Lines: 101, Source: GlobalStatus.cpp

Example 13: HandlePHINodesInSuccessorBlocks

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input.  We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB.  As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}
Author: Sciumo, Project: llvm, Lines: 71, Source: FastISel.cpp

Example 14: markAliveBlocks

static bool markAliveBlocks(BasicBlock *BB,
                            SmallPtrSet<BasicBlock*, 128> &Reachable) {

  SmallVector<BasicBlock*, 128> Worklist;
  Worklist.push_back(BB);
  Reachable.insert(BB);
  bool Changed = false;
  do {
    BB = Worklist.pop_back_val();

    // Do a quick scan of the basic block, turning any obviously unreachable
    // instructions into LLVM unreachable insts.  The instruction combining pass
    // canonicalizes unreachable insts into stores to null or undef.
    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ++BBI) {
      if (CallInst *CI = dyn_cast<CallInst>(BBI)) {
        if (CI->doesNotReturn()) {
          // If we found a call to a no-return function, insert an unreachable
          // instruction after it.  Make sure there isn't *already* one there
          // though.
          ++BBI;
          if (!isa<UnreachableInst>(BBI)) {
            // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(BBI, false);
            Changed = true;
          }
          break;
        }
      }

      // Store to undef and store to null are undefined and used to signal that
      // they should be changed to unreachable by passes that can't modify the
      // CFG.
      if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
        // Don't touch volatile stores.
        if (SI->isVolatile()) continue;

        Value *Ptr = SI->getOperand(1);

        if (isa<UndefValue>(Ptr) ||
            (isa<ConstantPointerNull>(Ptr) &&
             SI->getPointerAddressSpace() == 0)) {
          changeToUnreachable(SI, true);
          Changed = true;
          break;
        }
      }
    }

    // Turn invokes that call 'nounwind' functions into ordinary calls.
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Value *Callee = II->getCalledValue();
      if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
        changeToUnreachable(II, true);
        Changed = true;
      } else if (II->doesNotThrow()) {
        if (II->use_empty() && II->onlyReadsMemory()) {
          // jump to the normal destination branch.
          BranchInst::Create(II->getNormalDest(), II);
          II->getUnwindDest()->removePredecessor(II->getParent());
          II->eraseFromParent();
        } else
          changeToCall(II);
        Changed = true;
      }
    }

    Changed |= ConstantFoldTerminator(BB, true);
    for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
      if (Reachable.insert(*SI))
        Worklist.push_back(*SI);
  } while (!Worklist.empty());
  return Changed;
}
Author: 32bitmicro, Project: llvm, Lines: 73, Source: SimplifyCFGPass.cpp
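
A remark on the API difference visible across these examples: the older ones (Examples 3, 5, 12, 13, and this one) test insert() directly as a boolean, e.g. if (Reachable.insert(*SI)) above, because SmallPtrSet::insert used to return bool. In current LLVM it returns std::pair<iterator, bool>, as Example 11 already uses. A sketch of the modern spelling of the same schedule-once guard:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"

void scheduleOnce(llvm::SmallPtrSetImpl<int *> &Reachable,
                  llvm::SmallVectorImpl<int *> &Worklist, int *BB) {
  // Older LLVM:   if (Reachable.insert(BB)) ...   (insert returned bool)
  // Current LLVM: insert() returns std::pair<iterator, bool>.
  if (Reachable.insert(BB).second)
    Worklist.push_back(BB);
}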

Example 15: renameInit

/// Some initialization for the rename part
///
void SSI::renameInit(SmallPtrSet<Instruction*, 4> &value) {
  for (SmallPtrSet<Instruction*, 4>::iterator I = value.begin(),
       E = value.end(); I != E; ++I)
    value_stack[*I].push_back(*I);
}
Author: AHelper, Project: llvm-z80-target, Lines: 7, Source: SSI.cpp


Note: The SmallPtrSet class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce without permission.