This article collects typical usage examples of the C++ method cl::opt::getNumOccurrences. If you are unsure what opt::getNumOccurrences does, how to call it, or what its real-world uses look like, the curated code samples below should help. You can also read further about the enclosing class cl::opt.
Below are 15 code examples of the opt::getNumOccurrences method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better C++ examples.
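Before the LLVM examples, here is a minimal, self-contained sketch of the pattern they all share: getNumOccurrences() reports how many times an option appeared on the command line, so a count of 0 means the option still holds its default and the user did not set it explicitly. The option name (-cache-size-kb) and its default are made up for illustration only and are not part of LLVM.

#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Hypothetical option used only for this sketch; -1 stands for "not set".
static cl::opt<int> CacheSizeKB("cache-size-kb",
                                cl::desc("Override the detected cache size"),
                                cl::init(-1));

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv);

  // getNumOccurrences() > 0 means the user passed -cache-size-kb explicitly,
  // so we honor that value; otherwise we keep a detected/default value.
  unsigned CacheSize = 64;
  if (CacheSizeKB.getNumOccurrences() > 0)
    CacheSize = CacheSizeKB;

  outs() << "using cache size: " << CacheSize << " KB\n";
  return 0;
}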
Example 1: addIRPasses
void PPCPassConfig::addIRPasses() {
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createPPCBoolRetToIntPass());
  addPass(createAtomicExpandPass(&getPPCTargetMachine()));

  // For the BG/Q (or if explicitly requested), add explicit data prefetch
  // intrinsics.
  bool UsePrefetching = TM->getTargetTriple().getVendor() == Triple::BGQ &&
                        getOptLevel() != CodeGenOpt::None;
  if (EnablePrefetch.getNumOccurrences() > 0)
    UsePrefetching = EnablePrefetch;
  if (UsePrefetching)
    addPass(createPPCLoopDataPrefetchPass());

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with single index.
    addPass(createSeparateConstOffsetFromGEPPass(TM, true));
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }

  TargetPassConfig::addIRPasses();
}
Example 2: AddOptimizationPasses
/// This routine adds optimization passes based on selected optimization level,
/// OptLevel.
///
/// OptLevel - Optimization Level
static void AddOptimizationPasses(PassManagerBase &MPM, FunctionPassManager &FPM,
                                  unsigned OptLevel, unsigned SizeLevel) {
  FPM.add(createVerifierPass());          // Verify that input is correct
  MPM.add(createDebugInfoVerifierPass()); // Verify that debug info is correct

  PassManagerBuilder Builder;
  Builder.OptLevel = OptLevel;
  Builder.SizeLevel = SizeLevel;

  if (DisableInline) {
    // No inlining pass
  } else if (OptLevel > 1) {
    Builder.Inliner = createFunctionInliningPass(OptLevel, SizeLevel);
  } else {
    Builder.Inliner = createAlwaysInlinerPass();
  }
  Builder.DisableUnitAtATime = !UnitAtATime;
  Builder.DisableUnrollLoops = (DisableLoopUnrolling.getNumOccurrences() > 0) ?
                               DisableLoopUnrolling : OptLevel == 0;

  // This is final, unless there is a #pragma vectorize enable
  if (DisableLoopVectorization)
    Builder.LoopVectorize = false;
  // If option wasn't forced via cmd line (-vectorize-loops, -loop-vectorize)
  else if (!Builder.LoopVectorize)
    Builder.LoopVectorize = OptLevel > 1 && SizeLevel < 2;

  // When #pragma vectorize is on for SLP, do the same as above
  Builder.SLPVectorize =
      DisableSLPVectorization ? false : OptLevel > 1 && SizeLevel < 2;

  Builder.populateFunctionPassManager(FPM);
  Builder.populateModulePassManager(MPM);
}
Example 3: main
int main(int argc, char **argv) {
  llvm::cl::ParseCommandLineOptions(argc, argv);
  if (Input.getNumOccurrences()) {
    OwningPtr<MemoryBuffer> Buf;
    if (MemoryBuffer::getFileOrSTDIN(Input, Buf))
      return 1;

    llvm::SourceMgr sm;
    if (DumpTokens) {
      yaml::dumpTokens(Buf->getBuffer(), outs());
    }

    if (DumpCanonical) {
      yaml::Stream stream(Buf->getBuffer(), sm);
      dumpStream(stream);
    }
  }

  if (Verify) {
    llvm::TimerGroup Group("YAML parser benchmark");
    benchmark(Group, "Fast", createJSONText(10, 500));
  } else if (!DumpCanonical && !DumpTokens) {
    llvm::TimerGroup Group("YAML parser benchmark");
    benchmark(Group, "Small Values", createJSONText(MemoryLimitMB, 5));
    benchmark(Group, "Medium Values", createJSONText(MemoryLimitMB, 500));
    benchmark(Group, "Large Values", createJSONText(MemoryLimitMB, 50000));
  }

  return 0;
}
Example 4: AddOptimizationPasses
/// AddOptimizationPasses - This routine adds optimization passes
/// based on selected optimization level, OptLevel. This routine
/// duplicates llvm-gcc behaviour.
///
/// OptLevel - Optimization Level
static void AddOptimizationPasses(PassManagerBase &MPM, FunctionPassManager &FPM,
                                  unsigned OptLevel, unsigned SizeLevel) {
  FPM.add(createVerifierPass()); // Verify that input is correct

  PassManagerBuilder Builder;
  Builder.OptLevel = OptLevel;
  Builder.SizeLevel = SizeLevel;

  if (DisableInline) {
    // No inlining pass
  } else if (OptLevel > 1) {
    unsigned Threshold = 225;
    if (SizeLevel == 1)      // -Os
      Threshold = 75;
    else if (SizeLevel == 2) // -Oz
      Threshold = 25;
    if (OptLevel > 2)
      Threshold = 275;
    Builder.Inliner = createFunctionInliningPass(Threshold);
  } else {
    Builder.Inliner = createAlwaysInlinerPass();
  }
  Builder.DisableUnitAtATime = !UnitAtATime;
  Builder.DisableUnrollLoops = (DisableLoopUnrolling.getNumOccurrences() > 0) ?
                               DisableLoopUnrolling : OptLevel == 0;
  Builder.LoopVectorize = OptLevel > 1 && SizeLevel < 2;
  Builder.SLPVectorize = true;

  Builder.populateFunctionPassManager(FPM);
  Builder.populateModulePassManager(MPM);
}
Example 5: getInlineThreshold
unsigned Inliner::getInlineThreshold(CallSite CS) const {
  int thres = InlineThreshold; // -inline-threshold or else selected by
                               // overall opt level

  // If -inline-threshold is not given, listen to the optsize attribute when it
  // would decrease the threshold.
  Function *Caller = CS.getCaller();
  bool OptSize = Caller && !Caller->isDeclaration() &&
                 Caller->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                                      Attribute::OptimizeForSize);
  if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
      OptSizeThreshold < thres)
    thres = OptSizeThreshold;

  // Listen to the inlinehint attribute when it would increase the threshold
  // and the caller does not need to minimize its size.
  Function *Callee = CS.getCalledFunction();
  bool InlineHint = Callee && !Callee->isDeclaration() &&
                    Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                                         Attribute::InlineHint);
  if (InlineHint && HintThreshold > thres &&
      !Caller->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                            Attribute::MinSize))
    thres = HintThreshold;

  // Listen to the cold attribute when it would decrease the threshold.
  bool ColdCallee = Callee && !Callee->isDeclaration() &&
                    Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                                         Attribute::Cold);
  if (ColdCallee && ColdThreshold < thres)
    thres = ColdThreshold;

  return thres;
}
Example 6: thinlto_create_codegen
thinlto_code_gen_t thinlto_create_codegen(void) {
  lto_initialize();
  ThinLTOCodeGenerator *CodeGen = new ThinLTOCodeGenerator();
  CodeGen->setTargetOptions(InitTargetOptionsFromCodeGenFlags());

  if (OptLevel.getNumOccurrences()) {
    if (OptLevel < '0' || OptLevel > '3')
      report_fatal_error("Optimization level must be between 0 and 3");
    CodeGen->setOptLevel(OptLevel - '0');
    switch (OptLevel) {
    case '0':
      CodeGen->setCodeGenOptLevel(CodeGenOpt::None);
      break;
    case '1':
      CodeGen->setCodeGenOptLevel(CodeGenOpt::Less);
      break;
    case '2':
      CodeGen->setCodeGenOptLevel(CodeGenOpt::Default);
      break;
    case '3':
      CodeGen->setCodeGenOptLevel(CodeGenOpt::Aggressive);
      break;
    }
  }
  return wrap(CodeGen);
}
Example 7: initFromString
// Helper function to handle -of, -od, etc.
static void initFromString(char *&dest, const cl::opt<std::string> &src) {
  dest = 0;
  if (src.getNumOccurrences() != 0) {
    if (src.empty())
      error("Expected argument to '-%s'", src.ArgStr);
    dest = mem.strdup(src.c_str());
  }
}
Example 8: RegBankSelect
RegBankSelect::RegBankSelect(Mode RunningMode)
    : MachineFunctionPass(ID), OptMode(RunningMode) {
  initializeRegBankSelectPass(*PassRegistry::getPassRegistry());
  if (RegBankSelectMode.getNumOccurrences() != 0) {
    OptMode = RegBankSelectMode;
    if (RegBankSelectMode != RunningMode)
      LLVM_DEBUG(dbgs() << "RegBankSelect mode overrided by command line\n");
  }
}
Example 9: TargetMachine
TargetMachine::TargetMachine(const Target &T, StringRef DataLayoutString,
                             const Triple &TT, StringRef CPU, StringRef FS,
                             const TargetOptions &Options)
    : TheTarget(T), DL(DataLayoutString), TargetTriple(TT), TargetCPU(CPU),
      TargetFS(FS), AsmInfo(nullptr), MRI(nullptr), MII(nullptr), STI(nullptr),
      RequireStructuredCFG(false), Options(Options) {
  if (EnableIPRA.getNumOccurrences())
    this->Options.EnableIPRA = EnableIPRA;
}
Example 10: getUnrollingPreferences
void BasicTTI::getUnrollingPreferences(Loop *L,
                                       UnrollingPreferences &UP) const {
  // This unrolling functionality is target independent, but to provide some
  // motivation for its intended use, for x86:

  // According to the Intel 64 and IA-32 Architectures Optimization Reference
  // Manual, Intel Core models and later have a loop stream detector (and
  // associated uop queue) that can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have no more than 4 (8 for Nehalem and later) branches
  //    taken, and none of them may be calls.
  //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

  // According to the Software Optimization Guide for AMD Family 15h Processors,
  // models 30h-4fh (Steamroller and later) have a loop predictor and loop
  // buffer which can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have fewer than 16 branches
  //  - The loop must have less than 40 uops in all executed loop branches

  // The number of taken branches in a loop is hard to estimate here, and
  // benchmarking has revealed that it is better not to be conservative when
  // estimating the branch count. As a result, we'll ignore the branch limits
  // until someone finds a case where it matters in practice.

  unsigned MaxOps;
  const TargetSubtargetInfo *ST = &TM->getSubtarget<TargetSubtargetInfo>();
  if (PartialUnrollingThreshold.getNumOccurrences() > 0)
    MaxOps = PartialUnrollingThreshold;
  else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
    MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
  else
    return;

  // Scan the loop: don't unroll loops with calls.
  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
       I != E; ++I) {
    BasicBlock *BB = *I;

    for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
      if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
        ImmutableCallSite CS(J);
        if (const Function *F = CS.getCalledFunction()) {
          if (!TopTTI->isLoweredToCall(F))
            continue;
        }

        return;
      }
  }

  // Enable runtime and partial unrolling up to the specified size.
  UP.Partial = UP.Runtime = true;
  UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
}
Example 11: GetCodeGenOptLevel
static CodeGenOpt::Level GetCodeGenOptLevel() {
  if (CodeGenOptLevel.getNumOccurrences())
    return static_cast<CodeGenOpt::Level>(unsigned(CodeGenOptLevel));
  if (OptLevelO1)
    return CodeGenOpt::Less;
  if (OptLevelO2)
    return CodeGenOpt::Default;
  if (OptLevelO3)
    return CodeGenOpt::Aggressive;
  return CodeGenOpt::None;
}
Example 12: processViewOptions
static void processViewOptions() {
  if (!EnableAllViews.getNumOccurrences() &&
      !EnableAllStats.getNumOccurrences())
    return;

  if (EnableAllViews.getNumOccurrences()) {
    processOptionImpl(PrintSummaryView, EnableAllViews);
    processOptionImpl(PrintResourcePressureView, EnableAllViews);
    processOptionImpl(PrintTimelineView, EnableAllViews);
    processOptionImpl(PrintInstructionInfoView, EnableAllViews);
  }

  const cl::opt<bool> &Default =
      EnableAllViews.getPosition() < EnableAllStats.getPosition()
          ? EnableAllStats
          : EnableAllViews;
  processOptionImpl(PrintRegisterFileStats, Default);
  processOptionImpl(PrintDispatchStats, Default);
  processOptionImpl(PrintSchedulerStats, Default);
  processOptionImpl(PrintRetireStats, Default);
}
Example 13: getCacheLineSize
unsigned PPCTTIImpl::getCacheLineSize() {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // On P7, P8 or P9 we have a cache line size of 128.
  unsigned Directive = ST->getDarwinDirective();
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}
Example 14: R600TargetMachine
R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);

  // Override the default since calls aren't supported for r600.
  if (EnableFunctionCalls &&
      EnableAMDGPUFunctionCallsOpt.getNumOccurrences() == 0)
    EnableFunctionCalls = false;
}
Example 15: canProfitablyUnrollMultiExitLoop
/// Returns true if we can profitably unroll the multi-exit loop L. Currently,
/// we return true only if UnrollRuntimeMultiExit is set to true.
static bool canProfitablyUnrollMultiExitLoop(
    Loop *L, SmallVectorImpl<BasicBlock *> &OtherExits, BasicBlock *LatchExit,
    bool PreserveLCSSA, bool UseEpilogRemainder) {

#if !defined(NDEBUG)
  SmallVector<BasicBlock *, 8> OtherExitsDummyCheck;
  assert(canSafelyUnrollMultiExitLoop(L, OtherExitsDummyCheck, LatchExit,
                                      PreserveLCSSA, UseEpilogRemainder) &&
         "Should be safe to unroll before checking profitability!");
#endif

  // Priority goes to UnrollRuntimeMultiExit if it's supplied.
  return UnrollRuntimeMultiExit.getNumOccurrences() ? UnrollRuntimeMultiExit
                                                    : false;
}