This article collects and summarizes typical usage examples of the C++ SmallVector::clear method. If you have been wondering how SmallVector::clear is used in practice, how to call it, or what real code using it looks like, the curated examples below may help. You can also explore further usage examples of the containing class, SmallVector.
Shown below are 15 code examples of SmallVector::clear, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
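Before the examples, a minimal self-contained sketch (not taken from any of the projects below) of what clear() itself does: it destroys the elements and resets size() to zero while keeping the already-allocated storage, which is why the examples below call it to reuse one vector across many iterations.
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::SmallVector<int, 8> V;
  for (int Round = 0; Round < 3; ++Round) {
    for (int I = 0; I < 5; ++I)
      V.push_back(Round * 10 + I);
    llvm::outs() << "round " << Round << ": size " << V.size() << "\n";
    V.clear(); // size() becomes 0; the storage (inline or heap) is kept
  }
}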
Example 1: if
//......... part of the code omitted here .........
}
}
ReplaceUsesOfBlockWith(Succ, NMBB);
// If updateTerminator() removes instructions, we need to remove them from
// SlotIndexes.
SmallVector<MachineInstr*, 4> Terminators;
if (Indexes) {
for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
I != E; ++I)
Terminators.push_back(I);
}
updateTerminator();
if (Indexes) {
SmallVector<MachineInstr*, 4> NewTerminators;
for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
I != E; ++I)
NewTerminators.push_back(I);
for (SmallVectorImpl<MachineInstr*>::iterator I = Terminators.begin(),
E = Terminators.end(); I != E; ++I) {
if (std::find(NewTerminators.begin(), NewTerminators.end(), *I) ==
NewTerminators.end())
Indexes->removeMachineInstrFromMaps(*I);
}
}
// Insert unconditional "jump Succ" instruction in NMBB if necessary.
NMBB->addSuccessor(Succ);
if (!NMBB->isLayoutSuccessor(Succ)) {
Cond.clear();
MF->getSubtarget().getInstrInfo()->InsertBranch(*NMBB, Succ, nullptr, Cond,
dl);
if (Indexes) {
for (instr_iterator I = NMBB->instr_begin(), E = NMBB->instr_end();
I != E; ++I) {
// Some instructions may have been moved to NMBB by updateTerminator(),
// so we first remove any instruction that already has an index.
if (Indexes->hasIndex(I))
Indexes->removeMachineInstrFromMaps(I);
Indexes->insertMachineInstrInMaps(I);
}
}
}
// Fix PHI nodes in Succ so they refer to NMBB instead of this
for (MachineBasicBlock::instr_iterator
i = Succ->instr_begin(),e = Succ->instr_end();
i != e && i->isPHI(); ++i)
for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
if (i->getOperand(ni+1).getMBB() == this)
i->getOperand(ni+1).setMBB(NMBB);
// Inherit live-ins from the successor
for (MachineBasicBlock::livein_iterator I = Succ->livein_begin(),
E = Succ->livein_end(); I != E; ++I)
NMBB->addLiveIn(*I);
// Update LiveVariables.
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
if (LV) {
// Restore kills of virtual registers that were killed by the terminators.
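The SlotIndexes bookkeeping above follows a snapshot-and-diff pattern: record the terminators, let updateTerminator() rewrite them, record them again, and unmap whatever vanished. A distilled sketch of that pattern, where onRemoved() is a hypothetical stand-in for Indexes->removeMachineInstrFromMaps():
#include "llvm/ADT/SmallVector.h"
#include <algorithm>

// Hypothetical stand-in for removing a dropped item from side tables.
static void onRemoved(int Item) { (void)Item; }

static void diffAfterUpdate(llvm::SmallVectorImpl<int> &Live) {
  llvm::SmallVector<int, 4> Before(Live.begin(), Live.end());
  if (!Live.empty())
    Live.pop_back(); // stand-in for updateTerminator() dropping something
  for (int Item : Before)
    if (std::find(Live.begin(), Live.end(), Item) == Live.end())
      onRemoved(Item); // the diff: present before, gone now
}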
Example 2: ParseCommandLineOptions
void cl::ParseCommandLineOptions(int argc, char **argv,
const char *Overview, bool ReadResponseFiles) {
// Process all registered options.
SmallVector<Option*, 4> PositionalOpts;
SmallVector<Option*, 4> SinkOpts;
StringMap<Option*> Opts;
GetOptionInfo(PositionalOpts, SinkOpts, Opts);
assert((!Opts.empty() || !PositionalOpts.empty()) &&
"No options specified!");
// Expand response files.
std::vector<char*> newArgv;
if (ReadResponseFiles) {
newArgv.push_back(strdup(argv[0]));
ExpandResponseFiles(argc, argv, newArgv);
argv = &newArgv[0];
argc = static_cast<int>(newArgv.size());
}
// Copy the program name into ProgName, making sure not to overflow it.
std::string ProgName = sys::path::filename(argv[0]);
size_t Len = std::min(ProgName.size(), size_t(79));
memcpy(ProgramName, ProgName.data(), Len);
ProgramName[Len] = '\0';
ProgramOverview = Overview;
bool ErrorParsing = false;
// Check out the positional arguments to collect information about them.
unsigned NumPositionalRequired = 0;
// Determine whether or not there is an unlimited number of positionals
bool HasUnlimitedPositionals = false;
Option *ConsumeAfterOpt = 0;
if (!PositionalOpts.empty()) {
if (PositionalOpts[0]->getNumOccurrencesFlag() == cl::ConsumeAfter) {
assert(PositionalOpts.size() > 1 &&
"Cannot specify cl::ConsumeAfter without a positional argument!");
ConsumeAfterOpt = PositionalOpts[0];
}
// Calculate how many positional values are _required_.
bool UnboundedFound = false;
for (size_t i = ConsumeAfterOpt != 0, e = PositionalOpts.size();
i != e; ++i) {
Option *Opt = PositionalOpts[i];
if (RequiresValue(Opt))
++NumPositionalRequired;
else if (ConsumeAfterOpt) {
// ConsumeAfter cannot be combined with "optional" positional options
// unless there is only one positional argument...
if (PositionalOpts.size() > 2)
ErrorParsing |=
Opt->error("error - this positional option will never be matched, "
"because it does not Require a value, and a "
"cl::ConsumeAfter option is active!");
} else if (UnboundedFound && !Opt->ArgStr[0]) {
// This option does not "require" a value... Make sure this option is
// not specified after an option that eats all extra arguments, or this
// one will never get any!
//
ErrorParsing |= Opt->error("error - option can never match, because "
"another positional argument will match an "
"unbounded number of values, and this option"
" does not require a value!");
}
UnboundedFound |= EatsUnboundedNumberOfValues(Opt);
}
HasUnlimitedPositionals = UnboundedFound || ConsumeAfterOpt;
}
// PositionalVals - A vector of "positional" arguments we accumulate into
// the process at the end.
//
SmallVector<std::pair<StringRef,unsigned>, 4> PositionalVals;
// If the program has named positional arguments, and the name has been run
// across, keep track of which positional argument was named. Otherwise put
// the positional args into the PositionalVals list...
Option *ActivePositionalArg = 0;
// Loop over all of the arguments... processing them.
bool DashDashFound = false; // Have we read '--'?
for (int i = 1; i < argc; ++i) {
Option *Handler = 0;
Option *NearestHandler = 0;
std::string NearestHandlerString;
StringRef Value;
StringRef ArgName = "";
// If the option list changed, this means that some command line
// option has just been registered or deregistered. This can occur in
// response to things like -load, etc. If this happens, rescan the options.
if (OptionListChanged) {
PositionalOpts.clear();
SinkOpts.clear();
Opts.clear();
GetOptionInfo(PositionalOpts, SinkOpts, Opts);
//......... part of the code omitted here .........
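The rescan at the end of the excerpt above is the clear-and-refill idiom: when -load or similar changes the registered options, all three derived containers are emptied and rebuilt from scratch rather than patched in place. A minimal sketch, with populate() as a hypothetical stand-in for GetOptionInfo():
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"

// Hypothetical stand-in for GetOptionInfo(): rebuilds the derived state.
static void populate(llvm::SmallVectorImpl<int> &Positional,
                     llvm::SmallVectorImpl<int> &Sink,
                     llvm::StringMap<int> &Named) {
  Positional.push_back(1);
  Sink.push_back(2);
  Named["example"] = 3;
}

static void rescan(llvm::SmallVector<int, 4> &Positional,
                   llvm::SmallVector<int, 4> &Sink,
                   llvm::StringMap<int> &Named) {
  Positional.clear(); // drop every derived entry...
  Sink.clear();
  Named.clear();
  populate(Positional, Sink, Named); // ...and rebuild from the source of truth
}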
Example 3: finalizeBundle
/// finalizeBundle - Finalize a machine instruction bundle which includes
/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
/// This routine adds a BUNDLE instruction to represent the bundle, it adds
/// IsInternalRead markers to MachineOperands which are defined inside the
/// bundle, and it copies externally visible defs and uses to the BUNDLE
/// instruction.
void llvm::finalizeBundle(MachineBasicBlock &MBB,
MachineBasicBlock::instr_iterator FirstMI,
MachineBasicBlock::instr_iterator LastMI) {
assert(FirstMI != LastMI && "Empty bundle?");
MIBundleBuilder Bundle(MBB, FirstMI, LastMI);
MachineFunction &MF = *MBB.getParent();
const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
MachineInstrBuilder MIB =
BuildMI(MF, FirstMI->getDebugLoc(), TII->get(TargetOpcode::BUNDLE));
Bundle.prepend(MIB);
SmallVector<unsigned, 32> LocalDefs;
SmallSet<unsigned, 32> LocalDefSet;
SmallSet<unsigned, 8> DeadDefSet;
SmallSet<unsigned, 16> KilledDefSet;
SmallVector<unsigned, 8> ExternUses;
SmallSet<unsigned, 8> ExternUseSet;
SmallSet<unsigned, 8> KilledUseSet;
SmallSet<unsigned, 8> UndefUseSet;
SmallVector<MachineOperand*, 4> Defs;
for (; FirstMI != LastMI; ++FirstMI) {
for (unsigned i = 0, e = FirstMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = FirstMI->getOperand(i);
if (!MO.isReg())
continue;
if (MO.isDef()) {
Defs.push_back(&MO);
continue;
}
unsigned Reg = MO.getReg();
if (!Reg)
continue;
assert(TargetRegisterInfo::isPhysicalRegister(Reg));
if (LocalDefSet.count(Reg)) {
MO.setIsInternalRead();
if (MO.isKill())
// Internal def is now killed.
KilledDefSet.insert(Reg);
} else {
if (ExternUseSet.insert(Reg).second) {
ExternUses.push_back(Reg);
if (MO.isUndef())
UndefUseSet.insert(Reg);
}
if (MO.isKill())
// External def is now killed.
KilledUseSet.insert(Reg);
}
}
for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
MachineOperand &MO = *Defs[i];
unsigned Reg = MO.getReg();
if (!Reg)
continue;
if (LocalDefSet.insert(Reg).second) {
LocalDefs.push_back(Reg);
if (MO.isDead()) {
DeadDefSet.insert(Reg);
}
} else {
// Re-defined inside the bundle, it's no longer killed.
KilledDefSet.erase(Reg);
if (!MO.isDead())
// Previously defined but dead.
DeadDefSet.erase(Reg);
}
if (!MO.isDead()) {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
if (LocalDefSet.insert(SubReg).second)
LocalDefs.push_back(SubReg);
}
}
}
Defs.clear();
}
SmallSet<unsigned, 32> Added;
for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
unsigned Reg = LocalDefs[i];
if (Added.insert(Reg).second) {
// If it's not live beyond end of the bundle, mark it dead.
bool isDead = DeadDefSet.count(Reg) || KilledDefSet.count(Reg);
MIB.addReg(Reg, getDefRegState(true) | getDeadRegState(isDead) |
getImplRegState(true));
}
//......... part of the code omitted here .........
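Note where Defs.clear() sits in the loop above: the vector is declared once outside the per-instruction loop and emptied at the bottom of each iteration, so later instructions push into storage that is already allocated. A minimal sketch of that hoist-and-clear reuse, on hypothetical row data:
#include "llvm/ADT/SmallVector.h"

static long sumRows(const int *Data, unsigned Rows, unsigned Cols) {
  llvm::SmallVector<int, 8> Row; // hoisted out of the loop on purpose
  long Sum = 0;
  for (unsigned R = 0; R != Rows; ++R) {
    for (unsigned C = 0; C != Cols; ++C)
      Row.push_back(Data[R * Cols + C]);
    for (int V : Row)
      Sum += V;
    Row.clear(); // reuse the same storage for the next row
  }
  return Sum;
}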
Example 4: B
/// DoPromotion - This method actually performs the promotion of the specified
/// arguments, and returns the new function. At this point, we know that it's
/// safe to do so.
CallGraphNode *ArgPromotion::DoPromotion(Function *F,
SmallPtrSetImpl<Argument*> &ArgsToPromote,
SmallPtrSetImpl<Argument*> &ByValArgsToTransform) {
// Start by computing a new prototype for the function, which is the same as
// the old function, but has modified arguments.
FunctionType *FTy = F->getFunctionType();
std::vector<Type*> Params;
typedef std::set<std::pair<Type *, IndicesVector>> ScalarizeTable;
// ScalarizedElements - If we are promoting a pointer that has elements
// accessed out of it, keep track of which elements are accessed so that we
// can add one argument for each.
//
// Arguments that are directly loaded will have a zero element value here, to
// handle cases where there are both a direct load and GEP accesses.
//
std::map<Argument*, ScalarizeTable> ScalarizedElements;
// OriginalLoads - Keep track of a representative load instruction from the
// original function so that we can tell the alias analysis implementation
// what the new GEP/Load instructions we are inserting look like.
// We need to keep the original loads for each argument and the elements
// of the argument that are accessed.
std::map<std::pair<Argument*, IndicesVector>, LoadInst*> OriginalLoads;
// Attribute - Keep track of the parameter attributes for the arguments
// that we are *not* promoting. For the ones that we do promote, the parameter
// attributes are lost
SmallVector<AttributeSet, 8> AttributesVec;
const AttributeSet &PAL = F->getAttributes();
// Add any return attributes.
if (PAL.hasAttributes(AttributeSet::ReturnIndex))
AttributesVec.push_back(AttributeSet::get(F->getContext(),
PAL.getRetAttributes()));
// First, determine the new argument list
unsigned ArgIndex = 1;
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
++I, ++ArgIndex) {
if (ByValArgsToTransform.count(&*I)) {
// Simple byval argument? Just add all the struct element types.
Type *AgTy = cast<PointerType>(I->getType())->getElementType();
StructType *STy = cast<StructType>(AgTy);
Params.insert(Params.end(), STy->element_begin(), STy->element_end());
++NumByValArgsPromoted;
} else if (!ArgsToPromote.count(&*I)) {
// Unchanged argument
Params.push_back(I->getType());
AttributeSet attrs = PAL.getParamAttributes(ArgIndex);
if (attrs.hasAttributes(ArgIndex)) {
AttrBuilder B(attrs, ArgIndex);
AttributesVec.
push_back(AttributeSet::get(F->getContext(), Params.size(), B));
}
} else if (I->use_empty()) {
// Dead argument (dead arguments are always marked as promotable)
++NumArgumentsDead;
} else {
// Okay, this is being promoted. This means that the only uses are loads
// or GEPs which are only used by loads
// In this table, we will track which indices are loaded from the argument
// (where direct loads are tracked as no indices).
ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
for (User *U : I->users()) {
Instruction *UI = cast<Instruction>(U);
Type *SrcTy;
if (LoadInst *L = dyn_cast<LoadInst>(UI))
SrcTy = L->getType();
else
SrcTy = cast<GetElementPtrInst>(UI)->getSourceElementType();
IndicesVector Indices;
Indices.reserve(UI->getNumOperands() - 1);
// Since loads will only have a single operand, and GEPs only a single
// non-index operand, this will record direct loads without any indices,
// and gep+loads with the GEP indices.
for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
II != IE; ++II)
Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
// GEPs with a single 0 index can be merged with direct loads
if (Indices.size() == 1 && Indices.front() == 0)
Indices.clear();
ArgIndices.insert(std::make_pair(SrcTy, Indices));
LoadInst *OrigLoad;
if (LoadInst *L = dyn_cast<LoadInst>(UI))
OrigLoad = L;
else
// Take any load, we will use it only to update Alias Analysis
OrigLoad = cast<LoadInst>(UI->user_back());
OriginalLoads[std::make_pair(&*I, Indices)] = OrigLoad;
}
// Add a parameter to the function for each element passed in.
for (ScalarizeTable::iterator SI = ArgIndices.begin(),
//......... part of the code omitted here .........
Example 5: getParent
MachineBasicBlock *
MachineBasicBlock::SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P) {
MachineFunction *MF = getParent();
DebugLoc dl; // FIXME: this is nowhere
// We may need to update this's terminator, but we can't do that if
// AnalyzeBranch fails. If this uses a jump table, we won't touch it.
const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
MachineBasicBlock *TBB = 0, *FBB = 0;
SmallVector<MachineOperand, 4> Cond;
if (TII->AnalyzeBranch(*this, TBB, FBB, Cond))
return NULL;
// Avoid bugpoint weirdness: A block may end with a conditional branch that
// jumps to the same MBB in either case. We have duplicate CFG edges in that
// case that we can't handle. Since this never happens in properly optimized
// code, just skip those edges.
if (TBB && TBB == FBB) {
DEBUG(dbgs() << "Won't split critical edge after degenerate BB#"
<< getNumber() << '\n');
return NULL;
}
MachineBasicBlock *NMBB = MF->CreateMachineBasicBlock();
MF->insert(llvm::next(MachineFunction::iterator(this)), NMBB);
DEBUG(dbgs() << "Splitting critical edge:"
" BB#" << getNumber()
<< " -- BB#" << NMBB->getNumber()
<< " -- BB#" << Succ->getNumber() << '\n');
// On some targets like Mips, branches may kill virtual registers. Make sure
// that LiveVariables is properly updated after updateTerminator replaces the
// terminators.
LiveVariables *LV = P->getAnalysisIfAvailable<LiveVariables>();
// Collect a list of virtual registers killed by the terminators.
SmallVector<unsigned, 4> KilledRegs;
if (LV)
for (instr_iterator I = getFirstInstrTerminator(), E = instr_end();
I != E; ++I) {
MachineInstr *MI = I;
for (MachineInstr::mop_iterator OI = MI->operands_begin(),
OE = MI->operands_end(); OI != OE; ++OI) {
if (!OI->isReg() || OI->getReg() == 0 ||
!OI->isUse() || !OI->isKill() || OI->isUndef())
continue;
unsigned Reg = OI->getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
LV->getVarInfo(Reg).removeKill(MI)) {
KilledRegs.push_back(Reg);
DEBUG(dbgs() << "Removing terminator kill: " << *MI);
OI->setIsKill(false);
}
}
}
ReplaceUsesOfBlockWith(Succ, NMBB);
updateTerminator();
// Insert unconditional "jump Succ" instruction in NMBB if necessary.
NMBB->addSuccessor(Succ);
if (!NMBB->isLayoutSuccessor(Succ)) {
Cond.clear();
MF->getTarget().getInstrInfo()->InsertBranch(*NMBB, Succ, NULL, Cond, dl);
}
// Fix PHI nodes in Succ so they refer to NMBB instead of this
for (MachineBasicBlock::instr_iterator
i = Succ->instr_begin(),e = Succ->instr_end();
i != e && i->isPHI(); ++i)
for (unsigned ni = 1, ne = i->getNumOperands(); ni != ne; ni += 2)
if (i->getOperand(ni+1).getMBB() == this)
i->getOperand(ni+1).setMBB(NMBB);
// Inherit live-ins from the successor
for (MachineBasicBlock::livein_iterator I = Succ->livein_begin(),
E = Succ->livein_end(); I != E; ++I)
NMBB->addLiveIn(*I);
// Update LiveVariables.
const TargetRegisterInfo *TRI = MF->getTarget().getRegisterInfo();
if (LV) {
// Restore kills of virtual registers that were killed by the terminators.
while (!KilledRegs.empty()) {
unsigned Reg = KilledRegs.pop_back_val();
for (instr_iterator I = instr_end(), E = instr_begin(); I != E;) {
if (!(--I)->addRegisterKilled(Reg, TRI, /* addIfNotFound= */ false))
continue;
if (TargetRegisterInfo::isVirtualRegister(Reg))
LV->getVarInfo(Reg).Kills.push_back(I);
DEBUG(dbgs() << "Restored terminator kill: " << *I);
break;
}
}
// Update relevant live-through information.
LV->addNewBlock(NMBB, this, Succ);
}
if (MachineDominatorTree *MDT =
P->getAnalysisIfAvailable<MachineDominatorTree>()) {
//......... part of the code omitted here .........
Example 6: getExportedModules
void Module::getExportedModules(SmallVectorImpl<Module *> &Exported) const {
// All non-explicit submodules are exported.
for (std::vector<Module *>::const_iterator I = SubModules.begin(),
E = SubModules.end();
I != E; ++I) {
Module *Mod = *I;
if (!Mod->IsExplicit)
Exported.push_back(Mod);
}
// Find re-exported modules by filtering the list of imported modules.
bool AnyWildcard = false;
bool UnrestrictedWildcard = false;
SmallVector<Module *, 4> WildcardRestrictions;
for (unsigned I = 0, N = Exports.size(); I != N; ++I) {
Module *Mod = Exports[I].getPointer();
if (!Exports[I].getInt()) {
// Export a named module directly; no wildcards involved.
Exported.push_back(Mod);
continue;
}
// Wildcard export: export all of the imported modules that match
// the given pattern.
AnyWildcard = true;
if (UnrestrictedWildcard)
continue;
if (Module *Restriction = Exports[I].getPointer())
WildcardRestrictions.push_back(Restriction);
else {
WildcardRestrictions.clear();
UnrestrictedWildcard = true;
}
}
// If there were any wildcards, push any imported modules that were
// re-exported by the wildcard restriction.
if (!AnyWildcard)
return;
for (unsigned I = 0, N = Imports.size(); I != N; ++I) {
Module *Mod = Imports[I];
bool Acceptable = UnrestrictedWildcard;
if (!Acceptable) {
// Check whether this module meets one of the restrictions.
for (unsigned R = 0, NR = WildcardRestrictions.size(); R != NR; ++R) {
Module *Restriction = WildcardRestrictions[R];
if (Mod == Restriction || Mod->isSubModuleOf(Restriction)) {
Acceptable = true;
break;
}
}
}
if (!Acceptable)
continue;
Exported.push_back(Mod);
}
}
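The clear() in the wildcard loop above is the interesting one: once an unrestricted wildcard export is seen, the accumulated restriction list can never matter again, so it is emptied and the UnrestrictedWildcard flag short-circuits the rest. A distilled sketch with integer stand-ins for Module*, where 0 hypothetically marks an unrestricted wildcard:
#include "llvm/ADT/SmallVector.h"

static bool collectRestrictions(const llvm::SmallVectorImpl<int> &Wildcards,
                                llvm::SmallVectorImpl<int> &Restrictions) {
  bool Unrestricted = false;
  for (int W : Wildcards) {
    if (Unrestricted)
      continue; // nothing further can narrow the result
    if (W != 0)
      Restrictions.push_back(W);
    else {
      Restrictions.clear(); // an unrestricted wildcard subsumes them all
      Unrestricted = true;
    }
  }
  return Unrestricted;
}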
Example 7: main
//......... part of the code omitted here .........
argv.append(AppendedOpts.begin(), AppendedOpts.end());
}
}
std::set<std::string> SavedStrings;
// Handle CCC_OVERRIDE_OPTIONS, used for editing a command line behind the
// scenes.
if (const char *OverrideStr = ::getenv("CCC_OVERRIDE_OPTIONS")) {
// FIXME: Driver shouldn't take extra initial argument.
ApplyQAOverride(argv, OverrideStr, SavedStrings);
}
std::string Path = GetExecutablePath(argv[0], CanonicalPrefixes);
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts =
CreateAndPopulateDiagOpts(argv);
TextDiagnosticPrinter *DiagClient
= new TextDiagnosticPrinter(llvm::errs(), &*DiagOpts);
FixupDiagPrefixExeName(DiagClient, Path);
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagClient);
if (!DiagOpts->DiagnosticSerializationFile.empty()) {
auto SerializedConsumer =
clang::serialized_diags::create(DiagOpts->DiagnosticSerializationFile,
&*DiagOpts, /*MergeChildRecords=*/true);
Diags.setClient(new ChainedDiagnosticConsumer(
Diags.takeClient(), std::move(SerializedConsumer)));
}
ProcessWarningOptions(Diags, *DiagOpts, /*ReportDiags=*/false);
Driver TheDriver(Path, llvm::sys::getDefaultTargetTriple(), Diags);
SetInstallDir(argv, TheDriver, CanonicalPrefixes);
llvm::InitializeAllTargets();
insertArgsFromProgramName(ProgName, DS, argv, SavedStrings);
SetBackdoorDriverOutputsFromEnvVars(TheDriver);
std::unique_ptr<Compilation> C(TheDriver.BuildCompilation(argv));
int Res = 0;
SmallVector<std::pair<int, const Command *>, 4> FailingCommands;
if (C.get())
Res = TheDriver.ExecuteCompilation(*C, FailingCommands);
// Force a crash to test the diagnostics.
if (::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH")) {
Diags.Report(diag::err_drv_force_crash) << "FORCE_CLANG_DIAGNOSTICS_CRASH";
// Pretend that every command failed.
FailingCommands.clear();
for (const auto &J : C->getJobs())
if (const Command *C = dyn_cast<Command>(&J))
FailingCommands.push_back(std::make_pair(-1, C));
}
for (const auto &P : FailingCommands) {
int CommandRes = P.first;
const Command *FailingCommand = P.second;
if (!Res)
Res = CommandRes;
// If result status is < 0, then the driver command signalled an error.
// If result status is 70, then the driver command reported a fatal error.
// On Windows, abort will return an exit code of 3. In these cases,
// generate additional diagnostic information if possible.
bool DiagnoseCrash = CommandRes < 0 || CommandRes == 70;
#ifdef LLVM_ON_WIN32
DiagnoseCrash |= CommandRes == 3;
#endif
if (DiagnoseCrash) {
TheDriver.generateCompilationDiagnostics(*C, *FailingCommand);
break;
}
}
Diags.getClient()->finish();
// If any timers were active but haven't been destroyed yet, print their
// results now. This happens in -disable-free mode.
llvm::TimerGroup::printAll(llvm::errs());
llvm::llvm_shutdown();
#ifdef LLVM_ON_WIN32
// Exit status should not be negative on Win32, unless abnormal termination.
// Once abnormal termination was caught, negative status should not be
// propagated.
if (Res < 0)
Res = 1;
#endif
// If we have multiple failing commands, we return the result of the first
// failing command.
return Res;
}
Example 8: performJobsImpl
int Compilation::performJobsImpl() {
// Create a TaskQueue for execution.
std::unique_ptr<TaskQueue> TQ;
if (SkipTaskExecution)
TQ.reset(new DummyTaskQueue(NumberOfParallelCommands));
else
TQ.reset(new TaskQueue(NumberOfParallelCommands));
PerformJobsState State;
using DependencyGraph = DependencyGraph<const Job *>;
DependencyGraph DepGraph;
SmallPtrSet<const Job *, 16> DeferredCommands;
SmallVector<const Job *, 16> InitialOutOfDateCommands;
DependencyGraph::MarkTracer ActualIncrementalTracer;
DependencyGraph::MarkTracer *IncrementalTracer = nullptr;
if (ShowIncrementalBuildDecisions)
IncrementalTracer = &ActualIncrementalTracer;
auto noteBuilding = [&] (const Job *cmd, StringRef reason) {
if (!ShowIncrementalBuildDecisions)
return;
if (State.ScheduledCommands.count(cmd))
return;
llvm::outs() << "Queuing "
<< llvm::sys::path::filename(cmd->getOutput().getBaseInput(0))
<< " " << reason << "\n";
IncrementalTracer->printPath(llvm::outs(), cmd,
[](raw_ostream &out, const Job *base) {
out << llvm::sys::path::filename(base->getOutput().getBaseInput(0));
});
};
// Set up scheduleCommandIfNecessaryAndPossible.
// This will only schedule the given command if it has not been scheduled
// and if all of its inputs are in FinishedCommands.
auto scheduleCommandIfNecessaryAndPossible = [&] (const Job *Cmd) {
if (State.ScheduledCommands.count(Cmd))
return;
if (auto Blocking = findUnfinishedJob(Cmd->getInputs(),
State.FinishedCommands)) {
State.BlockingCommands[Blocking].push_back(Cmd);
return;
}
assert(Cmd->getExtraEnvironment().empty() &&
"not implemented for compilations with multiple jobs");
State.ScheduledCommands.insert(Cmd);
TQ->addTask(Cmd->getExecutable(), Cmd->getArguments(), llvm::None,
(void *)Cmd);
};
// When a task finishes, we need to reevaluate the other commands that
// might have been blocked.
auto markFinished = [&] (const Job *Cmd) {
State.FinishedCommands.insert(Cmd);
auto BlockedIter = State.BlockingCommands.find(Cmd);
if (BlockedIter != State.BlockingCommands.end()) {
auto AllBlocked = std::move(BlockedIter->second);
State.BlockingCommands.erase(BlockedIter);
for (auto *Blocked : AllBlocked)
scheduleCommandIfNecessaryAndPossible(Blocked);
}
};
// Schedule all jobs we can.
for (const Job *Cmd : getJobs()) {
if (!getIncrementalBuildEnabled()) {
scheduleCommandIfNecessaryAndPossible(Cmd);
continue;
}
// Try to load the dependencies file for this job. If there isn't one, we
// always have to run the job, but it doesn't affect any other jobs. If
// there should be one but it's not present or can't be loaded, we have to
// run all the jobs.
// FIXME: We can probably do better here!
Job::Condition Condition = Job::Condition::Always;
StringRef DependenciesFile =
Cmd->getOutput().getAdditionalOutputForType(types::TY_SwiftDeps);
if (!DependenciesFile.empty()) {
if (Cmd->getCondition() == Job::Condition::NewlyAdded) {
DepGraph.addIndependentNode(Cmd);
} else {
switch (DepGraph.loadFromPath(Cmd, DependenciesFile)) {
case DependencyGraphImpl::LoadResult::HadError:
disableIncrementalBuild();
for (const Job *Cmd : DeferredCommands)
scheduleCommandIfNecessaryAndPossible(Cmd);
DeferredCommands.clear();
break;
case DependencyGraphImpl::LoadResult::UpToDate:
Condition = Cmd->getCondition();
break;
case DependencyGraphImpl::LoadResult::AffectsDownstream:
llvm_unreachable("we haven't marked anything in this graph yet");
}
//......... part of the code omitted here .........
Example 9: Matcher
/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
/// mode of the machine to fold the specified instruction into a load or store
/// that ultimately uses it. However, the specified instruction has multiple
/// uses. Given this, it may actually increase register pressure to fold it
/// into the load. For example, consider this code:
///
/// X = ...
/// Y = X+1
/// use(Y) -> nonload/store
/// Z = Y+1
/// load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line. If we don't fold Y into load Z, we use one
/// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case. This would make Y die earlier.
bool AddressingModeMatcher::
IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
ExtAddrMode &AMAfter) {
if (IgnoreProfitability) return true;
// AMBefore is the addressing mode before this instruction was folded into it,
// and AMAfter is the addressing mode after the instruction was folded. Get
// the set of registers referenced by AMAfter and subtract out those
// referenced by AMBefore: this is the set of values which folding in this
// address extends the lifetime of.
//
// Note that there are only two potential values being referenced here,
// BaseReg and ScaleReg (global addresses are always available, as are any
// folded immediates).
Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
// If the BaseReg or ScaledReg was referenced by the previous addrmode, their
// lifetime wasn't extended by adding this instruction.
if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
BaseReg = 0;
if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
ScaledReg = 0;
// If folding this instruction (and its subexprs) didn't extend any live
// ranges, we're ok with it.
if (BaseReg == 0 && ScaledReg == 0)
return true;
// If all uses of this instruction are ultimately load/store/inlineasm's,
// check to see if their addressing modes will include this instruction. If
// so, we can fold it into all uses, so it doesn't matter if it has multiple
// uses.
SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
SmallPtrSet<Instruction*, 16> ConsideredInsts;
if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
return false; // Has a non-memory, non-foldable use!
// Now that we know that all uses of this instruction are part of a chain of
// computation involving only operations that could theoretically be folded
// into a memory use, loop over each of these uses and see if they could
// *actually* fold the instruction.
SmallVector<Instruction*, 32> MatchedAddrModeInsts;
for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
Instruction *User = MemoryUses[i].first;
unsigned OpNo = MemoryUses[i].second;
// Get the access type of this use. If the use isn't a pointer, we don't
// know what it accesses.
Value *Address = User->getOperand(OpNo);
if (!Address->getType()->isPointerTy())
return false;
const Type *AddressAccessTy =
cast<PointerType>(Address->getType())->getElementType();
// Do a match against the root of this address, ignoring profitability. This
// will tell us if the addressing mode for the memory operation will
// *actually* cover the shared instruction.
ExtAddrMode Result;
AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
MemoryInst, Result);
Matcher.IgnoreProfitability = true;
bool Success = Matcher.MatchAddr(Address, 0);
(void)Success; // Silence unused-variable warnings in release builds.
assert(Success && "Couldn't select *anything*?");
// If the match didn't cover I, then it won't be shared by it.
if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
I) == MatchedAddrModeInsts.end())
return false;
MatchedAddrModeInsts.clear();
}
return true;
}
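MatchedAddrModeInsts above shows clear() inside a validation loop: each memory use appends its matched instructions, the caller tests membership with std::find, and the vector is reset before the next use so matches never leak between iterations. A minimal sketch, with matchOne() as a hypothetical stand-in for Matcher.MatchAddr():
#include "llvm/ADT/SmallVector.h"
#include <algorithm>

// Hypothetical matcher: records which candidates this use would fold.
static bool matchOne(int Use, llvm::SmallVectorImpl<int> &Covered) {
  Covered.push_back(Use);
  Covered.push_back(Use + 1);
  return true;
}

static bool coveredByAllUses(const llvm::SmallVectorImpl<int> &Uses,
                             int Candidate) {
  llvm::SmallVector<int, 8> Covered;
  for (int Use : Uses) {
    if (!matchOne(Use, Covered))
      return false;
    if (std::find(Covered.begin(), Covered.end(), Candidate) ==
        Covered.end())
      return false; // this use would not share Candidate
    Covered.clear(); // reset before checking the next use
  }
  return true;
}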
Example 10: simplifyLoopInst
//......... part of the code omitted here .........
// iteration over the loop and minimizes the possible causes for continuing to
// iterate.
LoopBlocksRPO RPOT(&L);
RPOT.perform(&LI);
MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
bool Changed = false;
for (;;) {
if (MSSAU && VerifyMemorySSA)
MSSA->verifyMemorySSA();
for (BasicBlock *BB : RPOT) {
for (Instruction &I : *BB) {
if (auto *PI = dyn_cast<PHINode>(&I))
VisitedPHIs.insert(PI);
if (I.use_empty()) {
if (isInstructionTriviallyDead(&I, &TLI))
DeadInsts.push_back(&I);
continue;
}
// We special case the first iteration which we can detect due to the
// empty `ToSimplify` set.
bool IsFirstIteration = ToSimplify->empty();
if (!IsFirstIteration && !ToSimplify->count(&I))
continue;
Value *V = SimplifyInstruction(&I, SQ.getWithInstruction(&I));
if (!V || !LI.replacementPreservesLCSSAForm(&I, V))
continue;
for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
UI != UE;) {
Use &U = *UI++;
auto *UserI = cast<Instruction>(U.getUser());
U.set(V);
// If the instruction is used by a PHI node we have already processed
// we'll need to iterate on the loop body to converge, so add it to
// the next set.
if (auto *UserPI = dyn_cast<PHINode>(UserI))
if (VisitedPHIs.count(UserPI)) {
Next->insert(UserPI);
continue;
}
// If we are only simplifying targeted instructions and the user is an
// instruction in the loop body, add it to our set of targeted
// instructions. Because we process defs before uses (outside of PHIs)
// we won't have visited it yet.
//
// We also skip any uses outside of the loop being simplified. Those
// should always be PHI nodes due to LCSSA form, and we don't want to
// try to simplify those away.
assert((L.contains(UserI) || isa<PHINode>(UserI)) &&
"Uses outside the loop should be PHI nodes due to LCSSA!");
if (!IsFirstIteration && L.contains(UserI))
ToSimplify->insert(UserI);
}
if (MSSAU)
if (Instruction *SimpleI = dyn_cast_or_null<Instruction>(V))
if (MemoryAccess *MA = MSSA->getMemoryAccess(&I))
if (MemoryAccess *ReplacementMA = MSSA->getMemoryAccess(SimpleI))
MA->replaceAllUsesWith(ReplacementMA);
assert(I.use_empty() && "Should always have replaced all uses!");
if (isInstructionTriviallyDead(&I, &TLI))
DeadInsts.push_back(&I);
++NumSimplified;
Changed = true;
}
}
// Delete any dead instructions found thus far now that we've finished an
// iteration over all instructions in all the loop blocks.
if (!DeadInsts.empty()) {
Changed = true;
RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, &TLI, MSSAU);
}
if (MSSAU && VerifyMemorySSA)
MSSA->verifyMemorySSA();
// If we never found a PHI that needs to be simplified in the next
// iteration, we're done.
if (Next->empty())
break;
// Otherwise, put the next set in place for the next iteration and reset it
// and the visited PHIs for that iteration.
std::swap(Next, ToSimplify);
Next->clear();
VisitedPHIs.clear();
DeadInsts.clear();
}
return Changed;
}
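The loop above ping-pongs between two worklists: once an iteration finishes, the next set is swapped into place and the now-stale one is cleared so its storage can collect the following round's work. A minimal sketch of that double-buffer idiom:
#include "llvm/ADT/SmallPtrSet.h"
#include <utility>

static void iterateToFixpoint(void *Seed) {
  llvm::SmallPtrSet<void *, 8> SetA, SetB;
  llvm::SmallPtrSet<void *, 8> *Work = &SetA, *Next = &SetB;
  Work->insert(Seed);
  for (;;) {
    // ... process *Work here, inserting follow-up items into *Next ...
    if (Next->empty())
      break;               // converged: nothing left for another round
    std::swap(Work, Next); // promote the pending items to current work
    Next->clear();         // recycle the old set for the next round
  }
}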
Example 11: runOnMachineFunction
//......... part of the code omitted here .........
unsigned Reg = MI->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
!ImpDefRegs.count(Reg)) {
// Delete all "local" implicit_def's. That include those which define
// physical registers since they cannot be liveout.
MI->eraseFromParent();
Changed = true;
continue;
}
// If there are multiple defs of the same register and at least one
// is not an implicit_def, do not insert implicit_def's before the
// uses.
bool Skip = false;
SmallVector<MachineInstr*, 4> DeadImpDefs;
for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(Reg),
DE = MRI->def_end(); DI != DE; ++DI) {
MachineInstr *DeadImpDef = &*DI;
if (!DeadImpDef->isImplicitDef()) {
Skip = true;
break;
}
DeadImpDefs.push_back(DeadImpDef);
}
if (Skip)
continue;
// The only implicit_def which we want to keep are those that are live
// out of its block.
for (unsigned j = 0, ee = DeadImpDefs.size(); j != ee; ++j)
DeadImpDefs[j]->eraseFromParent();
Changed = true;
// Process each use instruction once.
for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
UE = MRI->use_end(); UI != UE; ++UI) {
if (UI.getOperand().isUndef())
continue;
MachineInstr *RMI = &*UI;
if (ModInsts.insert(RMI))
RUses.push_back(RMI);
}
for (unsigned i = 0, e = RUses.size(); i != e; ++i) {
MachineInstr *RMI = RUses[i];
// Turn a copy use into an implicit_def.
if (isUndefCopy(RMI, Reg, ImpDefRegs)) {
RMI->setDesc(TII->get(TargetOpcode::IMPLICIT_DEF));
bool isKill = false;
SmallVector<unsigned, 4> Ops;
for (unsigned j = 0, ee = RMI->getNumOperands(); j != ee; ++j) {
MachineOperand &RRMO = RMI->getOperand(j);
if (RRMO.isReg() && RRMO.getReg() == Reg) {
Ops.push_back(j);
if (RRMO.isKill())
isKill = true;
}
}
// Leave the other operands alone.
for (unsigned j = 0, ee = Ops.size(); j != ee; ++j) {
unsigned OpIdx = Ops[j];
RMI->RemoveOperand(OpIdx-j);
}
// Update LiveVariables varinfo if the instruction is a kill.
if (isKill) {
LiveVariables::VarInfo& vi = LV->getVarInfo(Reg);
vi.removeKill(RMI);
}
continue;
}
// Replace Reg with a new vreg that's marked implicit.
const TargetRegisterClass* RC = MRI->getRegClass(Reg);
unsigned NewVReg = MRI->createVirtualRegister(RC);
bool isKill = true;
for (unsigned j = 0, ee = RMI->getNumOperands(); j != ee; ++j) {
MachineOperand &RRMO = RMI->getOperand(j);
if (RRMO.isReg() && RRMO.getReg() == Reg) {
RRMO.setReg(NewVReg);
RRMO.setIsUndef();
if (isKill) {
// Only the first operand of NewVReg is marked kill.
RRMO.setIsKill();
isKill = false;
}
}
}
}
RUses.clear();
ModInsts.clear();
}
ImpDefRegs.clear();
ImpDefMIs.clear();
}
return Changed;
}
Example 12: if
/// ValueEnumerator - Enumerate module-level information.
ValueEnumerator::ValueEnumerator(const Module *M) {
// Enumerate the global variables.
for (Module::const_global_iterator I = M->global_begin(),
E = M->global_end(); I != E; ++I)
EnumerateValue(I);
// Enumerate the functions.
for (Module::const_iterator I = M->begin(), E = M->end(); I != E; ++I) {
EnumerateValue(I);
EnumerateAttributes(cast<Function>(I)->getAttributes());
}
// Enumerate the aliases.
for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
I != E; ++I)
EnumerateValue(I);
// Remember what is the cutoff between globalvalue's and other constants.
unsigned FirstConstant = Values.size();
// Enumerate the global variable initializers.
for (Module::const_global_iterator I = M->global_begin(),
E = M->global_end(); I != E; ++I)
if (I->hasInitializer())
EnumerateValue(I->getInitializer());
// Enumerate the aliasees.
for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
I != E; ++I)
EnumerateValue(I->getAliasee());
// Insert constants and metadata that are named at module level into the slot
// pool so that the module symbol table can refer to them...
EnumerateValueSymbolTable(M->getValueSymbolTable());
EnumerateNamedMetadata(M);
SmallVector<std::pair<unsigned, MDNode*>, 8> MDs;
// Enumerate types used by function bodies and argument lists.
for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F) {
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; ++I)
EnumerateType(I->getType());
for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;++I){
for (User::const_op_iterator OI = I->op_begin(), E = I->op_end();
OI != E; ++OI) {
if (MDNode *MD = dyn_cast<MDNode>(*OI))
if (MD->isFunctionLocal() && MD->getFunction())
// These will get enumerated during function-incorporation.
continue;
EnumerateOperandType(*OI);
}
EnumerateType(I->getType());
if (const CallInst *CI = dyn_cast<CallInst>(I))
EnumerateAttributes(CI->getAttributes());
else if (const InvokeInst *II = dyn_cast<InvokeInst>(I))
EnumerateAttributes(II->getAttributes());
// Enumerate metadata attached with this instruction.
MDs.clear();
I->getAllMetadataOtherThanDebugLoc(MDs);
for (unsigned i = 0, e = MDs.size(); i != e; ++i)
EnumerateMetadata(MDs[i].second);
if (!I->getDebugLoc().isUnknown()) {
MDNode *Scope, *IA;
I->getDebugLoc().getScopeAndInlinedAt(Scope, IA, I->getContext());
if (Scope) EnumerateMetadata(Scope);
if (IA) EnumerateMetadata(IA);
}
}
}
// Optimize constant ordering.
OptimizeConstants(FirstConstant, Values.size());
}
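getAllMetadataOtherThanDebugLoc() appends into the caller's vector, so MDs above is cleared once per instruction instead of being constructed afresh; this out-parameter-plus-clear convention is common across LLVM. A minimal sketch, with fillPairs() as a hypothetical stand-in for such an API:
#include "llvm/ADT/SmallVector.h"
#include <utility>

// Hypothetical out-parameter API in the LLVM style: it only appends.
static void fillPairs(unsigned N,
                      llvm::SmallVectorImpl<std::pair<unsigned, int>> &Out) {
  for (unsigned I = 0; I != N; ++I)
    Out.push_back({I, int(I) * 2});
}

static int sumAllRounds(unsigned Rounds) {
  llvm::SmallVector<std::pair<unsigned, int>, 8> Tmp;
  int Sum = 0;
  for (unsigned R = 0; R != Rounds; ++R) {
    Tmp.clear(); // the callee appends, so stale entries must go first
    fillPairs(R, Tmp);
    for (const auto &P : Tmp)
      Sum += P.second;
  }
  return Sum;
}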
Example 13: Distribute
void ConnectedVNInfoEqClasses::Distribute(LiveInterval &LI, LiveInterval *LIV[],
MachineRegisterInfo &MRI) {
// Rewrite instructions.
for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LI.reg),
RE = MRI.reg_end(); RI != RE;) {
MachineOperand &MO = *RI;
MachineInstr *MI = RI->getParent();
++RI;
const VNInfo *VNI;
if (MI->isDebugValue()) {
// DBG_VALUE instructions don't have slot indexes, so get the index of
// the instruction before them. The value is defined there too.
SlotIndex Idx = LIS.getSlotIndexes()->getIndexBefore(*MI);
VNI = LI.Query(Idx).valueOut();
} else {
SlotIndex Idx = LIS.getInstructionIndex(*MI);
LiveQueryResult LRQ = LI.Query(Idx);
VNI = MO.readsReg() ? LRQ.valueIn() : LRQ.valueDefined();
}
// In the case of an <undef> use that isn't tied to any def, VNI will be
// NULL. If the use is tied to a def, VNI will be the defined value.
if (!VNI)
continue;
if (unsigned EqClass = getEqClass(VNI))
MO.setReg(LIV[EqClass-1]->reg);
}
// Distribute subregister liveranges.
if (LI.hasSubRanges()) {
unsigned NumComponents = EqClass.getNumClasses();
SmallVector<unsigned, 8> VNIMapping;
SmallVector<LiveInterval::SubRange*, 8> SubRanges;
BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
for (LiveInterval::SubRange &SR : LI.subranges()) {
// Create new subranges in the split intervals and construct a mapping
// for the VNInfos in the subrange.
unsigned NumValNos = SR.valnos.size();
VNIMapping.clear();
VNIMapping.reserve(NumValNos);
SubRanges.clear();
SubRanges.resize(NumComponents-1, nullptr);
for (unsigned I = 0; I < NumValNos; ++I) {
const VNInfo &VNI = *SR.valnos[I];
unsigned ComponentNum;
if (VNI.isUnused()) {
ComponentNum = 0;
} else {
const VNInfo *MainRangeVNI = LI.getVNInfoAt(VNI.def);
assert(MainRangeVNI != nullptr
&& "SubRange def must have corresponding main range def");
ComponentNum = getEqClass(MainRangeVNI);
if (ComponentNum > 0 && SubRanges[ComponentNum-1] == nullptr) {
SubRanges[ComponentNum-1]
= LIV[ComponentNum-1]->createSubRange(Allocator, SR.LaneMask);
}
}
VNIMapping.push_back(ComponentNum);
}
DistributeRange(SR, SubRanges.data(), VNIMapping);
}
LI.removeEmptySubRanges();
}
// Distribute main liverange.
DistributeRange(LI, LIV, EqClass);
}
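The VNIMapping handling above pairs clear() with reserve(): clear() keeps whatever capacity the vector has already grown to, and reserve() only reallocates when a subrange has more value numbers than any seen before, so most iterations allocate nothing. A minimal sketch of that pairing, on hypothetical group sizes:
#include "llvm/ADT/SmallVector.h"

static void processGroups(const unsigned *Sizes, unsigned NumGroups) {
  llvm::SmallVector<unsigned, 8> Mapping;
  for (unsigned G = 0; G != NumGroups; ++G) {
    Mapping.clear();           // keeps the capacity from earlier groups
    Mapping.reserve(Sizes[G]); // grows only for the largest group so far
    for (unsigned I = 0; I != Sizes[G]; ++I)
      Mapping.push_back(I & 1); // hypothetical per-element classification
    // ... hand Mapping off to the consumer here ...
  }
}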
Example 14: runOnMachineFunction
bool HexagonExpandCondsets::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(*MF.getFunction()))
return false;
HII = static_cast<const HexagonInstrInfo*>(MF.getSubtarget().getInstrInfo());
TRI = MF.getSubtarget().getRegisterInfo();
MDT = &getAnalysis<MachineDominatorTree>();
LIS = &getAnalysis<LiveIntervals>();
MRI = &MF.getRegInfo();
DEBUG(LIS->print(dbgs() << "Before expand-condsets\n",
MF.getFunction()->getParent()));
bool Changed = false;
std::set<unsigned> CoalUpd, PredUpd;
SmallVector<MachineInstr*,16> Condsets;
for (auto &B : MF)
for (auto &I : B)
if (isCondset(I))
Condsets.push_back(&I);
// Try to coalesce the target of a mux with one of its sources.
// This could eliminate a register copy in some circumstances.
Changed |= coalesceSegments(Condsets, CoalUpd);
// Update kill flags on all source operands. This is done here because
// at this moment (when expand-condsets runs), there are no kill flags
// in the IR (they have been removed by live range analysis).
// Updating them right before we split is the easiest, because splitting
// adds definitions which would interfere with updating kills afterwards.
std::set<unsigned> KillUpd;
for (MachineInstr *MI : Condsets)
for (MachineOperand &Op : MI->operands())
if (Op.isReg() && Op.isUse())
if (!CoalUpd.count(Op.getReg()))
KillUpd.insert(Op.getReg());
updateLiveness(KillUpd, false, true, false);
DEBUG(LIS->print(dbgs() << "After coalescing\n",
MF.getFunction()->getParent()));
// First, simply split all muxes into a pair of conditional transfers
// and update the live intervals to reflect the new arrangement. The
// goal is to update the kill flags, since predication will rely on
// them.
for (MachineInstr *MI : Condsets)
Changed |= split(*MI, PredUpd);
Condsets.clear(); // The contents of Condsets are invalid here anyway.
// Do not update live ranges after splitting. Recalculation of live
// intervals removes kill flags, which were preserved by splitting on
// the source operands of condsets. These kill flags are needed by
// predication, and after splitting they are difficult to recalculate
// (because of predicated defs), so make sure they are left untouched.
// Predication does not use live intervals.
DEBUG(LIS->print(dbgs() << "After splitting\n",
MF.getFunction()->getParent()));
// Traverse all blocks and collapse predicable instructions feeding
// conditional transfers into predicated instructions.
// Walk over all the instructions again, so we may catch pre-existing
// cases that were not created in the previous step.
for (auto &B : MF)
Changed |= predicateInBlock(B, PredUpd);
DEBUG(LIS->print(dbgs() << "After predicating\n",
MF.getFunction()->getParent()));
PredUpd.insert(CoalUpd.begin(), CoalUpd.end());
updateLiveness(PredUpd, true, true, true);
DEBUG({
if (Changed)
LIS->print(dbgs() << "After expand-condsets\n",
MF.getFunction()->getParent());
});
Example 15: main
//......... part of the code omitted here .........
if (OptLevelO2)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 0);
if (OptLevelOs)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 1);
if (OptLevelOz)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 2, 2);
if (OptLevelO3)
AddOptimizationPasses(Passes, *FPasses, TM.get(), 3, 0);
if (FPasses) {
FPasses->doInitialization();
for (Function &F : *M)
FPasses->run(F);
FPasses->doFinalization();
}
// Check that the module is well formed on completion of optimization
if (!NoVerify && !VerifyEach)
Passes.add(createVerifierPass());
// In run twice mode, we want to make sure the output is bit-by-bit
// equivalent if we run the pass manager again, so set up two buffers and
// a stream to write to them. Note that llc does something similar and it
// may be worth abstracting this out in the future.
SmallVector<char, 0> Buffer;
SmallVector<char, 0> CompileTwiceBuffer;
std::unique_ptr<raw_svector_ostream> BOS;
raw_ostream *OS = nullptr;
// Write bitcode or assembly to the output as the last step...
if (!NoOutput && !AnalyzeOnly) {
assert(Out);
OS = &Out->os();
if (RunTwice) {
BOS = make_unique<raw_svector_ostream>(Buffer);
OS = BOS.get();
}
if (OutputAssembly) {
if (EmitSummaryIndex)
report_fatal_error("Text output is incompatible with -module-summary");
if (EmitModuleHash)
report_fatal_error("Text output is incompatible with -module-hash");
Passes.add(createPrintModulePass(*OS, "", PreserveAssemblyUseListOrder));
} else if (OutputThinLTOBC)
Passes.add(createWriteThinLTOBitcodePass(
*OS, ThinLinkOut ? &ThinLinkOut->os() : nullptr));
else
Passes.add(createBitcodeWriterPass(*OS, PreserveBitcodeUseListOrder,
EmitSummaryIndex, EmitModuleHash));
}
// Before executing passes, print the final values of the LLVM options.
cl::PrintOptionValues();
// If requested, run all passes again with the same pass manager to catch
// bugs caused by persistent state in the passes
if (RunTwice) {
std::unique_ptr<Module> M2(CloneModule(M.get()));
Passes.run(*M2);
CompileTwiceBuffer = Buffer;
Buffer.clear();
}
// Now that we have all of the passes ready, run them.
Passes.run(*M);
// Compare the two outputs and make sure they're the same
if (RunTwice) {
assert(Out);
if (Buffer.size() != CompileTwiceBuffer.size() ||
(memcmp(Buffer.data(), CompileTwiceBuffer.data(), Buffer.size()) !=
0)) {
errs() << "Running the pass manager twice changed the output.\n"
"Writing the result of the second run to the specified output.\n"
"To generate the one-run comparison binary, just run without\n"
"the compile-twice option\n";
Out->os() << BOS->str();
Out->keep();
if (OptRemarkFile)
OptRemarkFile->keep();
return 1;
}
Out->os() << BOS->str();
}
// Declare success.
if (!NoOutput || PrintBreakpoints)
Out->keep();
if (OptRemarkFile)
OptRemarkFile->keep();
if (ThinLinkOut)
ThinLinkOut->keep();
return 0;
}
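The RunTwice path above is the last distinct clear() idiom on this page: capture the first run's output, copy it aside, clear the buffer, rerun the pipeline into the same storage, then memcmp the two. A minimal sketch of that determinism check, with produce() as a hypothetical stand-in for Passes.run():
#include "llvm/ADT/SmallVector.h"
#include <cstring>

// Hypothetical stand-in for a pipeline writing bytes into Out.
static void produce(llvm::SmallVectorImpl<char> &Out) {
  const char Data[] = "deterministic?";
  Out.append(Data, Data + sizeof(Data) - 1);
}

static bool isDeterministic() {
  llvm::SmallVector<char, 0> Buffer, FirstRun;
  produce(Buffer);
  FirstRun = Buffer; // keep the first run's bytes
  Buffer.clear();    // reuse the same buffer for the second run
  produce(Buffer);
  return Buffer.size() == FirstRun.size() &&
         std::memcmp(Buffer.data(), FirstRun.data(), Buffer.size()) == 0;
}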