This article collects typical usage examples of the C++ ArrayRef class. If you have been wondering what ArrayRef is good for, how it is used, or where to find usage examples, the curated class code examples here may be of help.
The following presents 15 code examples of the ArrayRef class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
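A quick orientation before the examples, in case ArrayRef itself is new to you: llvm::ArrayRef<T> (from llvm/ADT/ArrayRef.h) is a lightweight, non-owning view of a contiguous sequence of T — essentially a pointer plus a length. It is cheap to copy, so APIs conventionally take it by value, and it converts implicitly from C arrays, std::vector, and llvm::SmallVector. Here is a minimal sketch of that behavior; the sum helper is ours, purely for illustration, and assumes the LLVM headers are on the include path:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include <cstdio>
#include <vector>

// Hypothetical helper: ArrayRef is passed by value because it is only a
// {pointer, size} pair; no elements are copied.
static int sum(llvm::ArrayRef<int> Vals) {
  int Total = 0;
  for (int V : Vals)
    Total += V;
  return Total;
}

int main() {
  int CArr[] = {1, 2, 3};
  std::vector<int> Vec = {4, 5};
  llvm::SmallVector<int, 4> SV;
  SV.push_back(6);
  // All three containers convert implicitly to ArrayRef<int>.
  std::printf("%d %d %d\n", sum(CArr), sum(Vec), sum(SV));
  return 0;
}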
Example 1: while
/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
SmallVectorImpl<unsigned> &ValLoc,
unsigned &DataBits,
const TargetLoweringBase &TLI) {
while (true) {
// Try to look through V; if V is not an instruction, it can't be looked
// through.
const Instruction *I = dyn_cast<Instruction>(V);
if (!I || I->getNumOperands() == 0) return V;
const Value *NoopInput = nullptr;
Value *Op = I->getOperand(0);
if (isa<BitCastInst>(I)) {
// Look through truly no-op bitcasts.
if (isNoopBitcast(Op->getType(), I->getType(), TLI))
NoopInput = Op;
} else if (isa<GetElementPtrInst>(I)) {
// Look through getelementptr
if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
NoopInput = Op;
} else if (isa<IntToPtrInst>(I)) {
// Look through inttoptr.
// Make sure this isn't a truncating or extending cast. We could
// support this eventually, but don't bother for now.
if (!isa<VectorType>(I->getType()) &&
TLI.getPointerTy().getSizeInBits() ==
cast<IntegerType>(Op->getType())->getBitWidth())
NoopInput = Op;
} else if (isa<PtrToIntInst>(I)) {
// Look through ptrtoint.
// Make sure this isn't a truncating or extending cast. We could
// support this eventually, but don't bother for now.
if (!isa<VectorType>(I->getType()) &&
TLI.getPointerTy().getSizeInBits() ==
cast<IntegerType>(I->getType())->getBitWidth())
NoopInput = Op;
} else if (isa<TruncInst>(I) &&
TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
NoopInput = Op;
} else if (isa<CallInst>(I)) {
// Look through call (skipping callee)
for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 1;
i != e; ++i) {
unsigned attrInd = i - I->op_begin() + 1;
if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
NoopInput = *i;
break;
}
}
} else if (isa<InvokeInst>(I)) {
// Look through invoke (skipping BB, BB, Callee)
for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 3;
i != e; ++i) {
unsigned attrInd = i - I->op_begin() + 1;
if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
NoopInput = *i;
break;
}
}
} else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
// Value may come from either the aggregate or the scalar
ArrayRef<unsigned> InsertLoc = IVI->getIndices();
if (std::equal(InsertLoc.rbegin(), InsertLoc.rend(),
ValLoc.rbegin())) {
// The type being inserted is a nested sub-type of the aggregate; we
// have to remove those initial indices to get the location we're
// interested in for the operand.
ValLoc.resize(ValLoc.size() - InsertLoc.size());
NoopInput = IVI->getInsertedValueOperand();
} else {
// The struct we're inserting into has the value we're interested in, no
// change of address.
NoopInput = Op;
}
} else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
// The part we're interested in will inevitably be some sub-section of the
// previous aggregate. Combine the two paths to obtain the true address of
// our element.
ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
std::copy(ExtractLoc.rbegin(), ExtractLoc.rend(),
std::back_inserter(ValLoc));
NoopInput = Op;
}
// Terminate if we couldn't find anything to look through.
if (!NoopInput)
return V;
//......... (part of the code omitted here) .........
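The InsertValueInst branch is the part of Example 1 that exercises ArrayRef most directly: it matches a suffix of the index path by walking both sequences backwards with rbegin()/rend(). A reduced sketch of just that idiom, with our own names and an explicit size guard added (the original relies on context to guarantee the lengths):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <cassert>

// True if Path ends with Suffix, compared from the back the way
// getNoopInput compares InsertLoc against ValLoc.
static bool endsWith(llvm::ArrayRef<unsigned> Suffix,
                     const llvm::SmallVectorImpl<unsigned> &Path) {
  return Suffix.size() <= Path.size() &&
         std::equal(Suffix.rbegin(), Suffix.rend(), Path.rbegin());
}

int main() {
  llvm::SmallVector<unsigned, 4> Path;
  Path.push_back(0);
  Path.push_back(2);
  Path.push_back(1);
  unsigned Indices[] = {2, 1};
  assert(endsWith(Indices, Path)); // {2, 1} is a suffix of {0, 2, 1}
  return 0;
}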
Example 2: containsReg
/// \brief Convenient wrapper for checking membership in RegisterOperands.
/// (std::count() doesn't have an early exit).
static bool containsReg(ArrayRef<unsigned> RegUnits, unsigned RegUnit) {
return std::find(RegUnits.begin(), RegUnits.end(), RegUnit) != RegUnits.end();
}
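containsReg is a good illustration of why ArrayRef parameters are taken by value: every contiguous container converts implicitly, so call sites need no glue code. A hypothetical, self-contained call site under that assumption:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include <algorithm>

static bool containsReg(llvm::ArrayRef<unsigned> RegUnits, unsigned RegUnit) {
  return std::find(RegUnits.begin(), RegUnits.end(), RegUnit) != RegUnits.end();
}

int main() {
  llvm::SmallVector<unsigned, 8> LiveUnits;
  LiveUnits.push_back(3);
  LiveUnits.push_back(7);
  bool A = containsReg(LiveUnits, 7);  // SmallVector converts implicitly
  bool B = containsReg({1, 2, 3}, 9);  // so does a braced list
  return (A && !B) ? 0 : 1;
}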
Example 3: decreaseRegPressure
/// Simply decrease the current pressure as impacted by these registers.
void RegPressureTracker::decreaseRegPressure(ArrayRef<unsigned> RegUnits) {
for (unsigned I = 0, E = RegUnits.size(); I != E; ++I)
decreaseSetPressure(CurrSetPressure, MRI->getPressureSets(RegUnits[I]));
}
Example 4: emitModuleFlags
/// emitModuleFlags - Perform code emission for module flags.
void TargetLoweringObjectFileMachO::
emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
Mangler &Mang, const TargetMachine &TM) const {
unsigned VersionVal = 0;
unsigned ImageInfoFlags = 0;
MDNode *LinkerOptions = nullptr;
StringRef SectionVal;
for (ArrayRef<Module::ModuleFlagEntry>::iterator
i = ModuleFlags.begin(), e = ModuleFlags.end(); i != e; ++i) {
const Module::ModuleFlagEntry &MFE = *i;
// Ignore flags with 'Require' behavior.
if (MFE.Behavior == Module::Require)
continue;
StringRef Key = MFE.Key->getString();
Metadata *Val = MFE.Val;
if (Key == "Objective-C Image Info Version") {
VersionVal = mdconst::extract<ConstantInt>(Val)->getZExtValue();
} else if (Key == "Objective-C Garbage Collection" ||
Key == "Objective-C GC Only" ||
Key == "Objective-C Is Simulated" ||
Key == "Objective-C Image Swift Version") {
ImageInfoFlags |= mdconst::extract<ConstantInt>(Val)->getZExtValue();
} else if (Key == "Objective-C Image Info Section") {
SectionVal = cast<MDString>(Val)->getString();
} else if (Key == "Linker Options") {
LinkerOptions = cast<MDNode>(Val);
}
}
// Emit the linker options if present.
if (LinkerOptions) {
for (unsigned i = 0, e = LinkerOptions->getNumOperands(); i != e; ++i) {
MDNode *MDOptions = cast<MDNode>(LinkerOptions->getOperand(i));
SmallVector<std::string, 4> StrOptions;
// Convert to strings.
for (unsigned ii = 0, ie = MDOptions->getNumOperands(); ii != ie; ++ii) {
MDString *MDOption = cast<MDString>(MDOptions->getOperand(ii));
StrOptions.push_back(MDOption->getString());
}
Streamer.EmitLinkerOptions(StrOptions);
}
}
// The section is mandatory. If we don't have it, then we don't have GC info.
if (SectionVal.empty()) return;
StringRef Segment, Section;
unsigned TAA = 0, StubSize = 0;
bool TAAParsed;
std::string ErrorCode =
MCSectionMachO::ParseSectionSpecifier(SectionVal, Segment, Section,
TAA, TAAParsed, StubSize);
if (!ErrorCode.empty())
// If invalid, report the error with report_fatal_error.
report_fatal_error("Invalid section specifier '" + Section + "': " +
ErrorCode + ".");
// Get the section.
const MCSectionMachO *S =
getContext().getMachOSection(Segment, Section, TAA, StubSize,
SectionKind::getDataNoRel());
Streamer.SwitchSection(S);
Streamer.EmitLabel(getContext().
GetOrCreateSymbol(StringRef("L_OBJC_IMAGE_INFO")));
Streamer.EmitIntValue(VersionVal, 4);
Streamer.EmitIntValue(ImageInfoFlags, 4);
Streamer.AddBlankLine();
}
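One stylistic note on the loop at the top of Example 4: ArrayRef<T>::iterator is just const T*, so the explicitly spelled iterator loop over ModuleFlags could equally be a range-based for. A small sketch of the equivalent traversal, using a stand-in Entry type of our own rather than Module::ModuleFlagEntry:

#include "llvm/ADT/ArrayRef.h"

struct Entry {
  int Behavior;
};

// Same shape as the ModuleFlags loop above: iterate by const reference;
// ArrayRef iterators are plain const pointers into the backing storage.
static int countWithBehavior(llvm::ArrayRef<Entry> Flags, int Behavior) {
  int N = 0;
  for (const Entry &E : Flags)
    if (E.Behavior == Behavior)
      ++N;
  return N;
}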
Example 5: switch
void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
switch (getKind()) {
case APValue::Uninitialized:
Out << "<uninitialized>";
return;
case APValue::Int:
if (Ty->isBooleanType())
Out << (getInt().getBoolValue() ? "true" : "false");
else
Out << getInt();
return;
case APValue::Float:
Out << GetApproxValue(getFloat());
return;
case APValue::Vector: {
Out << '{';
QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
getVectorElt(0).printPretty(Out, Ctx, ElemTy);
for (unsigned i = 1; i != getVectorLength(); ++i) {
Out << ", ";
getVectorElt(i).printPretty(Out, Ctx, ElemTy);
}
Out << '}';
return;
}
case APValue::ComplexInt:
Out << getComplexIntReal() << "+" << getComplexIntImag() << "i";
return;
case APValue::ComplexFloat:
Out << GetApproxValue(getComplexFloatReal()) << "+"
<< GetApproxValue(getComplexFloatImag()) << "i";
return;
case APValue::LValue: {
LValueBase Base = getLValueBase();
if (!Base) {
Out << "0";
return;
}
bool IsReference = Ty->isReferenceType();
QualType InnerTy
= IsReference ? Ty.getNonReferenceType() : Ty->getPointeeType();
if (!hasLValuePath()) {
// No lvalue path: just print the offset.
CharUnits O = getLValueOffset();
CharUnits S = Ctx.getTypeSizeInChars(InnerTy);
if (!O.isZero()) {
if (IsReference)
Out << "*(";
if (O % S) {
Out << "(char*)";
S = CharUnits::One();
}
Out << '&';
} else if (!IsReference)
Out << '&';
if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
Out << *VD;
else
Base.get<const Expr*>()->printPretty(Out, 0, Ctx.getPrintingPolicy());
if (!O.isZero()) {
Out << " + " << (O / S);
if (IsReference)
Out << ')';
}
return;
}
// We have an lvalue path. Print it out nicely.
if (!IsReference)
Out << '&';
else if (isLValueOnePastTheEnd())
Out << "*(&";
QualType ElemTy;
if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
Out << *VD;
ElemTy = VD->getType();
} else {
const Expr *E = Base.get<const Expr*>();
E->printPretty(Out, 0, Ctx.getPrintingPolicy());
ElemTy = E->getType();
}
ArrayRef<LValuePathEntry> Path = getLValuePath();
const CXXRecordDecl *CastToBase = 0;
for (unsigned I = 0, N = Path.size(); I != N; ++I) {
if (ElemTy->getAs<RecordType>()) {
// The lvalue refers to a class type, so the next path entry is a base
// or member.
const Decl *BaseOrMember =
BaseOrMemberType::getFromOpaqueValue(Path[I].BaseOrMember).getPointer();
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) {
CastToBase = RD;
ElemTy = Ctx.getRecordType(RD);
} else {
const ValueDecl *VD = cast<ValueDecl>(BaseOrMember);
Out << ".";
//......... (part of the code omitted here) .........
Example 6: performCompile
/// Performs the compile requested by the user.
/// \param Instance Will be reset after performIRGeneration when the verifier
/// mode is NoVerify and there were no errors.
/// \returns true on error
static bool performCompile(std::unique_ptr<CompilerInstance> &Instance,
CompilerInvocation &Invocation,
ArrayRef<const char *> Args,
int &ReturnValue,
FrontendObserver *observer) {
FrontendOptions opts = Invocation.getFrontendOptions();
FrontendOptions::ActionType Action = opts.RequestedAction;
// We've been asked to precompile a bridging header; we want to
// avoid touching any other inputs and just parse, emit and exit.
if (Action == FrontendOptions::EmitPCH) {
auto clangImporter = static_cast<ClangImporter *>(
Instance->getASTContext().getClangModuleLoader());
return clangImporter->emitBridgingPCH(
Invocation.getInputFilenames()[0], opts.getSingleOutputFilename());
}
IRGenOptions &IRGenOpts = Invocation.getIRGenOptions();
bool inputIsLLVMIr = Invocation.getInputKind() == InputFileKind::IFK_LLVM_IR;
if (inputIsLLVMIr) {
auto &LLVMContext = getGlobalLLVMContext();
// Load in bitcode file.
assert(Invocation.getInputFilenames().size() == 1 &&
"We expect a single input for bitcode input!");
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> FileBufOrErr =
llvm::MemoryBuffer::getFileOrSTDIN(Invocation.getInputFilenames()[0]);
if (!FileBufOrErr) {
Instance->getASTContext().Diags.diagnose(SourceLoc(),
diag::error_open_input_file,
Invocation.getInputFilenames()[0],
FileBufOrErr.getError().message());
return true;
}
llvm::MemoryBuffer *MainFile = FileBufOrErr.get().get();
llvm::SMDiagnostic Err;
std::unique_ptr<llvm::Module> Module = llvm::parseIR(
MainFile->getMemBufferRef(),
Err, LLVMContext);
if (!Module) {
// TODO: Translate from the diagnostic info to the SourceManager location
// if available.
Instance->getASTContext().Diags.diagnose(SourceLoc(),
diag::error_parse_input_file,
Invocation.getInputFilenames()[0],
Err.getMessage());
return true;
}
// TODO: remove once the frontend understands what action it should perform
IRGenOpts.OutputKind = getOutputKind(Action);
return performLLVM(IRGenOpts, Instance->getASTContext(), Module.get());
}
ReferencedNameTracker nameTracker;
bool shouldTrackReferences = !opts.ReferenceDependenciesFilePath.empty();
if (shouldTrackReferences)
Instance->setReferencedNameTracker(&nameTracker);
if (Action == FrontendOptions::Parse ||
Action == FrontendOptions::DumpParse ||
Action == FrontendOptions::DumpInterfaceHash)
Instance->performParseOnly();
else
Instance->performSema();
if (Action == FrontendOptions::Parse)
return Instance->getASTContext().hadError();
if (observer) {
observer->performedSemanticAnalysis(*Instance);
}
FrontendOptions::DebugCrashMode CrashMode = opts.CrashMode;
if (CrashMode == FrontendOptions::DebugCrashMode::AssertAfterParse)
debugFailWithAssertion();
else if (CrashMode == FrontendOptions::DebugCrashMode::CrashAfterParse)
debugFailWithCrash();
ASTContext &Context = Instance->getASTContext();
if (Action == FrontendOptions::REPL) {
runREPL(*Instance, ProcessCmdLine(Args.begin(), Args.end()),
Invocation.getParseStdlib());
return Context.hadError();
}
SourceFile *PrimarySourceFile = Instance->getPrimarySourceFile();
// We've been told to dump the AST (either after parsing or type-checking,
// which is already differentiated in CompilerInstance::performSema()),
// so dump or print the main source file and return.
if (Action == FrontendOptions::DumpParse ||
//......... (part of the code omitted here) .........
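Example 6 arrives truncated, but its ArrayRef-related point is already visible in the signature: the frontend passes the command line around as ArrayRef<const char *> and later forwards it with Args.begin()/Args.end() (the ProcessCmdLine call). A driver can build such a view straight over argv; a minimal sketch of ours, adapted to the portable main signature (hence ArrayRef<char *>):

#include "llvm/ADT/ArrayRef.h"
#include <cstdio>

int main(int argc, char **argv) {
  // Non-copying view over argv[1..argc-1]; ArrayRef(pointer, length)
  // just records the bounds.
  llvm::ArrayRef<char *> Args(argv + 1, argc - 1);
  for (char *A : Args)
    std::printf("arg: %s\n", A);
  return 0;
}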
Example 7: emit
void MaterializeForSetEmitter::emit(SILGenFunction &gen, ManagedValue self,
SILValue resultBuffer,
SILValue callbackBuffer,
ArrayRef<ManagedValue> indices) {
SILLocation loc = Witness;
loc.markAutoGenerated();
// If there's an abstraction difference, we always need to use the
// get/set pattern.
AccessStrategy strategy;
if (WitnessStorage->getType()->is<ReferenceStorageType>() ||
(Conformance && RequirementStorageType != WitnessStorageType)) {
strategy = AccessStrategy::DispatchToAccessor;
} else {
strategy = WitnessStorage->getAccessStrategy(TheAccessSemantics,
AccessKind::ReadWrite);
}
// Handle the indices.
RValue indicesRV;
if (isa<SubscriptDecl>(WitnessStorage)) {
indicesRV = collectIndicesFromParameters(gen, loc, indices);
} else {
assert(indices.empty() && "indices for a non-subscript?");
}
// As above, assume that we don't need to reabstract 'self'.
// Choose the right implementation.
SILValue address;
SILFunction *callbackFn = nullptr;
switch (strategy) {
case AccessStrategy::Storage:
address = emitUsingStorage(gen, loc, self, std::move(indicesRV));
break;
case AccessStrategy::Addressor:
address = emitUsingAddressor(gen, loc, self, std::move(indicesRV),
callbackBuffer, callbackFn);
break;
case AccessStrategy::DirectToAccessor:
case AccessStrategy::DispatchToAccessor:
address = emitUsingGetterSetter(gen, loc, self, std::move(indicesRV),
resultBuffer, callbackBuffer, callbackFn);
break;
}
// Return the address as a Builtin.RawPointer.
SILType rawPointerTy = SILType::getRawPointerType(gen.getASTContext());
address = gen.B.createAddressToPointer(loc, address, rawPointerTy);
SILType resultTupleTy = gen.F.mapTypeIntoContext(
gen.F.getLoweredFunctionType()->getSILResult());
SILType optCallbackTy = resultTupleTy.getTupleElementType(1);
// Form the callback.
SILValue callback;
if (callbackFn) {
// Make a reference to the function.
callback = gen.B.createFunctionRef(loc, callbackFn);
// If it's polymorphic, cast to RawPointer and then back to the
// right monomorphic type. The safety of this cast relies on some
// assumptions about what exactly IRGen can reconstruct from the
// callback's thick type argument.
if (callbackFn->getLoweredFunctionType()->isPolymorphic()) {
callback = gen.B.createThinFunctionToPointer(loc, callback, rawPointerTy);
OptionalTypeKind optKind;
auto callbackTy = optCallbackTy.getAnyOptionalObjectType(SGM.M, optKind);
callback = gen.B.createPointerToThinFunction(loc, callback, callbackTy);
}
callback = gen.B.createOptionalSome(loc, callback, optCallbackTy);
} else {
callback = gen.B.createOptionalNone(loc, optCallbackTy);
}
// Form the result and return.
auto result = gen.B.createTuple(loc, resultTupleTy, { address, callback });
gen.Cleanups.emitCleanupsForReturn(CleanupLocation::get(loc));
gen.B.createReturn(loc, result);
}
Example 8: setClauses
void OMPExecutableDirective::setClauses(ArrayRef<OMPClause *> Clauses) {
assert(Clauses.size() == getNumClauses() &&
"Number of clauses is not the same as the preallocated buffer");
std::copy(Clauses.begin(), Clauses.end(), getClauses().begin());
}
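setClauses writes through getClauses().begin() into a buffer that was preallocated when the directive was created, with the assert enforcing the size contract. The same copy-into-preallocated-storage shape can be expressed with MutableArrayRef, ArrayRef's writable counterpart from the same header. A sketch with our own names:

#include "llvm/ADT/ArrayRef.h"
#include <algorithm>
#include <cassert>

// Copy Src into a preallocated destination, mirroring the size assert in
// setClauses. MutableArrayRef<T> is a view whose elements may be written.
static void copyInto(llvm::ArrayRef<int> Src, llvm::MutableArrayRef<int> Dst) {
  assert(Src.size() == Dst.size() &&
         "destination buffer must be preallocated to the right size");
  std::copy(Src.begin(), Src.end(), Dst.begin());
}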
Example 9: analyze
/// This function goes through the arguments of F and sees if we have anything
/// to optimize in which case it returns true. If we have nothing to optimize,
/// it returns false.
bool FunctionAnalyzer::analyze() {
// For now ignore functions with indirect results.
if (F->getLoweredFunctionType()->hasIndirectResult())
return false;
ArrayRef<SILArgument *> Args = F->begin()->getBBArgs();
// A map from consumed SILArguments to the release associated with an
// argument.
ConsumedArgToEpilogueReleaseMatcher ArgToReturnReleaseMap(RCIA, F);
ConsumedArgToEpilogueReleaseMatcher ArgToThrowReleaseMap(
RCIA, F, ConsumedArgToEpilogueReleaseMatcher::ExitKind::Throw);
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
ArgumentDescriptor A(Allocator, Args[i]);
bool HaveOptimizedArg = false;
bool isABIRequired = isArgumentABIRequired(Args[i]);
auto OnlyRelease = getNonTrivialNonDebugReleaseUse(Args[i]);
// If this argument is not ABI required and has no uses except for debug
// instructions, remove it.
if (!isABIRequired && OnlyRelease && OnlyRelease.getValue().isNull()) {
A.IsDead = true;
HaveOptimizedArg = true;
++NumDeadArgsEliminated;
}
// See if we can find a ref count equivalent strong_release or release_value
// at the end of this function if our argument is an @owned parameter.
if (A.hasConvention(ParameterConvention::Direct_Owned)) {
if (auto *Release = ArgToReturnReleaseMap.releaseForArgument(A.Arg)) {
SILInstruction *ReleaseInThrow = nullptr;
// If the function has a throw block we must also find a matching
// release in the throw block.
if (!ArgToThrowReleaseMap.hasBlock() ||
(ReleaseInThrow = ArgToThrowReleaseMap.releaseForArgument(A.Arg))) {
// TODO: accept a second release in the throw block to let the
// argument be dead.
if (OnlyRelease && OnlyRelease.getValue().getPtrOrNull() == Release) {
A.IsDead = true;
}
A.CalleeRelease = Release;
A.CalleeReleaseInThrowBlock = ReleaseInThrow;
HaveOptimizedArg = true;
++NumOwnedConvertedToGuaranteed;
}
}
}
if (A.shouldExplode()) {
HaveOptimizedArg = true;
++NumSROAArguments;
}
if (HaveOptimizedArg) {
ShouldOptimize = true;
// Store that we have modified the self argument. We need to change the
// calling convention later.
if (Args[i]->isSelf())
HaveModifiedSelfArgument = true;
}
// Add the argument to our list.
ArgDescList.push_back(std::move(A));
}
return ShouldOptimize;
}
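Note the first ArrayRef use in Example 9: Args is a view of the entry block's argument list, not a copy, so it stays valid only as long as that list is not mutated. That non-owning nature is the main thing to keep in mind with ArrayRef in general; a cautionary sketch (entirely ours, not from the source):

#include "llvm/ADT/ArrayRef.h"
#include <vector>

// ArrayRef does not own its elements: the backing storage must outlive
// the view.
static llvm::ArrayRef<int> dangling() {
  std::vector<int> Local = {1, 2, 3};
  return Local; // BUG: Local is destroyed here; the returned view dangles.
}

static llvm::ArrayRef<int> fine(const std::vector<int> &Stable) {
  return Stable; // OK: the caller's vector outlives the view.
}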
Example 10: AttrNonNull
void NonNullParamChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
const Decl *FD = Call.getDecl();
if (!FD)
return;
// Merge all non-null attributes
unsigned NumArgs = Call.getNumArgs();
llvm::SmallBitVector AttrNonNull(NumArgs);
for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
if (!NonNull->args_size()) {
AttrNonNull.set(0, NumArgs);
break;
}
for (unsigned Val : NonNull->args()) {
if (Val >= NumArgs)
continue;
AttrNonNull.set(Val);
}
}
ProgramStateRef state = C.getState();
CallEvent::param_type_iterator TyI = Call.param_type_begin(),
TyE = Call.param_type_end();
for (unsigned idx = 0; idx < NumArgs; ++idx) {
// Check if the parameter is a reference. We want to report when reference
// to a null pointer is passed as a parameter.
bool haveRefTypeParam = false;
if (TyI != TyE) {
haveRefTypeParam = (*TyI)->isReferenceType();
TyI++;
}
bool haveAttrNonNull = AttrNonNull[idx];
if (!haveAttrNonNull) {
// Check if the parameter is also marked 'nonnull'.
ArrayRef<ParmVarDecl*> parms = Call.parameters();
if (idx < parms.size())
haveAttrNonNull = parms[idx]->hasAttr<NonNullAttr>();
}
if (!haveRefTypeParam && !haveAttrNonNull)
continue;
// If the value is unknown or undefined, we can't perform this check.
const Expr *ArgE = Call.getArgExpr(idx);
SVal V = Call.getArgSVal(idx);
Optional<DefinedSVal> DV = V.getAs<DefinedSVal>();
if (!DV)
continue;
// Process the case when the argument is not a location.
assert(!haveRefTypeParam || DV->getAs<Loc>());
if (haveAttrNonNull && !DV->getAs<Loc>()) {
// If the argument is a union type, we want to handle a potential
// transparent_union GCC extension.
if (!ArgE)
continue;
QualType T = ArgE->getType();
const RecordType *UT = T->getAsUnionType();
if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
continue;
if (Optional<nonloc::CompoundVal> CSV =
DV->getAs<nonloc::CompoundVal>()) {
nonloc::CompoundVal::iterator CSV_I = CSV->begin();
assert(CSV_I != CSV->end());
V = *CSV_I;
DV = V.getAs<DefinedSVal>();
assert(++CSV_I == CSV->end());
// FIXME: Handle (some_union){ some_other_union_val }, which turns into
// a LazyCompoundVal inside a CompoundVal.
if (!V.getAs<Loc>())
continue;
// Retrieve the corresponding expression.
if (const CompoundLiteralExpr *CE = dyn_cast<CompoundLiteralExpr>(ArgE))
if (const InitListExpr *IE =
dyn_cast<InitListExpr>(CE->getInitializer()))
ArgE = dyn_cast<Expr>(*(IE->begin()));
} else {
// FIXME: Handle LazyCompoundVals?
continue;
}
}
ConstraintManager &CM = C.getConstraintManager();
ProgramStateRef stateNotNull, stateNull;
std::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
if (stateNull) {
if (!stateNotNull) {
// Generate an error node. Check for a null node in case
// we cache out.
if (ExplodedNode *errorNode = C.generateErrorNode(stateNull)) {
//......... (part of the code omitted here) .........
Example 11: NoteJumpIntoScopes
/// Produce note diagnostics for a jump into a protected scope.
void JumpScopeChecker::NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes) {
assert(!ToScopes.empty());
for (unsigned I = 0, E = ToScopes.size(); I != E; ++I)
if (Scopes[ToScopes[I]].InDiag)
S.Diag(Scopes[ToScopes[I]].Loc, Scopes[ToScopes[I]].InDiag);
}
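Example 11 shows the small read-only surface of the class: empty() for the precondition and operator[] for access. In asserts-enabled LLVM builds, ArrayRef::operator[], front(), and back() all check that the access is in bounds. A compact sketch of that access pattern with hypothetical names:

#include "llvm/ADT/ArrayRef.h"
#include <cassert>
#include <cstdio>

static void printScopes(llvm::ArrayRef<unsigned> ToScopes) {
  assert(!ToScopes.empty() && "caller guarantees at least one scope");
  // front()/back() and operator[] assert in-bounds access in +Asserts builds.
  std::printf("first=%u last=%u\n", ToScopes.front(), ToScopes.back());
  for (unsigned I = 0, E = ToScopes.size(); I != E; ++I)
    std::printf("scope[%u]=%u\n", I, ToScopes[I]);
}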
Example 12: return
// Find the minimum offset that we may store a value of size Size bits at. If
// IsAfter is set, look for an offset before the object, otherwise look for an
// offset after the object.
uint64_t
wholeprogramdevirt::findLowestOffset(ArrayRef<VirtualCallTarget> Targets,
bool IsAfter, uint64_t Size) {
// Find a minimum offset taking into account only vtable sizes.
uint64_t MinByte = 0;
for (const VirtualCallTarget &Target : Targets) {
if (IsAfter)
MinByte = std::max(MinByte, Target.minAfterBytes());
else
MinByte = std::max(MinByte, Target.minBeforeBytes());
}
// Build a vector of arrays of bytes covering, for each target, a slice of the
// used region (see AccumBitVector::BytesUsed in
// llvm/Transforms/IPO/WholeProgramDevirt.h) starting at MinByte. Effectively,
// this aligns the used regions to start at MinByte.
//
// In this example, A, B and C are vtables, # is a byte already allocated for
// a virtual function pointer, AAAA... (etc.) are the used regions for the
// vtables and Offset(X) is the value computed for the Offset variable below
// for X.
//
// Offset(A)
// | |
// |MinByte
// A: ################AAAAAAAA|AAAAAAAA
// B: ########BBBBBBBBBBBBBBBB|BBBB
// C: ########################|CCCCCCCCCCCCCCCC
// | Offset(B) |
//
// This code produces the slices of A, B and C that appear after the divider
// at MinByte.
std::vector<ArrayRef<uint8_t>> Used;
for (const VirtualCallTarget &Target : Targets) {
ArrayRef<uint8_t> VTUsed = IsAfter ? Target.TM->Bits->After.BytesUsed
: Target.TM->Bits->Before.BytesUsed;
uint64_t Offset = IsAfter ? MinByte - Target.minAfterBytes()
: MinByte - Target.minBeforeBytes();
// Disregard used regions that are smaller than Offset. These are
// effectively all-free regions that do not need to be checked.
if (VTUsed.size() > Offset)
Used.push_back(VTUsed.slice(Offset));
}
if (Size == 1) {
// Find a free bit in each member of Used.
for (unsigned I = 0;; ++I) {
uint8_t BitsUsed = 0;
for (auto &&B : Used)
if (I < B.size())
BitsUsed |= B[I];
if (BitsUsed != 0xff)
return (MinByte + I) * 8 +
countTrailingZeros(uint8_t(~BitsUsed), ZB_Undefined);
}
} else {
// Find a free (Size/8) byte region in each member of Used.
// FIXME: see if alignment helps.
for (unsigned I = 0;; ++I) {
for (auto &&B : Used) {
unsigned Byte = 0;
while ((I + Byte) < B.size() && Byte < (Size / 8)) {
if (B[I + Byte])
goto NextI;
++Byte;
}
}
return (MinByte + I) * 8;
NextI:;
}
}
}
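The VTUsed.slice(Offset) call is what aligns each used region at MinByte: slice(N) yields a new view with the first N elements dropped, again without copying. A standalone sketch of both slice overloads (the concrete values asserted are ours):

#include "llvm/ADT/ArrayRef.h"
#include <cassert>
#include <cstdint>

int main() {
  uint8_t Bytes[] = {0, 1, 2, 3, 4, 5, 6, 7};
  llvm::ArrayRef<uint8_t> Used(Bytes);
  // slice(N) drops the first N elements, as in VTUsed.slice(Offset) above.
  llvm::ArrayRef<uint8_t> Tail = Used.slice(3);
  assert(Tail.size() == 5 && Tail[0] == 3);
  // slice(N, M) takes M elements starting at index N.
  llvm::ArrayRef<uint8_t> Mid = Used.slice(2, 4);
  assert(Mid.front() == 2 && Mid.back() == 5);
  return 0;
}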
Example 13: p
std::auto_ptr<PBQPRAProblem> PBQPBuilder::build(MachineFunction *mf,
const LiveIntervals *lis,
const MachineLoopInfo *loopInfo,
const RegSet &vregs) {
LiveIntervals *LIS = const_cast<LiveIntervals*>(lis);
MachineRegisterInfo *mri = &mf->getRegInfo();
const TargetRegisterInfo *tri = mf->getTarget().getRegisterInfo();
std::auto_ptr<PBQPRAProblem> p(new PBQPRAProblem());
PBQP::Graph &g = p->getGraph();
RegSet pregs;
// Collect the set of preg intervals, record that they're used in the MF.
for (unsigned Reg = 1, e = tri->getNumRegs(); Reg != e; ++Reg) {
if (mri->def_empty(Reg))
continue;
pregs.insert(Reg);
mri->setPhysRegUsed(Reg);
}
BitVector reservedRegs = tri->getReservedRegs(*mf);
// Iterate over vregs.
for (RegSet::const_iterator vregItr = vregs.begin(), vregEnd = vregs.end();
vregItr != vregEnd; ++vregItr) {
unsigned vreg = *vregItr;
const TargetRegisterClass *trc = mri->getRegClass(vreg);
LiveInterval *vregLI = &LIS->getInterval(vreg);
// Record any overlaps with regmask operands.
BitVector regMaskOverlaps;
LIS->checkRegMaskInterference(*vregLI, regMaskOverlaps);
// Compute an initial allowed set for the current vreg.
typedef std::vector<unsigned> VRAllowed;
VRAllowed vrAllowed;
ArrayRef<uint16_t> rawOrder = trc->getRawAllocationOrder(*mf);
for (unsigned i = 0; i != rawOrder.size(); ++i) {
unsigned preg = rawOrder[i];
if (reservedRegs.test(preg))
continue;
// vregLI crosses a regmask operand that clobbers preg.
if (!regMaskOverlaps.empty() && !regMaskOverlaps.test(preg))
continue;
// vregLI overlaps fixed regunit interference.
bool Interference = false;
for (MCRegUnitIterator Units(preg, tri); Units.isValid(); ++Units) {
if (vregLI->overlaps(LIS->getRegUnit(*Units))) {
Interference = true;
break;
}
}
if (Interference)
continue;
// preg is usable for this virtual register.
vrAllowed.push_back(preg);
}
// Construct the node.
PBQP::Graph::NodeItr node =
g.addNode(PBQP::Vector(vrAllowed.size() + 1, 0));
// Record the mapping and allowed set in the problem.
p->recordVReg(vreg, node, vrAllowed.begin(), vrAllowed.end());
PBQP::PBQPNum spillCost = (vregLI->weight != 0.0) ?
vregLI->weight : std::numeric_limits<PBQP::PBQPNum>::min();
addSpillCosts(g.getNodeCosts(node), spillCost);
}
for (RegSet::const_iterator vr1Itr = vregs.begin(), vrEnd = vregs.end();
vr1Itr != vrEnd; ++vr1Itr) {
unsigned vr1 = *vr1Itr;
const LiveInterval &l1 = lis->getInterval(vr1);
const PBQPRAProblem::AllowedSet &vr1Allowed = p->getAllowedSet(vr1);
for (RegSet::const_iterator vr2Itr = llvm::next(vr1Itr);
vr2Itr != vrEnd; ++vr2Itr) {
unsigned vr2 = *vr2Itr;
const LiveInterval &l2 = lis->getInterval(vr2);
const PBQPRAProblem::AllowedSet &vr2Allowed = p->getAllowedSet(vr2);
assert(!l2.empty() && "Empty interval in vreg set?");
if (l1.overlaps(l2)) {
PBQP::Graph::EdgeItr edge =
g.addEdge(p->getNodeForVReg(vr1), p->getNodeForVReg(vr2),
PBQP::Matrix(vr1Allowed.size()+1, vr2Allowed.size()+1, 0));
addInterferenceCosts(g.getEdgeCosts(edge), vr1Allowed, vr2Allowed, tri);
}
}
}
return p;
}
Example 14: getInstruction
MCDisassembler::DecodeStatus WebAssemblyDisassembler::getInstruction(
MCInst &MI, uint64_t &Size, ArrayRef<uint8_t> Bytes, uint64_t /*Address*/,
raw_ostream &OS, raw_ostream &CS) const {
Size = 0;
uint64_t Pos = 0;
// Read the opcode.
if (Pos + sizeof(uint64_t) > Bytes.size())
return MCDisassembler::Fail;
uint64_t Opcode = support::endian::read64le(Bytes.data() + Pos);
Pos += sizeof(uint64_t);
if (Opcode >= WebAssembly::INSTRUCTION_LIST_END)
return MCDisassembler::Fail;
MI.setOpcode(Opcode);
const MCInstrDesc &Desc = MCII->get(Opcode);
unsigned NumFixedOperands = Desc.NumOperands;
// If it's variadic, read the number of extra operands.
unsigned NumExtraOperands = 0;
if (Desc.isVariadic()) {
if (Pos + sizeof(uint64_t) > Bytes.size())
return MCDisassembler::Fail;
NumExtraOperands = support::endian::read64le(Bytes.data() + Pos);
Pos += sizeof(uint64_t);
}
// Read the fixed operands. These are described by the MCInstrDesc.
for (unsigned i = 0; i < NumFixedOperands; ++i) {
const MCOperandInfo &Info = Desc.OpInfo[i];
switch (Info.OperandType) {
case MCOI::OPERAND_IMMEDIATE:
case WebAssembly::OPERAND_P2ALIGN:
case WebAssembly::OPERAND_BASIC_BLOCK: {
if (Pos + sizeof(uint64_t) > Bytes.size())
return MCDisassembler::Fail;
uint64_t Imm = support::endian::read64le(Bytes.data() + Pos);
Pos += sizeof(uint64_t);
MI.addOperand(MCOperand::createImm(Imm));
break;
}
case MCOI::OPERAND_REGISTER: {
if (Pos + sizeof(uint64_t) > Bytes.size())
return MCDisassembler::Fail;
uint64_t Reg = support::endian::read64le(Bytes.data() + Pos);
Pos += sizeof(uint64_t);
MI.addOperand(MCOperand::createReg(Reg));
break;
}
case WebAssembly::OPERAND_FPIMM: {
// TODO: MC converts all floating point immediate operands to double.
// This is fine for numeric values, but may cause NaNs to change bits.
if (Pos + sizeof(uint64_t) > Bytes.size())
return MCDisassembler::Fail;
uint64_t Bits = support::endian::read64le(Bytes.data() + Pos);
Pos += sizeof(uint64_t);
double Imm;
memcpy(&Imm, &Bits, sizeof(Imm));
MI.addOperand(MCOperand::createFPImm(Imm));
break;
}
default:
llvm_unreachable("unimplemented operand kind");
}
}
// Read the extra operands.
assert(NumExtraOperands == 0 || Desc.isVariadic());
for (unsigned i = 0; i < NumExtraOperands; ++i) {
if (Pos + sizeof(uint64_t) > Bytes.size())
return MCDisassembler::Fail;
if (Desc.TSFlags & WebAssemblyII::VariableOpIsImmediate) {
// Decode extra immediate operands.
uint64_t Imm = support::endian::read64le(Bytes.data() + Pos);
MI.addOperand(MCOperand::createImm(Imm));
} else {
// Decode extra register operands.
uint64_t Reg = support::endian::read64le(Bytes.data() + Pos);
MI.addOperand(MCOperand::createReg(Reg));
}
Pos += sizeof(uint64_t);
}
Size = Pos;
return MCDisassembler::Success;
}
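The disassembler treats the instruction stream as ArrayRef<uint8_t> and repeats one guarded-read step throughout: check that enough bytes remain via size(), read through data(), advance Pos. A reduced sketch of that step; we use memcpy instead of LLVM's support::endian helpers to keep the sketch dependency-light, so it assumes a little-endian host:

#include "llvm/ADT/ArrayRef.h"
#include <cstdint>
#include <cstring>

// Read a little-endian u64 at Pos, advancing Pos on success. Mirrors the
// bounds check performed before every read in getInstruction above.
static bool readU64(llvm::ArrayRef<uint8_t> Bytes, uint64_t &Pos,
                    uint64_t &Out) {
  if (Pos + sizeof(uint64_t) > Bytes.size())
    return false;
  std::memcpy(&Out, Bytes.data() + Pos, sizeof(uint64_t));
  Pos += sizeof(uint64_t);
  return true;
}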
Example 15: setCallSiteLandingPad
void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
ArrayRef<unsigned> Sites) {
LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
}
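A closing observation that this one-liner makes well: because ArrayRef exposes begin()/end(), it plugs into any iterator-pair API, here SmallVector::append. The same shape in isolation, with our own names:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

// Append the contents of an ArrayRef to a SmallVector via the iterator
// pair, exactly as setCallSiteLandingPad does with Sites.
static void appendSites(llvm::SmallVectorImpl<unsigned> &Dest,
                        llvm::ArrayRef<unsigned> Sites) {
  Dest.append(Sites.begin(), Sites.end());
}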