This article collects and organizes typical usage examples of the C++ Optional class. If you are wondering what the Optional class is for, how to use it, or what real code that uses it looks like, the selected examples here may help.
The sections below present 15 code examples of the Optional class, ordered by default according to popularity.
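Before the project-specific examples, here is a minimal standalone sketch of the basic Optional pattern. Note that the examples are drawn from several codebases: the LLVM, Clang, and Swift samples use llvm::Optional, while the Blink, Mozilla, and OpenCC samples use their projects' own Optional types with broadly similar intent. This sketch assumes llvm::Optional (llvm/ADT/Optional.h) and a hypothetical helper halveIfEven; building it requires the LLVM headers and support library.
#include "llvm/ADT/Optional.h"
#include "llvm/Support/raw_ostream.h"
// Hypothetical helper: produces a value only when the input is even.
static llvm::Optional<int> halveIfEven(int N) {
  if (N % 2 != 0)
    return llvm::None;   // the "no value" state
  return N / 2;          // implicit construction from a value
}
int main() {
  llvm::Optional<int> Half = halveIfEven(10);
  if (Half)                              // contextual bool: is a value present?
    llvm::outs() << *Half << "\n";       // dereference to read the value
  // Substitute a default when the Optional is empty.
  llvm::outs() << halveIfEven(7).getValueOr(-1) << "\n";
  return 0;
}
The same three operations (test for presence, read the value, fall back to a default) account for most of the Optional uses in the examples that follow.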
Example 1: getObjectVal
void UninitializedObjectChecker::checkEndFunction(
const ReturnStmt *RS, CheckerContext &Context) const {
const auto *CtorDecl = dyn_cast_or_null<CXXConstructorDecl>(
Context.getLocationContext()->getDecl());
if (!CtorDecl)
return;
if (!CtorDecl->isUserProvided())
return;
if (CtorDecl->getParent()->isUnion())
return;
// This avoids essentially the same error being reported multiple times.
if (willObjectBeAnalyzedLater(CtorDecl, Context))
return;
Optional<nonloc::LazyCompoundVal> Object = getObjectVal(CtorDecl, Context);
if (!Object)
return;
FindUninitializedFields F(Context.getState(), Object->getRegion(),
CheckPointeeInitialization);
const UninitFieldMap &UninitFields = F.getUninitFields();
if (UninitFields.empty())
return;
// In non-pedantic mode, if Object's region doesn't contain a single
// initialized field, we'll assume that Object was intentionally left
// uninitialized.
if (!IsPedantic && !F.isAnyFieldInitialized())
return;
// There are uninitialized fields in the record.
ExplodedNode *Node = Context.generateNonFatalErrorNode(Context.getState());
if (!Node)
return;
PathDiagnosticLocation LocUsedForUniqueing;
const Stmt *CallSite = Context.getStackFrame()->getCallSite();
if (CallSite)
LocUsedForUniqueing = PathDiagnosticLocation::createBegin(
CallSite, Context.getSourceManager(), Node->getLocationContext());
// For Plist consumers that don't support notes just yet, we'll convert notes
// to warnings.
if (ShouldConvertNotesToWarnings) {
for (const auto &Pair : UninitFields) {
auto Report = llvm::make_unique<BugReport>(
*BT_uninitField, Pair.second, Node, LocUsedForUniqueing,
Node->getLocationContext()->getDecl());
Context.emitReport(std::move(Report));
}
return;
}
SmallString<100> WarningBuf;
llvm::raw_svector_ostream WarningOS(WarningBuf);
WarningOS << UninitFields.size() << " uninitialized field"
<< (UninitFields.size() == 1 ? "" : "s")
<< " at the end of the constructor call";
auto Report = llvm::make_unique<BugReport>(
*BT_uninitField, WarningOS.str(), Node, LocUsedForUniqueing,
Node->getLocationContext()->getDecl());
for (const auto &Pair : UninitFields) {
Report->addNote(Pair.second,
PathDiagnosticLocation::create(Pair.first->getDecl(),
Context.getSourceManager()));
}
Context.emitReport(std::move(Report));
}
Example 2: ASSERT
PaintLayerPainter::PaintResult PaintLayerPainter::paintLayerContentsInternal(GraphicsContext* context, const PaintLayerPaintingInfo& paintingInfoArg, PaintLayerFlags paintFlags, FragmentPolicy fragmentPolicy)
{
ASSERT(m_paintLayer.isSelfPaintingLayer() || m_paintLayer.hasSelfPaintingLayerDescendant());
ASSERT(!(paintFlags & PaintLayerAppliedTransform));
bool isSelfPaintingLayer = m_paintLayer.isSelfPaintingLayer();
bool isPaintingOverlayScrollbars = paintFlags & PaintLayerPaintingOverlayScrollbars;
bool isPaintingScrollingContent = paintFlags & PaintLayerPaintingCompositingScrollingPhase;
bool isPaintingCompositedForeground = paintFlags & PaintLayerPaintingCompositingForegroundPhase;
bool isPaintingCompositedBackground = paintFlags & PaintLayerPaintingCompositingBackgroundPhase;
bool isPaintingOverflowContents = paintFlags & PaintLayerPaintingOverflowContents;
// Outline always needs to be painted even if we have no visible content. Also,
// the outline is painted in the background phase during composited scrolling.
// If it were painted in the foreground phase, it would move with the scrolled
// content. When not composited scrolling, the outline is painted in the
// foreground phase. Since scrolled contents are moved by paint invalidation in this
// case, the outline won't get 'dragged along'.
bool shouldPaintOutline = isSelfPaintingLayer && !isPaintingOverlayScrollbars
&& ((isPaintingScrollingContent && isPaintingCompositedBackground)
|| (!isPaintingScrollingContent && isPaintingCompositedForeground));
bool shouldPaintContent = m_paintLayer.hasVisibleContent() && isSelfPaintingLayer && !isPaintingOverlayScrollbars;
PaintResult result = FullyPainted;
if (paintFlags & PaintLayerPaintingRootBackgroundOnly && !m_paintLayer.layoutObject()->isLayoutView() && !m_paintLayer.layoutObject()->isDocumentElement())
return result;
PaintLayerPaintingInfo paintingInfo = paintingInfoArg;
// Ensure our lists are up-to-date.
m_paintLayer.stackingNode()->updateLayerListsIfNeeded();
LayoutPoint offsetFromRoot;
m_paintLayer.convertToLayerCoords(paintingInfo.rootLayer, offsetFromRoot);
if (m_paintLayer.compositingState() == PaintsIntoOwnBacking)
offsetFromRoot.move(m_paintLayer.subpixelAccumulation());
else
offsetFromRoot.move(paintingInfo.subPixelAccumulation);
LayoutRect bounds = m_paintLayer.physicalBoundingBox(offsetFromRoot);
if (!paintingInfo.paintDirtyRect.contains(bounds))
result = MaybeNotFullyPainted;
LayoutRect rootRelativeBounds;
bool rootRelativeBoundsComputed = false;
if (paintingInfo.ancestorHasClipPathClipping && m_paintLayer.layoutObject()->style()->position() != StaticPosition)
UseCounter::count(m_paintLayer.layoutObject()->document(), UseCounter::ClipPathOfPositionedElement);
// These helpers output clip and compositing operations using a RAII pattern. Stack-allocated variables are destructed in the reverse order of construction,
// so they are nested properly.
ClipPathHelper clipPathHelper(context, m_paintLayer, paintingInfo, rootRelativeBounds, rootRelativeBoundsComputed, offsetFromRoot, paintFlags);
Optional<CompositingRecorder> compositingRecorder;
// Blending operations must be performed only with the nearest ancestor stacking context.
// Note that there is no need to composite if we're painting the root.
// FIXME: this should be unified further into PaintLayer::paintsWithTransparency().
bool shouldCompositeForBlendMode = (!m_paintLayer.layoutObject()->isDocumentElement() || m_paintLayer.layoutObject()->isSVGRoot()) && m_paintLayer.stackingNode()->isStackingContext() && m_paintLayer.hasNonIsolatedDescendantWithBlendMode();
if (shouldCompositeForBlendMode || m_paintLayer.paintsWithTransparency(paintingInfo.globalPaintFlags())) {
FloatRect compositingBounds = FloatRect(m_paintLayer.paintingExtent(paintingInfo.rootLayer, paintingInfo.subPixelAccumulation, paintingInfo.globalPaintFlags()));
compositingRecorder.emplace(*context, *m_paintLayer.layoutObject(),
WebCoreCompositeToSkiaComposite(CompositeSourceOver, m_paintLayer.layoutObject()->style()->blendMode()),
m_paintLayer.layoutObject()->opacity(), &compositingBounds);
}
PaintLayerPaintingInfo localPaintingInfo(paintingInfo);
if (m_paintLayer.compositingState() == PaintsIntoOwnBacking)
localPaintingInfo.subPixelAccumulation = m_paintLayer.subpixelAccumulation();
PaintLayerFragments layerFragments;
if (shouldPaintContent || shouldPaintOutline || isPaintingOverlayScrollbars) {
// Collect the fragments. This will compute the clip rectangles and paint offsets for each layer fragment.
ClipRectsCacheSlot cacheSlot = (paintFlags & PaintLayerUncachedClipRects) ? UncachedClipRects : PaintingClipRects;
ShouldRespectOverflowClip respectOverflowClip = shouldRespectOverflowClip(paintFlags, m_paintLayer.layoutObject());
if (fragmentPolicy == ForceSingleFragment)
m_paintLayer.appendSingleFragmentIgnoringPagination(layerFragments, localPaintingInfo.rootLayer, localPaintingInfo.paintDirtyRect, cacheSlot, IgnoreOverlayScrollbarSize, respectOverflowClip, &offsetFromRoot, localPaintingInfo.subPixelAccumulation);
else
m_paintLayer.collectFragments(layerFragments, localPaintingInfo.rootLayer, localPaintingInfo.paintDirtyRect, cacheSlot, IgnoreOverlayScrollbarSize, respectOverflowClip, &offsetFromRoot, localPaintingInfo.subPixelAccumulation);
if (shouldPaintContent) {
// TODO(wangxianzhu): This is for old slow scrolling. Implement similar optimization for slimming paint v2.
shouldPaintContent = atLeastOneFragmentIntersectsDamageRect(layerFragments, localPaintingInfo, paintFlags, offsetFromRoot);
if (!shouldPaintContent)
result = MaybeNotFullyPainted;
}
}
bool selectionOnly = localPaintingInfo.globalPaintFlags() & GlobalPaintSelectionOnly;
// If this layer's layoutObject is a child of the paintingRoot, we paint unconditionally, which
// is done by passing a nil paintingRoot down to our layoutObject (as if no paintingRoot was ever set).
// Else, our layout tree may or may not contain the painting root, so we pass that root along
// so it will be tested against as we descend through the layoutObjects.
LayoutObject* paintingRootForLayoutObject = 0;
if (localPaintingInfo.paintingRoot && !m_paintLayer.layoutObject()->isDescendantOf(localPaintingInfo.paintingRoot))
paintingRootForLayoutObject = localPaintingInfo.paintingRoot;
{ // Begin block for the lifetime of any filter.
FilterPainter filterPainter(m_paintLayer, context, offsetFromRoot, layerFragments.isEmpty() ? ClipRect() : layerFragments[0].backgroundRect, localPaintingInfo, paintFlags,
rootRelativeBounds, rootRelativeBoundsComputed);
//......... (portions of the code are omitted here) .........
Example 3: ENABLE
PaintLayerPainter::PaintResult PaintLayerPainter::paintChildren(unsigned childrenToVisit, GraphicsContext* context, const PaintLayerPaintingInfo& paintingInfo, PaintLayerFlags paintFlags)
{
PaintResult result = FullyPainted;
if (!m_paintLayer.hasSelfPaintingLayerDescendant())
return result;
#if ENABLE(ASSERT)
LayerListMutationDetector mutationChecker(m_paintLayer.stackingNode());
#endif
PaintLayerStackingNodeIterator iterator(*m_paintLayer.stackingNode(), childrenToVisit);
PaintLayerStackingNode* child = iterator.next();
if (!child)
return result;
DisplayItem::Type subsequenceType;
if (childrenToVisit == NegativeZOrderChildren) {
subsequenceType = DisplayItem::SubsequenceNegativeZOrder;
} else {
ASSERT(childrenToVisit == (NormalFlowChildren | PositiveZOrderChildren));
subsequenceType = DisplayItem::SubsequenceNormalFlowAndPositiveZOrder;
}
Optional<SubsequenceRecorder> subsequenceRecorder;
if (!paintingInfo.disableSubsequenceCache
&& !(paintingInfo.globalPaintFlags() & GlobalPaintFlattenCompositingLayers)
&& !(paintFlags & PaintLayerPaintingReflection)
&& !(paintFlags & PaintLayerPaintingRootBackgroundOnly)) {
if (!m_paintLayer.needsRepaint()
&& paintingInfo.scrollOffsetAccumulation == m_paintLayer.previousScrollOffsetAccumulationForPainting()
&& SubsequenceRecorder::useCachedSubsequenceIfPossible(*context, m_paintLayer, subsequenceType))
return result;
subsequenceRecorder.emplace(*context, m_paintLayer, subsequenceType);
}
IntSize scrollOffsetAccumulationForChildren = paintingInfo.scrollOffsetAccumulation;
if (m_paintLayer.layoutObject()->hasOverflowClip())
scrollOffsetAccumulationForChildren += m_paintLayer.layoutBox()->scrolledContentOffset();
bool disableChildSubsequenceCache = !RuntimeEnabledFeatures::slimmingPaintV2Enabled()
&& (m_paintLayer.layoutObject()->hasOverflowClip() || m_paintLayer.layoutObject()->hasClip());
for (; child; child = iterator.next()) {
PaintLayerPainter childPainter(*child->layer());
// If this Layer should paint into its own backing or a grouped backing, that will be done via CompositedLayerMapping::paintContents()
// and CompositedLayerMapping::doPaintTask().
if (!childPainter.shouldPaintLayerInSoftwareMode(paintingInfo.globalPaintFlags(), paintFlags))
continue;
PaintLayerPaintingInfo childPaintingInfo = paintingInfo;
childPaintingInfo.disableSubsequenceCache = disableChildSubsequenceCache;
childPaintingInfo.scrollOffsetAccumulation = scrollOffsetAccumulationForChildren;
// Rare case: accumulate scroll offset of non-stacking-context ancestors up to m_paintLayer.
for (PaintLayer* parentLayer = child->layer()->parent(); parentLayer != &m_paintLayer; parentLayer = parentLayer->parent()) {
if (parentLayer->layoutObject()->hasOverflowClip())
childPaintingInfo.scrollOffsetAccumulation += parentLayer->layoutBox()->scrolledContentOffset();
}
if (childPainter.paintLayer(context, childPaintingInfo, paintFlags) == MaybeNotFullyPainted)
result = MaybeNotFullyPainted;
}
// Set subsequence not cacheable if the bounding box of this layer and descendants is not fully contained
// by paintRect, because later paintRect changes may expose new contents which will need repainting.
if (result == MaybeNotFullyPainted && subsequenceRecorder)
subsequenceRecorder->setUncacheable();
return result;
}
Example 4: matchVectorSplittingReduction
static ReductionKind
matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
unsigned &Opcode, Type *&Ty) {
if (!EnableReduxCost)
return RK_None;
// Need to extract the first element.
ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
unsigned Idx = ~0u;
if (CI)
Idx = CI->getZExtValue();
if (Idx != 0)
return RK_None;
auto *RdxStart = dyn_cast<Instruction>(ReduxRoot->getOperand(0));
if (!RdxStart)
return RK_None;
Optional<ReductionData> RD = getReductionData(RdxStart);
if (!RD)
return RK_None;
Type *VecTy = ReduxRoot->getOperand(0)->getType();
unsigned NumVecElems = VecTy->getVectorNumElements();
if (!isPowerOf2_32(NumVecElems))
return RK_None;
// We look for a sequence of shuffles and adds like the following matching one
// fadd, shuffle vector pair at a time.
//
// %rdx.shuf = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
// %bin.rdx = fadd <4 x float> %rdx, %rdx.shuf
// %rdx.shuf7 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
// <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
// %bin.rdx8 = fadd <4 x float> %bin.rdx, %rdx.shuf7
// %r = extractelement <4 x float> %bin.rdx8, i32 0
unsigned MaskStart = 1;
Instruction *RdxOp = RdxStart;
SmallVector<int, 32> ShuffleMask(NumVecElems, 0);
unsigned NumVecElemsRemain = NumVecElems;
while (NumVecElemsRemain - 1) {
// Check for the right reduction operation.
if (!RdxOp)
return RK_None;
Optional<ReductionData> RDLevel = getReductionData(RdxOp);
if (!RDLevel || !RDLevel->hasSameData(*RD))
return RK_None;
Value *NextRdxOp;
ShuffleVectorInst *Shuffle;
std::tie(NextRdxOp, Shuffle) =
getShuffleAndOtherOprd(RDLevel->LHS, RDLevel->RHS);
// Check the current reduction operation and the shuffle use the same value.
if (Shuffle == nullptr)
return RK_None;
if (Shuffle->getOperand(0) != NextRdxOp)
return RK_None;
// Check that the shuffle masks match.
for (unsigned j = 0; j != MaskStart; ++j)
ShuffleMask[j] = MaskStart + j;
// Fill the rest of the mask with -1 for undef.
std::fill(&ShuffleMask[MaskStart], ShuffleMask.end(), -1);
SmallVector<int, 16> Mask = Shuffle->getShuffleMask();
if (ShuffleMask != Mask)
return RK_None;
RdxOp = dyn_cast<Instruction>(NextRdxOp);
NumVecElemsRemain /= 2;
MaskStart *= 2;
}
Opcode = RD->Opcode;
Ty = VecTy;
return RD->Kind;
}
Example 5: TestDict
static void TestDict(DictPtr dict) {
Optional<DictEntry> entry;
entry = dict->MatchPrefix("BYVoid");
AssertTrue(!entry.IsNull());
AssertEquals("BYVoid", entry.Get().key);
AssertEquals("byv", entry.Get().GetDefault());
entry = dict->MatchPrefix("BYVoid123");
AssertTrue(!entry.IsNull());
AssertEquals("BYVoid", entry.Get().key);
AssertEquals("byv", entry.Get().GetDefault());
entry = dict->MatchPrefix(utf8("積羽沉舟"));
AssertTrue(!entry.IsNull());
AssertEquals(utf8("積羽沉舟"), entry.Get().key);
AssertEquals(utf8("羣輕折軸"), entry.Get().GetDefault());
entry = dict->MatchPrefix("Unknown");
AssertTrue(entry.IsNull());
const vector<DictEntry> matches = dict->MatchAllPrefixes(utf8("清華大學計算機系"));
AssertEquals(3, matches.size());
AssertEquals(utf8("清華大學"), matches.at(0).key);
AssertEquals("TsinghuaUniversity", matches.at(0).GetDefault());
AssertEquals(utf8("清華"), matches.at(1).key);
AssertEquals("Tsinghua", matches.at(1).GetDefault());
AssertEquals(utf8("清"), matches.at(2).key);
AssertEquals("Tsing", matches.at(2).GetDefault());
}
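Example 5 exercises a different Optional flavor (from OpenCC), queried with IsNull() and Get() rather than with operator bool and operator* as in the LLVM examples. For illustration only, here is a minimal hypothetical sketch of an Optional with that style of interface; it is not OpenCC's actual implementation, which also stores a default value returned by GetDefault().
#include <cassert>
#include <utility>
// Illustrative only: a tiny Optional exposing IsNull()/Get(), as seen in
// Example 5. Real implementations avoid requiring a default-constructible T,
// provide emplacement, and so on.
template <typename T>
class SimpleOptional {
 public:
  SimpleOptional() : isNull_(true), value_() {}
  explicit SimpleOptional(T value) : isNull_(false), value_(std::move(value)) {}
  bool IsNull() const { return isNull_; }
  const T& Get() const { assert(!isNull_); return value_; }
 private:
  bool isNull_;
  T value_;
};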
Example 6: classifyDynamicCast
/// Try to classify the dynamic-cast relationship between two types.
DynamicCastFeasibility
swift::classifyDynamicCast(Module *M,
CanType source,
CanType target,
bool isSourceTypeExact,
bool isWholeModuleOpts) {
if (source == target) return DynamicCastFeasibility::WillSucceed;
auto sourceObject = source.getAnyOptionalObjectType();
auto targetObject = target.getAnyOptionalObjectType();
// A common level of optionality doesn't affect the feasibility.
if (sourceObject && targetObject) {
return classifyDynamicCast(M, sourceObject, targetObject);
// Nor does casting to a more optional type.
} else if (targetObject) {
return classifyDynamicCast(M, source, targetObject,
/* isSourceTypeExact */ false,
isWholeModuleOpts);
// Casting to a less-optional type can always fail.
} else if (sourceObject) {
return weakenSuccess(classifyDynamicCast(M, sourceObject, target,
/* isSourceTypeExact */ false,
isWholeModuleOpts));
}
assert(!sourceObject && !targetObject);
// Assume that casts to or from existential types or involving
// dependent types can always succeed. This is over-conservative.
if (source->hasArchetype() || source.isExistentialType() ||
target->hasArchetype() || target.isExistentialType()) {
auto *SourceNominalTy = source.getAnyNominal();
// Check conversions from non-protocol types into protocol types.
if (!source.isExistentialType() &&
SourceNominalTy &&
target.isExistentialType())
return classifyDynamicCastToProtocol(source, target, isWholeModuleOpts);
// Casts from class existential into a non-class can never succeed.
if (source->isClassExistentialType() &&
!target.isAnyExistentialType() &&
!target.getClassOrBoundGenericClass() &&
!isa<ArchetypeType>(target) &&
!mayBridgeToObjectiveC(M, target)) {
assert((target.getEnumOrBoundGenericEnum() ||
target.getStructOrBoundGenericStruct() ||
isa<TupleType>(target) ||
isa<SILFunctionType>(target) ||
isa<FunctionType>(target) ||
isa<MetatypeType>(target)) &&
"Target should be an enum, struct, tuple, metatype or function type");
return DynamicCastFeasibility::WillFail;
}
return DynamicCastFeasibility::MaySucceed;
}
// Metatype casts.
if (auto sourceMetatype = dyn_cast<AnyMetatypeType>(source)) {
auto targetMetatype = dyn_cast<AnyMetatypeType>(target);
if (!targetMetatype) return DynamicCastFeasibility::WillFail;
source = sourceMetatype.getInstanceType();
target = targetMetatype.getInstanceType();
if (source == target &&
targetMetatype.isAnyExistentialType() ==
sourceMetatype.isAnyExistentialType())
return DynamicCastFeasibility::WillSucceed;
if (targetMetatype.isAnyExistentialType() &&
(isa<ProtocolType>(target) || isa<ProtocolCompositionType>(target))) {
auto Feasibility = classifyDynamicCastToProtocol(source,
target,
isWholeModuleOpts);
// Cast from existential metatype to existential metatype may still
// succeed, even if we cannot prove anything statically.
if (Feasibility != DynamicCastFeasibility::WillFail ||
!sourceMetatype.isAnyExistentialType())
return Feasibility;
}
// If isSourceTypeExact is true, we know we are casting the result of a
// MetatypeInst instruction.
if (isSourceTypeExact) {
// If source or target are existentials, then it can be cast
// successfully only into itself.
if ((target.isAnyExistentialType() || source.isAnyExistentialType()) &&
target != source)
return DynamicCastFeasibility::WillFail;
}
// Casts from class existential metatype into a concrete non-class metatype
// can never succeed.
if (source->isClassExistentialType() &&
//......... (portions of the code are omitted here) .........
Example 7: getEffectiveRelocModel
static Reloc::Model getEffectiveRelocModel(CodeModel::Model CM,
Optional<Reloc::Model> RM) {
if (!RM.hasValue() || CM == CodeModel::JITDefault)
return Reloc::Static;
return *RM;
}
Example 8: getEffectiveRelocModel
static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
if (!RM.hasValue())
return Reloc::Static;
return *RM;
}
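Examples 7 and 8 show the recurring pattern of falling back to a default when the Optional carries no value. Assuming llvm::Optional, that check can be written more compactly with getValueOr; a minimal sketch of the Example 8 variant (the surrounding TargetMachine code is omitted, and the function name here is made up):
#include "llvm/ADT/Optional.h"
#include "llvm/Support/CodeGen.h"
// Equivalent to Example 8: use the requested relocation model if one was
// given, otherwise default to static relocation.
static llvm::Reloc::Model
getEffectiveRelocModelSketch(llvm::Optional<llvm::Reloc::Model> RM) {
  return RM.getValueOr(llvm::Reloc::Static);
}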
Example 9: ParseParams
void
HTMLCanvasElement::ToBlob(JSContext* aCx,
FileCallback& aCallback,
const nsAString& aType,
const Optional<JS::Handle<JS::Value> >& aParams,
ErrorResult& aRv)
{
// do a trust check if this is a write-only canvas
if (mWriteOnly && !nsContentUtils::IsCallerChrome()) {
aRv.Throw(NS_ERROR_DOM_SECURITY_ERR);
return;
}
nsAutoString type;
aRv = nsContentUtils::ASCIIToLower(aType, type);
if (aRv.Failed()) {
return;
}
JS::Value encoderOptions = aParams.WasPassed()
? aParams.Value()
: JS::UndefinedValue();
nsAutoString params;
bool usingCustomParseOptions;
aRv = ParseParams(aCx, type, encoderOptions, params, &usingCustomParseOptions);
if (aRv.Failed()) {
return;
}
#ifdef DEBUG
if (mCurrentContext) {
// We disallow canvases of width or height zero, and set them to 1, so
// we will have a discrepancy with the sizes of the canvas and the context.
// That discrepancy is OK, the rest are not.
nsIntSize elementSize = GetWidthHeight();
MOZ_ASSERT(elementSize.width == mCurrentContext->GetWidth() ||
(elementSize.width == 0 && mCurrentContext->GetWidth() == 1));
MOZ_ASSERT(elementSize.height == mCurrentContext->GetHeight() ||
(elementSize.height == 0 && mCurrentContext->GetHeight() == 1));
}
#endif
nsCOMPtr<nsIScriptContext> scriptContext =
GetScriptContextFromJSContext(nsContentUtils::GetCurrentJSContext());
uint8_t* imageBuffer = nullptr;
int32_t format = 0;
if (mCurrentContext) {
mCurrentContext->GetImageBuffer(&imageBuffer, &format);
}
aRv = ImageEncoder::ExtractDataAsync(type,
params,
usingCustomParseOptions,
imageBuffer,
format,
GetSize(),
mCurrentContext,
scriptContext,
aCallback);
}
Example 10: getModule
void NameBinder::addImport(
SmallVectorImpl<std::pair<ImportedModule, ImportOptions>> &imports,
ImportDecl *ID) {
if (ID->getModulePath().front().first == SF.getParentModule()->getName() &&
ID->getModulePath().size() == 1 && !shouldImportSelfImportClang(ID, SF)) {
// If the imported module name is the same as the current module,
// produce a diagnostic.
StringRef filename = llvm::sys::path::filename(SF.getFilename());
if (filename.empty())
Context.Diags.diagnose(ID, diag::sema_import_current_module,
ID->getModulePath().front().first);
else
Context.Diags.diagnose(ID, diag::sema_import_current_module_with_file,
filename, ID->getModulePath().front().first);
ID->setModule(SF.getParentModule());
return;
}
Module *M = getModule(ID->getModulePath());
if (!M) {
SmallString<64> modulePathStr;
interleave(ID->getModulePath(),
[&](ImportDecl::AccessPathElement elem) {
modulePathStr += elem.first.str();
},
[&] { modulePathStr += "."; });
auto diagKind = diag::sema_no_import;
if (SF.Kind == SourceFileKind::REPL || Context.LangOpts.DebuggerSupport)
diagKind = diag::sema_no_import_repl;
diagnose(ID->getLoc(), diagKind, modulePathStr);
if (Context.SearchPathOpts.SDKPath.empty() &&
llvm::Triple(llvm::sys::getProcessTriple()).isMacOSX()) {
diagnose(SourceLoc(), diag::sema_no_import_no_sdk);
diagnose(SourceLoc(), diag::sema_no_import_no_sdk_xcrun);
}
return;
}
ID->setModule(M);
Module *topLevelModule;
if (ID->getModulePath().size() == 1) {
topLevelModule = M;
} else {
// If we imported a submodule, import the top-level module as well.
Identifier topLevelName = ID->getModulePath().front().first;
topLevelModule = Context.getLoadedModule(topLevelName);
assert(topLevelModule && "top-level module missing");
}
auto *testableAttr = ID->getAttrs().getAttribute<TestableAttr>();
if (testableAttr && !topLevelModule->isTestingEnabled() &&
Context.LangOpts.EnableTestableAttrRequiresTestableModule) {
diagnose(ID->getModulePath().front().second, diag::module_not_testable,
topLevelModule->getName());
testableAttr->setInvalid();
}
ImportOptions options;
if (ID->isExported())
options |= SourceFile::ImportFlags::Exported;
if (testableAttr)
options |= SourceFile::ImportFlags::Testable;
imports.push_back({ { ID->getDeclPath(), M }, options });
if (topLevelModule != M)
imports.push_back({ { ID->getDeclPath(), topLevelModule }, options });
if (ID->getImportKind() != ImportKind::Module) {
// If we're importing a specific decl, validate the import kind.
using namespace namelookup;
auto declPath = ID->getDeclPath();
// FIXME: Doesn't handle scoped testable imports correctly.
assert(declPath.size() == 1 && "can't handle sub-decl imports");
SmallVector<ValueDecl *, 8> decls;
lookupInModule(topLevelModule, declPath, declPath.front().first, decls,
NLKind::QualifiedLookup, ResolutionKind::Overloadable,
/*resolver*/nullptr, &SF);
if (decls.empty()) {
diagnose(ID, diag::no_decl_in_module)
.highlight(SourceRange(declPath.front().second,
declPath.back().second));
return;
}
ID->setDecls(Context.AllocateCopy(decls));
Optional<ImportKind> actualKind = ImportDecl::findBestImportKind(decls);
if (!actualKind.hasValue()) {
// FIXME: print entire module name?
diagnose(ID, diag::ambiguous_decl_in_module,
declPath.front().first, M->getName());
for (auto next : decls)
diagnose(next, diag::found_candidate);
} else if (!isCompatibleImportKind(ID->getImportKind(), *actualKind)) {
//......... (portions of the code are omitted here) .........
Example 11: findPreviousSpillSlot
/// Utility function for reservePreviousStackSlotForValue. Tries to find
/// stack slot index to which we have spilled value for previous statepoints.
/// LookUpDepth specifies maximum DFS depth this function is allowed to look.
static Optional<int> findPreviousSpillSlot(const Value *Val,
SelectionDAGBuilder &Builder,
int LookUpDepth) {
// Can not look any further - give up now
if (LookUpDepth <= 0)
return None;
// Spill location is known for gc relocates
if (const auto *Relocate = dyn_cast<GCRelocateInst>(Val)) {
const auto &SpillMap =
Builder.FuncInfo.StatepointSpillMaps[Relocate->getStatepoint()];
auto It = SpillMap.find(Relocate->getDerivedPtr());
if (It == SpillMap.end())
return None;
return It->second;
}
// Look through bitcast instructions.
if (const BitCastInst *Cast = dyn_cast<BitCastInst>(Val))
return findPreviousSpillSlot(Cast->getOperand(0), Builder, LookUpDepth - 1);
// Look through phi nodes
// All incoming values should have same known stack slot, otherwise result
// is unknown.
if (const PHINode *Phi = dyn_cast<PHINode>(Val)) {
Optional<int> MergedResult = None;
for (auto &IncomingValue : Phi->incoming_values()) {
Optional<int> SpillSlot =
findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1);
if (!SpillSlot.hasValue())
return None;
if (MergedResult.hasValue() && *MergedResult != *SpillSlot)
return None;
MergedResult = SpillSlot;
}
return MergedResult;
}
// TODO: We can do better for PHI nodes. In cases like this:
// ptr = phi(relocated_pointer, not_relocated_pointer)
// statepoint(ptr)
// We will return that stack slot for ptr is unknown. And later we might
// assign different stack slots for ptr and relocated_pointer. This limits
// llvm's ability to remove redundant stores.
// Unfortunately it's hard to accomplish in current infrastructure.
// We use this function to eliminate spill store completely, while
// in example we still need to emit store, but instead of any location
// we need to use special "preferred" location.
// TODO: handle simple updates. If a value is modified and the original
// value is no longer live, it would be nice to put the modified value in the
// same slot. This allows folding of the memory accesses for some
// instructions types (like an increment).
// statepoint (i)
// i1 = i+1
// statepoint (i1)
// However we need to be careful for cases like this:
// statepoint(i)
// i1 = i+1
// statepoint(i, i1)
// Here we want to reserve spill slot for 'i', but not for 'i+1'. If we just
// put handling of simple modifications in this function like it's done
// for bitcasts we might end up reserving i's slot for 'i+1' because order in
// which we visit values is unspecified.
// Don't know any information about this instruction
return None;
}
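The PHI handling in Example 11 illustrates a small reusable idiom: fold Optional results from several inputs, and give up (return None) as soon as any input is unknown or two inputs disagree. A distilled sketch of just that merge step, assuming llvm::Optional and llvm::ArrayRef (the helper name is made up):
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
// Merge per-input spill slots: all inputs must agree on one known slot,
// otherwise the combined answer is "unknown" (None).
static llvm::Optional<int>
mergeSpillSlots(llvm::ArrayRef<llvm::Optional<int>> Slots) {
  llvm::Optional<int> Merged = llvm::None;
  for (const llvm::Optional<int> &Slot : Slots) {
    if (!Slot.hasValue())
      return llvm::None;                      // any unknown input -> unknown
    if (Merged.hasValue() && *Merged != *Slot)
      return llvm::None;                      // conflicting inputs -> unknown
    Merged = Slot;
  }
  return Merged;
}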
Example 12: GetCFNumberSize
void CFNumberChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
ProgramStateRef state = C.getState();
const FunctionDecl *FD = C.getCalleeDecl(CE);
if (!FD)
return;
ASTContext &Ctx = C.getASTContext();
if (!ICreate) {
ICreate = &Ctx.Idents.get("CFNumberCreate");
IGetValue = &Ctx.Idents.get("CFNumberGetValue");
}
if (!(FD->getIdentifier() == ICreate || FD->getIdentifier() == IGetValue) ||
CE->getNumArgs() != 3)
return;
// Get the value of the "theType" argument.
const LocationContext *LCtx = C.getLocationContext();
SVal TheTypeVal = state->getSVal(CE->getArg(1), LCtx);
// FIXME: We really should allow ranges of valid theType values, and
// bifurcate the state appropriately.
Optional<nonloc::ConcreteInt> V = TheTypeVal.getAs<nonloc::ConcreteInt>();
if (!V)
return;
uint64_t NumberKind = V->getValue().getLimitedValue();
Optional<uint64_t> OptCFNumberSize = GetCFNumberSize(Ctx, NumberKind);
// FIXME: In some cases we can emit an error.
if (!OptCFNumberSize)
return;
uint64_t CFNumberSize = *OptCFNumberSize;
// Look at the value of the integer being passed by reference. Essentially
// we want to catch cases where the value passed in is not equal to the
// size of the type being created.
SVal TheValueExpr = state->getSVal(CE->getArg(2), LCtx);
// FIXME: Eventually we should handle arbitrary locations. We can do this
// by having an enhanced memory model that does low-level typing.
Optional<loc::MemRegionVal> LV = TheValueExpr.getAs<loc::MemRegionVal>();
if (!LV)
return;
const TypedValueRegion* R = dyn_cast<TypedValueRegion>(LV->stripCasts());
if (!R)
return;
QualType T = Ctx.getCanonicalType(R->getValueType());
// FIXME: If the pointee isn't an integer type, should we flag a warning?
// People can do weird stuff with pointers.
if (!T->isIntegralOrEnumerationType())
return;
uint64_t PrimitiveTypeSize = Ctx.getTypeSize(T);
if (PrimitiveTypeSize == CFNumberSize)
return;
// FIXME: We can actually create an abstract "CFNumber" object that has
// the bits initialized to the provided values.
ExplodedNode *N = C.generateNonFatalErrorNode();
if (N) {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
bool isCreate = (FD->getIdentifier() == ICreate);
if (isCreate) {
os << (PrimitiveTypeSize == 8 ? "An " : "A ")
<< PrimitiveTypeSize << "-bit integer is used to initialize a "
<< "CFNumber object that represents "
<< (CFNumberSize == 8 ? "an " : "a ")
<< CFNumberSize << "-bit integer; ";
} else {
os << "A CFNumber object that represents "
<< (CFNumberSize == 8 ? "an " : "a ")
<< CFNumberSize << "-bit integer is used to initialize "
<< (PrimitiveTypeSize == 8 ? "an " : "a ")
<< PrimitiveTypeSize << "-bit integer; ";
}
if (PrimitiveTypeSize < CFNumberSize)
os << (CFNumberSize - PrimitiveTypeSize)
<< " bits of the CFNumber value will "
<< (isCreate ? "be garbage." : "overwrite adjacent storage.");
else
os << (PrimitiveTypeSize - CFNumberSize)
<< " bits of the integer value will be "
<< (isCreate ? "lost." : "garbage.");
if (!BT)
BT.reset(new APIMisuse(this, "Bad use of CFNumber APIs"));
auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
report->addRange(CE->getArg(2)->getSourceRange());
C.emitReport(std::move(report));
//......... (portions of the code are omitted here) .........
Example 13: onTelegram
void Messaging::onTelegram(const Optional<CoAP::Telegram>& telegram) {
auto message = messageFromTelegram(telegram);
if (message) onMessage(message.value(), telegram.value().getIP(), telegram.value().getPort());
}
Example 14: processFunction
//......... (portions of the code are omitted here) .........
continue;
}
// Go through all users of the constant and try to fold them.
FoldedUsers.clear();
for (auto Use : I->getUses()) {
SILInstruction *User = Use->getUser();
DEBUG(llvm::dbgs() << " User: " << *User);
// It is possible that we had processed this user already. Do not try
// to fold it again if we had previously produced an error while folding
// it. It is not always possible to fold an instruction in case of error.
if (ErrorSet.count(User))
continue;
// Some constant users may indirectly cause folding of their users.
if (isa<StructInst>(User) || isa<TupleInst>(User)) {
WorkList.insert(User);
continue;
}
// Always consider cond_fail instructions as potential for DCE. If the
// expression feeding them is false, they are dead. We can't handle this
// as part of the constant folding logic, because there is no value
// they can produce (other than empty tuple, which is wasteful).
if (isa<CondFailInst>(User))
FoldedUsers.insert(User);
// Initialize ResultsInError as a None optional.
//
// We are essentially using this optional to represent 3 states: true,
// false, and n/a.
Optional<bool> ResultsInError;
// If we are asked to emit diagnostics, override ResultsInError with a
// Some optional initialized to false.
if (EnableDiagnostics)
ResultsInError = false;
// Try to fold the user. If ResultsInError is None, we do not emit any
// diagnostics. If ResultsInError is some, we use it as our return value.
SILValue C = constantFoldInstruction(*User, ResultsInError);
// If we did not pass in a None and the optional is set to true, add the
// user to our error set.
if (ResultsInError.hasValue() && ResultsInError.getValue())
ErrorSet.insert(User);
// We failed to constant propagate... continue...
if (!C)
continue;
// Ok, we have succeeded. Add user to the FoldedUsers list and perform the
// necessary cleanups, RAUWs, etc.
FoldedUsers.insert(User);
++NumInstFolded;
InvalidateInstructions = true;
// If the constant produced a tuple, be smarter than RAUW: explicitly nuke
// any tuple_extract instructions using the apply. This is a common case
// for functions returning multiple values.
if (auto *TI = dyn_cast<TupleInst>(C)) {
for (auto UI = User->use_begin(), E = User->use_end(); UI != E;) {
Operand *O = *UI++;
Example 15: transferable
void
MessagePort::PostMessage(JSContext* aCx, JS::Handle<JS::Value> aMessage,
const Optional<Sequence<JS::Value>>& aTransferable,
ErrorResult& aRv)
{
// We *must* clone the data here, or the JS::Value could be modified
// by script
JS::Rooted<JS::Value> transferable(aCx, JS::UndefinedValue());
if (aTransferable.WasPassed()) {
const Sequence<JS::Value>& realTransferable = aTransferable.Value();
// Here we want to check if the transferable object list contains
// this port. No other checks are done.
for (const JS::Value& value : realTransferable) {
if (!value.isObject()) {
continue;
}
MessagePort* port = nullptr;
nsresult rv = UNWRAP_OBJECT(MessagePort, &value.toObject(), port);
if (NS_FAILED(rv)) {
continue;
}
if (port == this) {
aRv.Throw(NS_ERROR_DOM_DATA_CLONE_ERR);
return;
}
}
// The input sequence only comes from the generated bindings code, which
// ensures it is rooted.
JS::HandleValueArray elements =
JS::HandleValueArray::fromMarkedLocation(realTransferable.Length(),
realTransferable.Elements());
JSObject* array =
JS_NewArrayObject(aCx, elements);
if (!array) {
aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
return;
}
transferable.setObject(*array);
}
RefPtr<SharedMessagePortMessage> data = new SharedMessagePortMessage();
UniquePtr<AbstractTimelineMarker> start;
UniquePtr<AbstractTimelineMarker> end;
RefPtr<TimelineConsumers> timelines = TimelineConsumers::Get();
bool isTimelineRecording = timelines && !timelines->IsEmpty();
if (isTimelineRecording) {
start = MakeUnique<MessagePortTimelineMarker>(
ProfileTimelineMessagePortOperationType::SerializeData,
MarkerTracingType::START);
}
data->Write(aCx, aMessage, transferable, aRv);
if (isTimelineRecording) {
end = MakeUnique<MessagePortTimelineMarker>(
ProfileTimelineMessagePortOperationType::SerializeData,
MarkerTracingType::END);
timelines->AddMarkerForAllObservedDocShells(start);
timelines->AddMarkerForAllObservedDocShells(end);
}
if (NS_WARN_IF(aRv.Failed())) {
return;
}
// This message has to be ignored.
if (mState > eStateEntangled) {
return;
}
// If we are unshipped we are connected to the other port on the same thread.
if (mState == eStateUnshippedEntangled) {
MOZ_ASSERT(mUnshippedEntangledPort);
mUnshippedEntangledPort->mMessages.AppendElement(data);
mUnshippedEntangledPort->Dispatch();
return;
}
// Not entangled yet, but already closed/disentangled.
if (mState == eStateEntanglingForDisentangle ||
mState == eStateEntanglingForClose) {
return;
}
RemoveDocFromBFCache();
// Not entangled yet.
if (mState == eStateEntangling) {
mMessagesForTheOtherPort.AppendElement(data);
return;
}
//......... (portions of the code are omitted here) .........