本文整理汇总了C++中IRGenFunction类的典型用法代码示例。如果您正苦于以下问题:C++ IRGenFunction类的具体用法?C++ IRGenFunction怎么用?C++ IRGenFunction使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了IRGenFunction类的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: os
// Emit IR that calls the runtime hook _swift_debug_verifyTypeLayoutAttribute
// to compare the compiler's statically-known layout of each formal type
// (size, alignment mask, stride, is-inline packing) against the values
// loaded from that type's runtime metadata.
void
irgen::emitTypeLayoutVerifier(IRGenFunction &IGF,
ArrayRef<CanType> formalTypes) {
// Verifier signature:
// (type metadata, runtime-value buf, static-value buf, byte count, label).
llvm::Type *verifierArgTys[] = {
IGF.IGM.TypeMetadataPtrTy,
IGF.IGM.Int8PtrTy,
IGF.IGM.Int8PtrTy,
IGF.IGM.SizeTy,
IGF.IGM.Int8PtrTy,
};
auto verifierFnTy = llvm::FunctionType::get(IGF.IGM.VoidTy,
verifierArgTys,
/*var arg*/ false);
auto verifierFn = IGF.IGM.Module.getOrInsertFunction(
"_swift_debug_verifyTypeLayoutAttribute",
verifierFnTy);
// Pair of stack slots used to pass a runtime value and its static
// counterpart to the verifier; one pair is created per LLVM type and
// reused for every subsequent check of that type.
struct VerifierArgumentBuffers {
Address runtimeBuf, staticBuf;
};
llvm::DenseMap<llvm::Type *, VerifierArgumentBuffers>
verifierArgBufs;
// Helpers that build the statically-known layout values as constants.
auto getSizeConstant = [&](Size sz) -> llvm::Constant * {
return llvm::ConstantInt::get(IGF.IGM.SizeTy, sz.getValue());
};
auto getAlignmentMaskConstant = [&](Alignment a) -> llvm::Constant * {
// The mask is alignment - 1 (alignment is a power of two).
return llvm::ConstantInt::get(IGF.IGM.SizeTy, a.getValue() - 1);
};
auto getBoolConstant = [&](bool b) -> llvm::Constant * {
return llvm::ConstantInt::get(IGF.IGM.Int1Ty, b);
};
SmallString<20> numberBuf;
for (auto formalType : formalTypes) {
// Runtime type metadata always represents the maximal abstraction level of
// the type.
auto anyTy = ProtocolCompositionType::get(IGF.IGM.Context, {});
auto openedAnyTy = ArchetypeType::getOpened(anyTy);
auto maxAbstraction = AbstractionPattern(openedAnyTy);
auto &ti = IGF.getTypeInfoForUnlowered(maxAbstraction, formalType);
// If there's no fixed type info, we rely on the runtime anyway, so there's
// nothing to verify.
// TODO: There are some traits of partially-fixed layouts we could check too.
// NOTE(review): this `return` abandons verification of ALL remaining
// formalTypes, not just this one — confirm whether `continue` was intended.
auto *fixedTI = dyn_cast<FixedTypeInfo>(&ti);
if (!fixedTI)
return;
auto metadata = IGF.emitTypeMetadataRef(formalType);
// Emit one verifier call comparing a runtime-loaded value against the
// corresponding compile-time constant, labeled by `description`.
auto verify = [&](llvm::Value *runtimeVal,
llvm::Value *staticVal,
const llvm::Twine &description) {
assert(runtimeVal->getType() == staticVal->getType());
// Get or create buffers for the arguments.
VerifierArgumentBuffers bufs;
auto foundBufs = verifierArgBufs.find(runtimeVal->getType());
if (foundBufs != verifierArgBufs.end()) {
bufs = foundBufs->second;
} else {
Address runtimeBuf = IGF.createAlloca(runtimeVal->getType(),
IGF.IGM.getPointerAlignment(),
"runtime");
Address staticBuf = IGF.createAlloca(staticVal->getType(),
IGF.IGM.getPointerAlignment(),
"static");
bufs = {runtimeBuf, staticBuf};
verifierArgBufs[runtimeVal->getType()] = bufs;
}
// Store both values, then pass the buffers as i8* together with the
// number of bytes the verifier should compare.
IGF.Builder.CreateStore(runtimeVal, bufs.runtimeBuf);
IGF.Builder.CreateStore(staticVal, bufs.staticBuf);
auto runtimePtr = IGF.Builder.CreateBitCast(bufs.runtimeBuf.getAddress(),
IGF.IGM.Int8PtrTy);
auto staticPtr = IGF.Builder.CreateBitCast(bufs.staticBuf.getAddress(),
IGF.IGM.Int8PtrTy);
auto count = llvm::ConstantInt::get(IGF.IGM.SizeTy,
IGF.IGM.DataLayout.getTypeStoreSize(runtimeVal->getType()));
auto msg
= IGF.IGM.getAddrOfGlobalString(description.str());
IGF.Builder.CreateCall(
verifierFn, {metadata, runtimePtr, staticPtr, count, msg});
};
// Check that the fixed layout matches the runtime layout.
SILType layoutType = SILType::getPrimitiveObjectType(formalType);
verify(emitLoadOfSize(IGF, layoutType),
getSizeConstant(fixedTI->getFixedSize()),
"size");
verify(emitLoadOfAlignmentMask(IGF, layoutType),
getAlignmentMaskConstant(fixedTI->getFixedAlignment()),
"alignment mask");
verify(emitLoadOfStride(IGF, layoutType),
getSizeConstant(fixedTI->getFixedStride()),
"stride");
verify(emitLoadOfIsInline(IGF, layoutType),
getBoolConstant(fixedTI->getFixedPacking(IGF.IGM)
// ......... (remainder of this example omitted in the excerpt) .........
示例2: addAbstractForFulfillments
// Record "abstract" cache entries for the given fulfillments: local type
// data (type metadata or witness tables) reachable by following a path from
// a source value. The source itself is materialized lazily via createSource,
// and only once even when several fulfillments reference it.
void LocalTypeDataCache::
addAbstractForFulfillments(IRGenFunction &IGF, FulfillmentMap &&fulfillments,
llvm::function_ref<AbstractSource()> createSource) {
// Add the source lazily.
Optional<unsigned> sourceIndex;
auto getSourceIndex = [&]() -> unsigned {
if (!sourceIndex) {
AbstractSources.emplace_back(createSource());
sourceIndex = AbstractSources.size() - 1;
}
return *sourceIndex;
};
for (auto &fulfillment : fulfillments) {
CanType type = CanType(fulfillment.first.first);
LocalTypeDataKind localDataKind;
// For now, ignore witness-table fulfillments when they're not for
// archetypes.
if (ProtocolDecl *protocol = fulfillment.first.second) {
if (auto archetype = dyn_cast<ArchetypeType>(type)) {
// Only record the witness table when the archetype actually lists
// the protocol among its conformances.
auto conformsTo = archetype->getConformsTo();
auto it = std::find(conformsTo.begin(), conformsTo.end(), protocol);
if (it == conformsTo.end()) continue;
localDataKind = LocalTypeDataKind::forAbstractProtocolWitnessTable(*it);
} else {
continue;
}
} else {
// Ignore type metadata fulfillments for non-dependent types that
// we can produce very cheaply. We don't want to end up emitting
// the type metadata for Int by chasing through N layers of metadata
// just because that path happens to be in the cache.
if (!type->hasArchetype() &&
getTypeMetadataAccessStrategy(IGF.IGM, type, /*preferDirect*/ true)
== MetadataAccessStrategy::Direct) {
continue;
}
localDataKind = LocalTypeDataKind::forTypeMetadata();
}
// Find the chain for the key.
auto key = getKey(type, localDataKind);
auto &chain = Map[key];
// Check whether there's already an entry that's at least as good as the
// fulfillment. The fulfillment's path cost is computed lazily since most
// chains won't need it.
Optional<unsigned> fulfillmentCost;
auto getFulfillmentCost = [&]() -> unsigned {
if (!fulfillmentCost)
fulfillmentCost = fulfillment.second.Path.cost();
return *fulfillmentCost;
};
bool isConditional = IGF.isConditionalDominancePoint();
bool foundBetter = false;
for (CacheEntry *cur = chain.Root, *last = nullptr; cur;
last = cur, cur = cur->getNext()) {
// Ensure the entry is acceptable.
if (!IGF.isActiveDominancePointDominatedBy(cur->DefinitionPoint))
continue;
// Ensure that the entry isn't better than the fulfillment.
auto curCost = cur->cost();
if (curCost == 0 || curCost <= getFulfillmentCost()) {
foundBetter = true;
break;
}
// If the entry is defined at the current point, (1) we know there
// won't be a better entry and (2) we should remove it.
if (cur->DefinitionPoint == IGF.getActiveDominancePoint() &&
!isConditional) {
// Splice it out of the chain.
assert(!cur->isConditional());
chain.eraseEntry(last, cur);
break;
}
}
if (foundBetter) continue;
// Okay, make a new entry.
// Register with the conditional dominance scope if necessary.
if (isConditional) {
IGF.registerConditionalLocalTypeDataKey(key);
}
// Allocate the new entry.
auto newEntry = new AbstractCacheEntry(IGF.getActiveDominancePoint(),
isConditional,
getSourceIndex(),
std::move(fulfillment.second.Path));
// Add it to the front of the chain.
chain.push_front(newEntry);
}
// ......... (remainder of this example omitted in the excerpt) .........
示例3: while
/// Look up the best cached value for the given local type data key and
/// materialize it for the given metadata request. Abstract (path-based)
/// entries are considered only when `allowAbstract` is set. Returns an
/// empty MetadataResponse when nothing usable is cached.
MetadataResponse
LocalTypeDataCache::tryGet(IRGenFunction &IGF, LocalTypeDataKey key,
                           bool allowAbstract, DynamicMetadataRequest request) {
  // All lookups go through the canonical caching key.
  key = key.getCachingKey();

  auto found = Map.find(key);
  if (found == Map.end()) return MetadataResponse();
  auto &chain = found->second;

  // Walk the chain looking for the cheapest acceptable entry.
  CacheEntry *winner = nullptr;
  Optional<OperationCost> winnerCost;
  for (CacheEntry *entry = chain.Root; entry; entry = entry->getNext()) {
    // Honor the caller's request to skip abstract entries.
    if (!allowAbstract && !isa<ConcreteCacheEntry>(entry))
      continue;

    // Only entries whose definition point dominates the active point
    // are usable here.
    if (!IGF.isActiveDominancePointDominatedBy(entry->DefinitionPoint))
      continue;

    if (winner) {
      // Lazily price the current winner; a free entry cannot be beaten,
      // so stop scanning as soon as we see one.
      if (!winnerCost) {
        winnerCost = winner->costForRequest(key, request);
        if (*winnerCost == OperationCost::Free) break;
      }
      auto entryCost = entry->costForRequest(key, request);
      if (entryCost >= *winnerCost) continue;
      // This entry is strictly cheaper; it becomes the new winner.
      winnerCost = entryCost;
    }
    winner = entry;
  }

  // Nothing acceptable in the chain.
  if (!winner) return MetadataResponse();

  switch (winner->getKind()) {
  case CacheEntry::Kind::Concrete: {
    // Concrete entries hold the value directly.
    auto concrete = cast<ConcreteCacheEntry>(winner);
    if (concrete->immediatelySatisfies(key, request))
      return concrete->Value;

    assert(key.Kind.isAnyTypeMetadata());

    // Emit a dynamic check that the type metadata matches the request.
    // TODO: we could potentially end up calling this redundantly with a
    // dynamic request. Fortunately, those are used only in very narrow
    // circumstances.
    auto checked = emitCheckTypeMetadataState(IGF, request, concrete->Value);

    // Cache the checked result as a concrete entry for later lookups.
    IGF.setScopedLocalTypeData(key, checked);
    return checked;
  }

  case CacheEntry::Kind::Abstract: {
    // Abstract entries describe a path from a stored source value.
    auto abstract = cast<AbstractCacheEntry>(winner);
    auto &source = AbstractSources[abstract->SourceIndex];
    auto result = abstract->follow(IGF, source, request);

    // Following the path caches every intermediate result, including the
    // final one, so the chain root must now be a fresh concrete entry.
    assert(chain.Root->DefinitionPoint == IGF.getActiveDominancePoint());
    assert(isa<ConcreteCacheEntry>(chain.Root));
    return result;
  }
  }
  llvm_unreachable("bad cache entry kind");
}
示例4: emitScalarExistentialDowncast
/// Emit a checked cast to a protocol or protocol composition.
/// The scalar `value` is cast to the existential `destType`; `metatypeKind`
/// is set when this is a metatype-to-existential-metatype cast. The result
/// is pushed onto `ex`.
void irgen::emitScalarExistentialDowncast(IRGenFunction &IGF,
llvm::Value *value,
SILType srcType,
SILType destType,
CheckedCastMode mode,
Optional<MetatypeRepresentation> metatypeKind,
Explosion &ex) {
// Collect every protocol making up the destination existential.
SmallVector<ProtocolDecl*, 4> allProtos;
destType.getSwiftRValueType().getAnyExistentialTypeProtocols(allProtos);
// Look up witness tables for the protocols that need them and get
// references to the ObjC Protocol* values for the objc protocols.
SmallVector<llvm::Value*, 4> objcProtos;
SmallVector<llvm::Value*, 4> witnessTableProtos;
bool hasClassConstraint = false;
bool hasClassConstraintByProtocol = false;
for (auto proto : allProtos) {
// If the protocol introduces a class constraint, track whether we need
// to check for it independent of protocol witnesses.
if (proto->requiresClass()) {
hasClassConstraint = true;
if (proto->getKnownProtocolKind()
&& *proto->getKnownProtocolKind() == KnownProtocolKind::AnyObject) {
// AnyObject only requires that the type be a class.
continue;
}
// If this protocol is class-constrained but not AnyObject, checking its
// conformance will check the class constraint too.
hasClassConstraintByProtocol = true;
}
if (Lowering::TypeConverter::protocolRequiresWitnessTable(proto)) {
auto descriptor = emitProtocolDescriptorRef(IGF, proto);
witnessTableProtos.push_back(descriptor);
}
if (!proto->isObjC())
continue;
objcProtos.push_back(emitReferenceToObjCProtocol(IGF, proto));
}
// Pick the LLVM type of the cast result: a metadata/class pointer for
// metatype casts, otherwise the destination's scalar schema type.
llvm::Type *resultType;
if (metatypeKind) {
switch (*metatypeKind) {
case MetatypeRepresentation::Thin:
llvm_unreachable("can't cast to thin metatype");
case MetatypeRepresentation::Thick:
resultType = IGF.IGM.TypeMetadataPtrTy;
break;
case MetatypeRepresentation::ObjC:
resultType = IGF.IGM.ObjCClassPtrTy;
break;
}
} else {
auto schema = IGF.getTypeInfo(destType).getSchema();
resultType = schema[0].getScalarType();
}
// We only need to check the class constraint for metatype casts where
// no protocol conformance indirectly requires the constraint for us.
bool checkClassConstraint =
(bool)metatypeKind && hasClassConstraint && !hasClassConstraintByProtocol;
llvm::Value *resultValue = value;
// If we don't have anything we really need to check, then trivially succeed.
if (objcProtos.empty() && witnessTableProtos.empty() &&
!checkClassConstraint) {
resultValue = IGF.Builder.CreateBitCast(value, resultType);
ex.add(resultValue);
return;
}
// Check the ObjC protocol conformances if there were any.
llvm::Value *objcCast = nullptr;
if (!objcProtos.empty()) {
// Get the ObjC instance or class object to check for these conformances.
llvm::Value *objcObject;
if (metatypeKind) {
switch (*metatypeKind) {
case MetatypeRepresentation::Thin:
llvm_unreachable("can't cast to thin metatype");
case MetatypeRepresentation::Thick: {
// The metadata might be for a non-class type, which wouldn't have
// an ObjC class object.
objcObject = nullptr;
break;
}
case MetatypeRepresentation::ObjC:
// Metatype is already an ObjC object.
objcObject = value;
break;
}
} else {
// Class instance is already an ObjC object.
objcObject = value;
// ......... (remainder of this example omitted in the excerpt) .........
示例5: emitSubSwitch
// Emit a switch over one chunk of a multi-chunk enum payload, recursing
// (via the `recur` label and per-chunk successor blocks) until all payload
// values and mask bits are consumed. `mask` selects which bits of each
// chunk participate in the comparison; `dflt` is the default destination
// (its int flag marks it unreachable).
static void emitSubSwitch(IRGenFunction &IGF,
MutableArrayRef<EnumPayload::LazyValue> values,
APInt mask,
MutableArrayRef<std::pair<APInt, llvm::BasicBlock *>> cases,
SwitchDefaultDest dflt) {
recur:
assert(!values.empty() && "didn't exit out when exhausting all values?!");
assert(!cases.empty() && "switching with no cases?!");
auto &DL = IGF.IGM.DataLayout;
// Consume the next payload chunk.
auto &pv = values.front();
values = values.slice(1);
auto payloadTy = getPayloadType(pv);
unsigned size = DL.getTypeSizeInBits(payloadTy);
// Grab a chunk of the mask.
auto maskPiece = mask.zextOrTrunc(size);
mask = mask.lshr(size);
// If the piece is zero, this doesn't affect the switch. We can just move
// forward and recur.
if (maskPiece == 0) {
// Shift the case values past the skipped chunk as well.
for (auto &casePair : cases)
casePair.first = casePair.first.lshr(size);
goto recur;
}
// Force the value we will test.
auto v = forcePayloadValue(pv);
auto payloadIntTy = llvm::IntegerType::get(IGF.IGM.getLLVMContext(), size);
// Need to coerce to integer for 'icmp eq' if it's not already an integer
// or pointer. (Switching or masking will also require a cast to integer.)
if (!isa<llvm::IntegerType>(v->getType())
&& !isa<llvm::PointerType>(v->getType()))
v = IGF.Builder.CreateBitOrPointerCast(v, payloadIntTy);
// Apply the mask if it's interesting.
if (!maskPiece.isAllOnesValue()) {
v = IGF.Builder.CreateBitOrPointerCast(v, payloadIntTy);
auto maskConstant = llvm::ConstantInt::get(payloadIntTy, maskPiece);
v = IGF.Builder.CreateAnd(v, maskConstant);
}
// Gather the values we will switch over for this payload chunk.
// FIXME: std::map is lame. Should hash APInts.
std::map<APInt, SmallVector<std::pair<APInt, llvm::BasicBlock*>, 2>, ult>
subCases;
for (auto casePair : cases) {
// Grab a chunk of the value.
auto valuePiece = casePair.first.zextOrTrunc(size);
// Index the case according to this chunk.
subCases[valuePiece].push_back({std::move(casePair.first).lshr(size),
casePair.second});
}
// More chunks are needed if both payload values and mask bits remain.
bool needsAdditionalCases = !values.empty() && mask != 0;
SmallVector<std::pair<llvm::BasicBlock *, decltype(cases)>, 2> recursiveCases;
// Map a group of cases to a destination block: a fresh block (to be
// recursed into) while chunks remain, otherwise the final destination.
auto blockForCases
= [&](MutableArrayRef<std::pair<APInt, llvm::BasicBlock*>> cases)
-> llvm::BasicBlock *
{
// If we need to recur, emit a new block.
if (needsAdditionalCases) {
auto newBB = IGF.createBasicBlock("");
recursiveCases.push_back({newBB, cases});
return newBB;
}
// Otherwise, we can jump directly to the ultimate destination.
assert(cases.size() == 1 && "more than one case for final destination?!");
return cases.front().second;
};
// If there's only one case, do a cond_br.
if (subCases.size() == 1) {
auto &subCase = *subCases.begin();
llvm::BasicBlock *block = blockForCases(subCase.second);
// If the default case is unreachable, we don't need to conditionally
// branch.
if (dflt.getInt()) {
IGF.Builder.CreateBr(block);
goto next;
}
auto &valuePiece = subCase.first;
llvm::Value *valueConstant = llvm::ConstantInt::get(payloadIntTy,
valuePiece);
valueConstant = IGF.Builder.CreateBitOrPointerCast(valueConstant,
v->getType());
auto cmp = IGF.Builder.CreateICmpEQ(v, valueConstant);
IGF.Builder.CreateCondBr(cmp, block, dflt.getPointer());
goto next;
}
// Otherwise, do a switch.
{
v = IGF.Builder.CreateBitOrPointerCast(v, payloadIntTy);
// ......... (remainder of this example omitted in the excerpt) .........
示例6: emitScalarExistentialDowncast
/// Emit a checked cast to a protocol or protocol composition.
/// Newer variant that works from the destination's ExistentialLayout,
/// handling superclass-constrained existentials and nested existential
/// metatypes. The cast result is pushed onto `ex`.
void irgen::emitScalarExistentialDowncast(IRGenFunction &IGF,
llvm::Value *value,
SILType srcType,
SILType destType,
CheckedCastMode mode,
Optional<MetatypeRepresentation> metatypeKind,
Explosion &ex) {
auto srcInstanceType = srcType.getSwiftRValueType();
auto destInstanceType = destType.getSwiftRValueType();
// Peel off matching levels of metatype-ness so we compare the underlying
// instance types.
while (auto metatypeType = dyn_cast<ExistentialMetatypeType>(
destInstanceType)) {
destInstanceType = metatypeType.getInstanceType();
srcInstanceType = cast<AnyMetatypeType>(srcInstanceType).getInstanceType();
}
auto layout = destInstanceType.getExistentialLayout();
// Look up witness tables for the protocols that need them and get
// references to the ObjC Protocol* values for the objc protocols.
SmallVector<llvm::Value*, 4> objcProtos;
SmallVector<llvm::Value*, 4> witnessTableProtos;
bool hasClassConstraint = layout.requiresClass();
bool hasClassConstraintByProtocol = false;
bool hasSuperclassConstraint = bool(layout.superclass);
for (auto protoTy : layout.getProtocols()) {
auto *protoDecl = protoTy->getDecl();
// If the protocol introduces a class constraint, track whether we need
// to check for it independent of protocol witnesses.
if (protoDecl->requiresClass()) {
assert(hasClassConstraint);
hasClassConstraintByProtocol = true;
}
if (Lowering::TypeConverter::protocolRequiresWitnessTable(protoDecl)) {
auto descriptor = emitProtocolDescriptorRef(IGF, protoDecl);
witnessTableProtos.push_back(descriptor);
}
if (protoDecl->isObjC())
objcProtos.push_back(emitReferenceToObjCProtocol(IGF, protoDecl));
}
// Pick the LLVM type of the cast result: a metadata/class pointer for
// metatype casts, otherwise the destination's scalar schema type.
llvm::Type *resultType;
if (metatypeKind) {
switch (*metatypeKind) {
case MetatypeRepresentation::Thin:
llvm_unreachable("can't cast to thin metatype");
case MetatypeRepresentation::Thick:
resultType = IGF.IGM.TypeMetadataPtrTy;
break;
case MetatypeRepresentation::ObjC:
resultType = IGF.IGM.ObjCClassPtrTy;
break;
}
} else {
auto schema = IGF.getTypeInfo(destType).getSchema();
resultType = schema[0].getScalarType();
}
// The source of a scalar cast is statically known to be a class or a
// metatype, so we only have to check the class constraint in two cases:
//
// 1) The destination type has an explicit superclass constraint that is
// more derived than what the source type is known to be.
//
// 2) We are casting between metatypes, in which case the source might
// be a non-class metatype.
bool checkClassConstraint = false;
if ((bool)metatypeKind &&
hasClassConstraint &&
!hasClassConstraintByProtocol &&
!srcInstanceType->mayHaveSuperclass())
checkClassConstraint = true;
// If the source has an equal or more derived superclass constraint than
// the destination, we can elide the superclass check.
//
// Note that destInstanceType is always an existential type, so calling
// getSuperclass() returns the superclass constraint of the existential,
// not the superclass of some concrete class.
bool checkSuperclassConstraint =
hasSuperclassConstraint &&
!destInstanceType->getSuperclass()->isExactSuperclassOf(srcInstanceType);
// A superclass check subsumes/implies the class check.
if (checkSuperclassConstraint)
checkClassConstraint = true;
llvm::Value *resultValue = value;
// If we don't have anything we really need to check, then trivially succeed.
if (objcProtos.empty() && witnessTableProtos.empty() &&
!checkClassConstraint) {
resultValue = IGF.Builder.CreateBitCast(value, resultType);
ex.add(resultValue);
return;
// ......... (remainder of this example omitted in the excerpt) .........
示例7: emitBuiltinCall
/// emitBuiltinCall - Emit a call to a builtin function.
/// Dispatches on the builtin's kind: several builtins are lowered inline
/// (consumed, forwarded, or turned into layout queries) before falling
/// through to generic LLVM-intrinsic handling. `args` holds the exploded
/// arguments; results are pushed onto `out`.
void irgen::emitBuiltinCall(IRGenFunction &IGF, Identifier FnId,
SILType resultType,
Explosion &args, Explosion &out,
SubstitutionList substitutions) {
// Decompose the function's name into a builtin name and type list.
const BuiltinInfo &Builtin = IGF.getSILModule().getBuiltinInfo(FnId);
if (Builtin.ID == BuiltinValueKind::UnsafeGuaranteedEnd) {
// Just consume the incoming argument.
assert(args.size() == 1 && "Expecting one incoming argument");
(void)args.claimAll();
return;
}
if (Builtin.ID == BuiltinValueKind::UnsafeGuaranteed) {
// Just forward the incoming argument.
assert(args.size() == 1 && "Expecting one incoming argument");
out = std::move(args);
// This is a token.
out.add(llvm::ConstantInt::get(IGF.IGM.Int8Ty, 0));
return;
}
if (Builtin.ID == BuiltinValueKind::OnFastPath) {
// The onFastPath builtin has only an effect on SIL level, so we lower it
// to a no-op.
return;
}
// These builtins don't care about their argument:
if (Builtin.ID == BuiltinValueKind::Sizeof) {
(void)args.claimAll();
// Query the lowered type info of the substituted type for its size.
auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
substitutions[0].getReplacement());
out.add(valueTy.second.getSize(IGF, valueTy.first));
return;
}
if (Builtin.ID == BuiltinValueKind::Strideof) {
(void)args.claimAll();
auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
substitutions[0].getReplacement());
out.add(valueTy.second.getStride(IGF, valueTy.first));
return;
}
if (Builtin.ID == BuiltinValueKind::Alignof) {
(void)args.claimAll();
auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
substitutions[0].getReplacement());
// The alignof value is one greater than the alignment mask.
out.add(IGF.Builder.CreateAdd(
valueTy.second.getAlignmentMask(IGF, valueTy.first),
IGF.IGM.getSize(Size(1))));
return;
}
if (Builtin.ID == BuiltinValueKind::IsPOD) {
(void)args.claimAll();
auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
substitutions[0].getReplacement());
out.add(valueTy.second.getIsPOD(IGF, valueTy.first));
return;
}
// addressof expects an lvalue argument.
if (Builtin.ID == BuiltinValueKind::AddressOf) {
llvm::Value *address = args.claimNext();
// Return the address as an opaque i8*.
llvm::Value *value = IGF.Builder.CreateBitCast(address,
IGF.IGM.Int8PtrTy);
out.add(value);
return;
}
// Everything else cares about the (rvalue) argument.
// If this is an LLVM IR intrinsic, lower it to an intrinsic call.
const IntrinsicInfo &IInfo = IGF.getSILModule().getIntrinsicInfo(FnId);
llvm::Intrinsic::ID IID = IInfo.ID;
// Calls to the int_instrprof_increment intrinsic are emitted during SILGen.
// At that stage, the function name GV used by the profiling pass is hidden.
// Fix the intrinsic call here by pointing it to the correct GV.
if (IID == llvm::Intrinsic::instrprof_increment) {
// Extract the PGO function name.
auto *NameGEP = cast<llvm::User>(args.claimNext());
auto *NameGV = dyn_cast<llvm::GlobalVariable>(NameGEP->stripPointerCasts());
if (NameGV) {
auto *NameC = NameGV->getInitializer();
// The initializer is the raw function name, NUL-padded; trim it.
StringRef Name = cast<llvm::ConstantDataArray>(NameC)->getRawDataValues();
StringRef PGOFuncName = Name.rtrim(StringRef("\0", 1));
// Point the increment call to the right function name variable.
std::string PGOFuncNameVar = llvm::getPGOFuncNameVarName(
PGOFuncName, llvm::GlobalValue::LinkOnceAnyLinkage);
auto *FuncNamePtr = IGF.IGM.Module.getNamedGlobal(PGOFuncNameVar);
if (FuncNamePtr) {
// ......... (remainder of this example omitted in the excerpt) .........