This page collects typical usage examples of the C++ class SILValue. If you are wondering what the C++ SILValue class is for, how to use it, or what real SILValue usage looks like, the curated class examples here may help.
Fifteen code examples of the SILValue class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: emitInjectOptionalValueInto
void SILGenFunction::emitInjectOptionalValueInto(SILLocation loc,
                                                 ArgumentSource &&value,
                                                 SILValue dest,
                                                 const TypeLowering &optTL) {
  SILType optType = optTL.getLoweredType();
  assert(dest->getType() == optType.getAddressType());
  auto loweredPayloadTy = optType.getAnyOptionalObjectType();
  assert(loweredPayloadTy);

  // Project out the payload area.
  auto someDecl = getASTContext().getOptionalSomeDecl();
  auto destPayload =
    B.createInitEnumDataAddr(loc, dest, someDecl,
                             loweredPayloadTy.getAddressType());

  // Emit the value into the payload area.
  TemporaryInitialization emitInto(destPayload, CleanupHandle::invalid());
  std::move(value).forwardInto(*this, &emitInto);

  // Inject the tag.
  B.createInjectEnumAddr(loc, dest, someDecl);
}
Example 2: splitDestructure
static void splitDestructure(SILBuilder &B, SILInstruction *I, SILValue Op) {
  assert((isa<DestructureStructInst>(I) || isa<DestructureTupleInst>(I)) &&
         "Only destructure operations can be passed to splitDestructure");

  SILModule &M = I->getModule();
  SILLocation Loc = I->getLoc();
  SILType OpType = Op->getType();

  llvm::SmallVector<Projection, 8> Projections;
  Projection::getFirstLevelProjections(OpType, M, Projections);
  assert(Projections.size() == I->getNumResults());

  llvm::SmallVector<SILValue, 8> NewValues;
  for (unsigned i : indices(Projections)) {
    const auto &Proj = Projections[i];
    NewValues.push_back(Proj.createObjectProjection(B, Loc, Op).get());
    assert(NewValues.back()->getType() == I->getResults()[i]->getType() &&
           "Expected created projections and results to be the same types");
  }

  I->replaceAllUsesPairwiseWith(NewValues);
  I->eraseFromParent();
}
Example 3: extendArgumentLifetime
void CallSiteDescriptor::extendArgumentLifetime(
    SILValue Arg, SILArgumentConvention ArgConvention) const {
  assert(!CInfo->LifetimeFrontier.empty() &&
         "Need a post-dominating release(s)");

  auto ArgTy = Arg->getType();

  // Extend the lifetime of a captured argument to cover the callee.
  SILBuilderWithScope Builder(getClosure());

  // Indirect non-inout arguments are not supported yet.
  assert(!isNonInoutIndirectSILArgument(Arg, ArgConvention));

  if (ArgTy.isObject()) {
    Builder.createRetainValue(getClosure()->getLoc(), Arg,
                              Builder.getDefaultAtomicity());
    for (auto *I : CInfo->LifetimeFrontier) {
      Builder.setInsertionPoint(I);
      Builder.createReleaseValue(getClosure()->getLoc(), Arg,
                                 Builder.getDefaultAtomicity());
    }
  }
}
Example 4: computeSubelement
/// Compute the subelement number indicated by the specified pointer (which is
/// derived from the root by a series of tuple/struct element addresses) by
/// treating the type as a linearized namespace with sequential elements. For
/// example, given:
///
///   root = alloc { a: { c: i64, d: i64 }, b: (i64, i64) }
///   tmp1 = struct_element_addr root, 1
///   tmp2 = tuple_element_addr tmp1, 0
///
/// This will return a subelement number of 2.
///
/// If this pointer is to within an existential projection, it returns ~0U.
///
static unsigned computeSubelement(SILValue Pointer, SILInstruction *RootInst) {
  unsigned SubEltNumber = 0;
  SILModule &M = RootInst->getModule();

  while (1) {
    // If we got to the root, we're done.
    if (RootInst == Pointer.getDef())
      return SubEltNumber;

    auto *Inst = cast<SILInstruction>(Pointer);
    if (auto *TEAI = dyn_cast<TupleElementAddrInst>(Inst)) {
      SILType TT = TEAI->getOperand().getType();

      // Keep track of what subelement is being referenced.
      for (unsigned i = 0, e = TEAI->getFieldNo(); i != e; ++i) {
        SubEltNumber += getNumSubElements(TT.getTupleElementType(i), M);
      }
      Pointer = TEAI->getOperand();
    } else if (auto *SEAI = dyn_cast<StructElementAddrInst>(Inst)) {
      SILType ST = SEAI->getOperand().getType();

      // Keep track of what subelement is being referenced.
      StructDecl *SD = SEAI->getStructDecl();
      for (auto *D : SD->getStoredProperties()) {
        if (D == SEAI->getField()) break;
        SubEltNumber += getNumSubElements(ST.getFieldType(D, M), M);
      }

      Pointer = SEAI->getOperand();
    } else {
      assert((isa<InitExistentialAddrInst>(Inst) ||
              isa<InjectEnumAddrInst>(Inst)) &&
             "Unknown access path instruction");
      // Cannot promote loads and stores from within an existential projection.
      return ~0U;
    }
  }
}
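The linearized "subelement number" in Example 4 is easiest to see outside the compiler. The following standalone sketch is not Swift compiler code: the Type tree and the countLeaves/subelementOf helpers are invented for illustration. It flattens a nested aggregate into scalar leaves and computes the index of an access path, reproducing the doc comment's case where the path {1, 0} yields subelement 2.

// Standalone illustration of linearized subelement numbering; this models the
// doc comment of Example 4 and is not part of the Swift compiler sources.
#include <cassert>
#include <cstdio>
#include <vector>

struct Type {
  std::vector<Type> elements; // empty => a scalar leaf such as i64
};

// Number of scalar leaves in a (possibly nested) aggregate.
static unsigned countLeaves(const Type &T) {
  if (T.elements.empty())
    return 1;
  unsigned n = 0;
  for (const Type &E : T.elements)
    n += countLeaves(E);
  return n;
}

// Walk a path of element indices from the root, summing the leaf counts of
// all elements skipped over at each level -- the same accumulation that
// computeSubelement performs per tuple_element_addr/struct_element_addr.
static unsigned subelementOf(const Type &Root, const std::vector<unsigned> &Path) {
  unsigned SubElt = 0;
  const Type *Cur = &Root;
  for (unsigned Field : Path) {
    assert(Field < Cur->elements.size());
    for (unsigned i = 0; i != Field; ++i)
      SubElt += countLeaves(Cur->elements[i]);
    Cur = &Cur->elements[Field];
  }
  return SubElt;
}

int main() {
  Type i64;                  // scalar leaf
  Type a{{i64, i64}};        // { c: i64, d: i64 }
  Type b{{i64, i64}};        // (i64, i64)
  Type root{{a, b}};         // { a: {...}, b: (...) }
  // struct_element_addr root, 1  followed by  tuple_element_addr tmp1, 0
  std::printf("%u\n", subelementOf(root, {1, 0})); // prints 2
  return 0;
}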
Example 5: classify
AccessedStorage::Kind AccessedStorage::classify(SILValue base) {
  switch (base->getKind()) {
  // An AllocBox is a fully identified memory location.
  case ValueKind::AllocBoxInst:
    return Box;
  // An AllocStack is a fully identified memory location, which may occur
  // after inlining code already subjected to stack promotion.
  case ValueKind::AllocStackInst:
    return Stack;
  case ValueKind::GlobalAddrInst:
    return Global;
  case ValueKind::ApplyInst: {
    FullApplySite apply(cast<ApplyInst>(base));
    if (auto *funcRef = apply.getReferencedFunction()) {
      if (getVariableOfGlobalInit(funcRef))
        return Global;
    }
    return Unidentified;
  }
  case ValueKind::RefElementAddrInst:
    return Class;
  // A yield is effectively a nested access, enforced independently in
  // the caller and callee.
  case ValueKind::BeginApplyResult:
    return Yield;
  // A function argument is effectively a nested access, enforced
  // independently in the caller and callee.
  case ValueKind::SILFunctionArgument:
    return Argument;
  // View the outer begin_access as a separate location because nested
  // accesses do not conflict with each other.
  case ValueKind::BeginAccessInst:
    return Nested;
  default:
    return Unidentified;
  }
}
Example 6: mapOperands
static void mapOperands(SILInstruction *I,
                        const llvm::DenseMap<ValueBase *, SILValue> &ValueMap) {
  for (auto &Opd : I->getAllOperands()) {
    SILValue OrigVal = Opd.get();
    ValueBase *OrigDef = OrigVal.getDef();
    auto Found = ValueMap.find(OrigDef);
    if (Found != ValueMap.end()) {
      SILValue MappedVal = Found->second;
      unsigned ResultIdx = OrigVal.getResultNumber();
      // All mapped instructions have their result number set to zero. Except
      // for arguments that we followed along one edge to their incoming value
      // on that edge.
      if (isa<SILArgument>(OrigDef))
        ResultIdx = MappedVal.getResultNumber();
      Opd.set(SILValue(MappedVal.getDef(), ResultIdx));
    }
  }
}
Example 7: createValueProjection
NullablePtr<SILInstruction>
Projection::
createValueProjection(SILBuilder &B, SILLocation Loc, SILValue Base) const {
  // Grab Base's type.
  SILType BaseTy = Base.getType();

  // If BaseTy is not an object type, bail.
  if (!BaseTy.isObject())
    return nullptr;

  // If this projection is associated with an address type, convert its type to
  // an object type.
  //
  // We explicitly do not convert Type to be an object if it is a local storage
  // type since we want it to fail.
  SILType Ty = Type.isAddress() ? Type.getObjectType() : Type;
  if (!Ty.isObject())
    return nullptr;

  // Ok, we now know that the type of Base and the type represented by the base
  // of this projection match and that this projection can be represented as
  // value. Create the instruction if we can. Otherwise, return nullptr.
  switch (getKind()) {
  case ProjectionKind::Struct:
    return B.createStructExtract(Loc, Base, cast<VarDecl>(getDecl()));
  case ProjectionKind::Tuple:
    return B.createTupleExtract(Loc, Base, getIndex());
  case ProjectionKind::Index:
    return nullptr;
  case ProjectionKind::Enum:
    return B.createUncheckedEnumData(Loc, Base,
                                     cast<EnumElementDecl>(getDecl()));
  case ProjectionKind::Class:
    return nullptr;
  }
}
Example 8: removeSingleBlockAllocation
void MemoryToRegisters::removeSingleBlockAllocation(AllocStackInst *ASI) {
  DEBUG(llvm::dbgs() << "*** Promoting in-block: " << *ASI);

  SILBasicBlock *BB = ASI->getParent();

  // The default value of the AllocStack is NULL because we don't have
  // uninitialized variables in Swift.
  SILValue RunningVal = SILValue();

  // For all instructions in the block.
  for (auto BBI = BB->begin(), E = BB->end(); BBI != E;) {
    SILInstruction *Inst = &*BBI;
    ++BBI;

    // Remove instructions that we are loading from. Replace the loaded value
    // with our running value.
    if (isLoadFromStack(Inst, ASI)) {
      if (!RunningVal) {
        assert(ASI->getElementType().isVoid() &&
               "Expected initialization of non-void type!");
        RunningVal = SILUndef::get(ASI->getElementType(), ASI->getModule());
      }
      replaceLoad(cast<LoadInst>(Inst), RunningVal, ASI);
      NumInstRemoved++;
      continue;
    }

    // Remove stores and record the value that we are saving as the running
    // value.
    if (auto *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getDest() == ASI) {
        RunningVal = SI->getSrc();
        Inst->eraseFromParent();
        NumInstRemoved++;
        continue;
      }
    }

    // Replace debug_value_addr with debug_value of the promoted value.
    if (auto *DVAI = dyn_cast<DebugValueAddrInst>(Inst)) {
      if (DVAI->getOperand() == ASI) {
        if (RunningVal) {
          promoteDebugValueAddr(DVAI, RunningVal, B);
        } else {
          // Drop debug_value_addr of uninitialized void values.
          assert(ASI->getElementType().isVoid() &&
                 "Expected initialization of non-void type!");
          DVAI->eraseFromParent();
        }
      }
      continue;
    }

    // Replace destroys with a release of the value.
    if (auto *DAI = dyn_cast<DestroyAddrInst>(Inst)) {
      if (DAI->getOperand() == ASI) {
        replaceDestroy(DAI, RunningVal);
      }
      continue;
    }

    // Remove deallocation.
    if (auto *DSI = dyn_cast<DeallocStackInst>(Inst)) {
      if (DSI->getOperand() == ASI) {
        Inst->eraseFromParent();
        NumInstRemoved++;
        // No need to continue scanning after deallocation.
        break;
      }
    }

    SILValue InstVal = Inst;

    // Remove dead address instructions that may be uses of the allocation.
    while (InstVal->use_empty() && (isa<StructElementAddrInst>(InstVal) ||
                                    isa<TupleElementAddrInst>(InstVal))) {
      SILInstruction *I = cast<SILInstruction>(InstVal);
      InstVal = I->getOperand(0);
      I->eraseFromParent();
      NumInstRemoved++;
    }
  }
}
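The heart of Example 8 is a single forward scan that tracks a "running value" for the promoted slot: stores update it, loads are rewritten to use it, and the deallocation ends the scan. Below is a minimal standalone sketch of that scan over a toy instruction stream; the Op and Kind types are invented for illustration, and this is not the SILMem2Reg implementation.

// Toy single-block store/load promotion over one stack slot, illustrating the
// "running value" scan used by removeSingleBlockAllocation. Types are invented.
#include <cstdio>
#include <optional>
#include <string>
#include <vector>

enum class Kind { Store, Load, Dealloc };

struct Op {
  Kind kind;
  std::string value;             // for Store: the value written to the slot
  std::string replacedWith = ""; // for Load: filled in by the promotion pass
  bool erased = false;
};

// Forward scan: remember the last stored value, rewrite loads to use it,
// mark the stores/loads as erased, and stop at the deallocation.
static void promoteSingleBlock(std::vector<Op> &block) {
  std::optional<std::string> runningVal;
  for (Op &op : block) {
    switch (op.kind) {
    case Kind::Store:
      runningVal = op.value; // record the running value, drop the store
      op.erased = true;
      break;
    case Kind::Load:
      // A load before any store would read uninitialized memory; the real
      // pass materializes an undef value here. Keep the sketch simple.
      op.replacedWith = runningVal.value_or("undef");
      op.erased = true;
      break;
    case Kind::Dealloc:
      op.erased = true;
      return; // no need to continue scanning after deallocation
    }
  }
}

int main() {
  std::vector<Op> block = {
      {Kind::Store, "%a"}, {Kind::Load, ""}, {Kind::Store, "%b"},
      {Kind::Load, ""},    {Kind::Dealloc, ""}};
  promoteSingleBlock(block);
  for (const Op &op : block)
    if (op.kind == Kind::Load)
      std::printf("load -> %s\n", op.replacedWith.c_str()); // %a, then %b
  return 0;
}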
Example 9: getCalleeFunction
/// \brief Returns the callee SILFunction called at a call site, in the case
/// that the call is transparent (as in, both that the call is marked with the
/// transparent flag and that the callee function is actually transparently
/// determinable from the SIL), or nullptr otherwise. This assumes that the SIL
/// is already in SSA form.
///
/// In the case that a non-null value is returned, FullArgs contains effective
/// argument operands for the callee function.
static SILFunction *
getCalleeFunction(FullApplySite AI, bool &IsThick,
                  SmallVectorImpl<SILValue> &CaptureArgs,
                  SmallVectorImpl<SILValue> &FullArgs,
                  PartialApplyInst *&PartialApply,
                  SILModule::LinkingMode Mode) {
  IsThick = false;
  PartialApply = nullptr;
  CaptureArgs.clear();
  FullArgs.clear();

  for (const auto &Arg : AI.getArguments())
    FullArgs.push_back(Arg);
  SILValue CalleeValue = AI.getCallee();

  if (LoadInst *LI = dyn_cast<LoadInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    // Conservatively only see through alloc_box; we assume this pass is run
    // immediately after SILGen
    SILInstruction *ABI = dyn_cast<AllocBoxInst>(LI->getOperand());
    if (!ABI)
      return nullptr;
    assert(LI->getOperand().getResultNumber() == 1);

    // Scan forward from the alloc box to find the first store, which
    // (conservatively) must be in the same basic block as the alloc box
    StoreInst *SI = nullptr;
    for (auto I = SILBasicBlock::iterator(ABI), E = I->getParent()->end();
         I != E; ++I) {
      // If we find the load instruction first, then the load is loading from
      // a non-initialized alloc; this shouldn't really happen but I'm not
      // making any assumptions
      if (static_cast<SILInstruction*>(I) == LI)
        return nullptr;
      if ((SI = dyn_cast<StoreInst>(I)) && SI->getDest().getDef() == ABI) {
        // We found a store that we know dominates the load; now ensure there
        // are no other uses of the alloc other than loads, retains, releases
        // and dealloc stacks
        for (auto UI = ABI->use_begin(), UE = ABI->use_end(); UI != UE;
             ++UI)
          if (UI.getUser() != SI && !isa<LoadInst>(UI.getUser()) &&
              !isa<StrongRetainInst>(UI.getUser()) &&
              !isa<StrongReleaseInst>(UI.getUser()))
            return nullptr;
        // We can conservatively see through the store
        break;
      }
    }
    if (!SI)
      return nullptr;
    CalleeValue = SI->getSrc();
  }

  // We are allowed to see through exactly one "partial apply" instruction or
  // one "thin to thick function" instruction, since those are the patterns
  // generated when using auto closures.
  if (PartialApplyInst *PAI =
        dyn_cast<PartialApplyInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);

    for (const auto &Arg : PAI->getArguments()) {
      CaptureArgs.push_back(Arg);
      FullArgs.push_back(Arg);
    }
    CalleeValue = PAI->getCallee();
    IsThick = true;
    PartialApply = PAI;
  } else if (ThinToThickFunctionInst *TTTFI =
               dyn_cast<ThinToThickFunctionInst>(CalleeValue)) {
    assert(CalleeValue.getResultNumber() == 0);
    CalleeValue = TTTFI->getOperand();
    IsThick = true;
  }

  FunctionRefInst *FRI = dyn_cast<FunctionRefInst>(CalleeValue);
  if (!FRI)
    return nullptr;

  SILFunction *CalleeFunction = FRI->getReferencedFunction();
  switch (CalleeFunction->getRepresentation()) {
  case SILFunctionTypeRepresentation::Thick:
  case SILFunctionTypeRepresentation::Thin:
  case SILFunctionTypeRepresentation::Method:
  case SILFunctionTypeRepresentation::WitnessMethod:
    break;
  case SILFunctionTypeRepresentation::CFunctionPointer:
  case SILFunctionTypeRepresentation::ObjCMethod:
  case SILFunctionTypeRepresentation::Block:
//......... the remainder of this example is omitted .........
Example 10: devirtualizeWitnessMethod
/// Generate a new apply of a function_ref to replace an apply of a
/// witness_method when we've determined the actual function we'll end
/// up calling.
static ApplySite devirtualizeWitnessMethod(ApplySite AI, SILFunction *F,
                                           ArrayRef<Substitution> Subs) {
  // We know the witness thunk and the corresponding set of substitutions
  // required to invoke the protocol method at this point.
  auto &Module = AI.getModule();

  // Collect all the required substitutions.
  //
  // The complete set of substitutions may be different, e.g. because the found
  // witness thunk F may have been created by a specialization pass and have
  // additional generic parameters.
  SmallVector<Substitution, 16> NewSubstList(Subs.begin(), Subs.end());

  // Add the non-self-derived substitutions from the original application.
  ArrayRef<Substitution> SubstList;
  SubstList = AI.getSubstitutionsWithoutSelfSubstitution();

  for (auto &origSub : SubstList)
    if (!origSub.getArchetype()->isSelfDerived())
      NewSubstList.push_back(origSub);

  // Figure out the exact bound type of the function to be called by
  // applying all substitutions.
  auto CalleeCanType = F->getLoweredFunctionType();
  auto SubstCalleeCanType = CalleeCanType->substGenericArgs(
      Module, Module.getSwiftModule(), NewSubstList);

  // Collect arguments from the apply instruction.
  auto Arguments = SmallVector<SILValue, 4>();
  auto ParamTypes = SubstCalleeCanType->getParameterSILTypes();

  // Iterate over the non self arguments and add them to the
  // new argument list, upcasting when required.
  SILBuilderWithScope B(AI.getInstruction());
  for (unsigned ArgN = 0, ArgE = AI.getNumArguments(); ArgN != ArgE; ++ArgN) {
    SILValue A = AI.getArgument(ArgN);
    auto ParamType = ParamTypes[ParamTypes.size() - AI.getNumArguments() + ArgN];
    if (A.getType() != ParamType)
      A = B.createUpcast(AI.getLoc(), A, ParamType);

    Arguments.push_back(A);
  }

  // Replace old apply instruction by a new apply instruction that invokes
  // the witness thunk.
  SILBuilderWithScope Builder(AI.getInstruction());
  SILLocation Loc = AI.getLoc();
  FunctionRefInst *FRI = Builder.createFunctionRef(Loc, F);

  auto SubstCalleeSILType = SILType::getPrimitiveObjectType(SubstCalleeCanType);
  auto ResultSILType = SubstCalleeCanType->getSILResult();
  ApplySite SAI;

  if (auto *A = dyn_cast<ApplyInst>(AI))
    SAI = Builder.createApply(Loc, FRI, SubstCalleeSILType,
                              ResultSILType, NewSubstList, Arguments,
                              A->isNonThrowing());
  if (auto *TAI = dyn_cast<TryApplyInst>(AI))
    SAI = Builder.createTryApply(Loc, FRI, SubstCalleeSILType,
                                 NewSubstList, Arguments,
                                 TAI->getNormalBB(), TAI->getErrorBB());
  if (auto *PAI = dyn_cast<PartialApplyInst>(AI))
    SAI = Builder.createPartialApply(Loc, FRI, SubstCalleeSILType,
                                     NewSubstList, Arguments, PAI->getType());

  NumWitnessDevirt++;
  return SAI;
}
Example 11: devirtualizeClassMethod
/// \brief Devirtualize an apply of a class method.
///
/// \p AI is the apply to devirtualize.
/// \p ClassOrMetatype is a class value or metatype value that is the
///    self argument of the apply we will devirtualize.
/// return the result value of the new ApplyInst if created one or null.
DevirtualizationResult swift::devirtualizeClassMethod(FullApplySite AI,
                                                      SILValue ClassOrMetatype) {
  DEBUG(llvm::dbgs() << " Trying to devirtualize : " << *AI.getInstruction());

  SILModule &Mod = AI.getModule();
  auto *CMI = cast<ClassMethodInst>(AI.getCallee());
  auto ClassOrMetatypeType = ClassOrMetatype.getType();
  auto *F = getTargetClassMethod(Mod, ClassOrMetatypeType, CMI->getMember());

  CanSILFunctionType GenCalleeType = F->getLoweredFunctionType();

  auto Subs = getSubstitutionsForCallee(Mod, GenCalleeType,
                                        ClassOrMetatypeType, AI);
  CanSILFunctionType SubstCalleeType = GenCalleeType;
  if (GenCalleeType->isPolymorphic())
    SubstCalleeType = GenCalleeType->substGenericArgs(Mod, Mod.getSwiftModule(), Subs);

  SILBuilderWithScope B(AI.getInstruction());
  FunctionRefInst *FRI = B.createFunctionRef(AI.getLoc(), F);

  // Create the argument list for the new apply, casting when needed
  // in order to handle covariant indirect return types and
  // contravariant argument types.
  llvm::SmallVector<SILValue, 8> NewArgs;

  auto Args = AI.getArguments();
  auto ParamTypes = SubstCalleeType->getParameterSILTypes();

  for (unsigned i = 0, e = Args.size() - 1; i != e; ++i)
    NewArgs.push_back(castValueToABICompatibleType(&B, AI.getLoc(), Args[i],
                                                   Args[i].getType(),
                                                   ParamTypes[i]).getValue());

  // Add the self argument, upcasting if required because we're
  // calling a base class's method.
  auto SelfParamTy = SubstCalleeType->getSelfParameter().getSILType();
  NewArgs.push_back(castValueToABICompatibleType(&B, AI.getLoc(),
                                                 ClassOrMetatype,
                                                 ClassOrMetatypeType,
                                                 SelfParamTy).getValue());

  // If we have a direct return type, make sure we use the subst callee return
  // type. If we have an indirect return type, AI's return type of the empty
  // tuple should be ok.
  SILType ResultTy = AI.getType();
  if (!SubstCalleeType->hasIndirectResult()) {
    ResultTy = SubstCalleeType->getSILResult();
  }

  SILType SubstCalleeSILType =
    SILType::getPrimitiveObjectType(SubstCalleeType);
  FullApplySite NewAI;

  SILBasicBlock *ResultBB = nullptr;
  SILBasicBlock *NormalBB = nullptr;
  SILValue ResultValue;
  bool ResultCastRequired = false;
  SmallVector<Operand *, 4> OriginalResultUses;

  if (!isa<TryApplyInst>(AI)) {
    NewAI = B.createApply(AI.getLoc(), FRI, SubstCalleeSILType, ResultTy,
                          Subs, NewArgs, cast<ApplyInst>(AI)->isNonThrowing());
    ResultValue = SILValue(NewAI.getInstruction(), 0);
  } else {
    auto *TAI = cast<TryApplyInst>(AI);
    // Create new normal and error BBs only if:
    // - re-using a BB would create a critical edge
    // - or, the result of the new apply would be of different
    //   type than the argument of the original normal BB.
    if (TAI->getNormalBB()->getSinglePredecessor())
      ResultBB = TAI->getNormalBB();
    else {
      ResultBB = B.getFunction().createBasicBlock();
      ResultBB->createBBArg(ResultTy);
    }

    NormalBB = TAI->getNormalBB();

    SILBasicBlock *ErrorBB = nullptr;
    if (TAI->getErrorBB()->getSinglePredecessor())
      ErrorBB = TAI->getErrorBB();
    else {
      ErrorBB = B.getFunction().createBasicBlock();
      ErrorBB->createBBArg(TAI->getErrorBB()->getBBArg(0)->getType());
    }

    NewAI = B.createTryApply(AI.getLoc(), FRI, SubstCalleeSILType,
                             Subs, NewArgs,
                             ResultBB, ErrorBB);
    if (ErrorBB != TAI->getErrorBB()) {
      B.setInsertionPoint(ErrorBB);
      B.createBranch(TAI->getLoc(), TAI->getErrorBB(),
                     {ErrorBB->getBBArg(0)});
    }
//......... the remainder of this example is omitted .........
Example 12: emitExistentialErasure
ManagedValue SILGenFunction::emitExistentialErasure(
                       SILLocation loc,
                       CanType concreteFormalType,
                       const TypeLowering &concreteTL,
                       const TypeLowering &existentialTL,
                       const ArrayRef<ProtocolConformance *> &conformances,
                       SGFContext C,
                       llvm::function_ref<ManagedValue (SGFContext)> F) {
  // Mark the needed conformances as used.
  for (auto *conformance : conformances)
    SGM.useConformance(conformance);

  switch (existentialTL.getLoweredType().getObjectType()
            .getPreferredExistentialRepresentation(SGM.M, concreteFormalType)) {
  case ExistentialRepresentation::None:
    llvm_unreachable("not an existential type");
  case ExistentialRepresentation::Metatype: {
    assert(existentialTL.isLoadable());

    SILValue metatype = F(SGFContext()).getUnmanagedValue();
    assert(metatype.getType().castTo<AnyMetatypeType>()->getRepresentation()
             == MetatypeRepresentation::Thick);

    auto upcast =
      B.createInitExistentialMetatype(loc, metatype,
                                      existentialTL.getLoweredType(),
                                      conformances);
    return ManagedValue::forUnmanaged(upcast);
  }
  case ExistentialRepresentation::Class: {
    assert(existentialTL.isLoadable());

    ManagedValue sub = F(SGFContext());
    SILValue v = B.createInitExistentialRef(loc,
                                            existentialTL.getLoweredType(),
                                            concreteFormalType,
                                            sub.getValue(),
                                            conformances);
    return ManagedValue(v, sub.getCleanup());
  }
  case ExistentialRepresentation::Boxed: {
    // Allocate the existential.
    auto box = B.createAllocExistentialBox(loc,
                                           existentialTL.getLoweredType(),
                                           concreteFormalType,
                                           concreteTL.getLoweredType(),
                                           conformances);
    auto existential = box->getExistentialResult();
    auto valueAddr = box->getValueAddressResult();

    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr, concreteFormalType,
                                      ExistentialRepresentation::Boxed,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return emitManagedRValueWithCleanup(existential);
  }
  case ExistentialRepresentation::Opaque: {
    // Allocate the existential.
    SILValue existential =
      getBufferForExprResult(loc, existentialTL.getLoweredType(), C);

    // Allocate the concrete value inside the container.
    SILValue valueAddr = B.createInitExistentialAddr(
                            loc, existential,
                            concreteFormalType,
                            concreteTL.getLoweredType(),
                            conformances);
    // Initialize the concrete value in-place.
    InitializationPtr init(
        new ExistentialInitialization(existential, valueAddr, concreteFormalType,
                                      ExistentialRepresentation::Opaque,
                                      *this));
    ManagedValue mv = F(SGFContext(init.get()));
    if (!mv.isInContext()) {
      mv.forwardInto(*this, loc, init->getAddress());
      init->finishInitialization(*this);
    }

    return manageBufferForExprResult(existential, existentialTL, C);
  }
  }
}
Example 13: isNonInoutIndirectSILArgument
static bool isNonInoutIndirectSILArgument(SILValue Arg,
                                          SILArgumentConvention ArgConvention) {
  return !Arg->getType().isObject() && ArgConvention.isIndirectConvention() &&
         ArgConvention != SILArgumentConvention::Indirect_Inout &&
         ArgConvention != SILArgumentConvention::Indirect_InoutAliasable;
}
Example 14: removeAndReleaseArray
// Attempt to remove the array allocated at NewAddrValue and release its
// refcounted elements.
//
// This is tightly coupled with the implementation of array.uninitialized.
// The call to allocate an uninitialized array returns two values:
// (Array<E> ArrayBase, UnsafeMutable<E> ArrayElementStorage)
//
// TODO: This relies on the lowest level array.uninitialized not being
// inlined. To do better we could either run this pass before semantic inlining,
// or we could also handle calls to array.init.
static bool removeAndReleaseArray(SILValue NewArrayValue) {
  TupleExtractInst *ArrayDef = nullptr;
  TupleExtractInst *StorageAddress = nullptr;
  for (auto *Op : NewArrayValue->getUses()) {
    auto *TupleElt = dyn_cast<TupleExtractInst>(Op->getUser());
    if (!TupleElt)
      return false;
    switch (TupleElt->getFieldNo()) {
    default:
      return false;
    case 0:
      ArrayDef = TupleElt;
      break;
    case 1:
      StorageAddress = TupleElt;
      break;
    }
  }
  if (!ArrayDef)
    return false; // No Array object to delete.

  assert(!ArrayDef->getType().isTrivial(ArrayDef->getModule()) &&
         "Array initialization should produce the proper tuple type.");

  // Analyze the array object uses.
  DeadObjectAnalysis DeadArray(ArrayDef);
  if (!DeadArray.analyze())
    return false;

  // Require all stores to be into the array storage not the array object,
  // otherwise bail.
  bool HasStores = false;
  DeadArray.visitStoreLocations([&](ArrayRef<StoreInst*>){ HasStores = true; });
  if (HasStores)
    return false;

  // Remove references to empty arrays.
  if (!StorageAddress) {
    removeInstructions(DeadArray.getAllUsers());
    return true;
  }
  assert(StorageAddress->getType().isTrivial(ArrayDef->getModule()) &&
         "Array initialization should produce the proper tuple type.");

  // Analyze the array storage uses.
  DeadObjectAnalysis DeadStorage(StorageAddress);
  if (!DeadStorage.analyze())
    return false;

  // Find array object lifetime.
  ValueLifetimeAnalysis VLA(ArrayDef);
  ValueLifetime Lifetime = VLA.computeFromUserList(DeadArray.getAllUsers());

  // Check that all storage users are in the Array's live blocks and never the
  // last user.
  for (auto *User : DeadStorage.getAllUsers()) {
    auto *BB = User->getParent();
    if (!VLA.successorHasLiveIn(BB)
        && VLA.findLastSpecifiedUseInBlock(BB) == User) {
      return false;
    }
  }
  // For each store location, insert releases.
  // This makes a strong assumption that the allocated object is released on all
  // paths in which some object initialization occurs.
  SILSSAUpdater SSAUp;
  DeadStorage.visitStoreLocations([&] (ArrayRef<StoreInst*> Stores) {
    insertReleases(Stores, Lifetime.getLastUsers(), SSAUp);
  });

  // Delete all uses of the dead array and its storage address.
  removeInstructions(DeadArray.getAllUsers());
  removeInstructions(DeadStorage.getAllUsers());

  return true;
}
Example 15: findClosuresForFunctionValue
/// Find all closures that may be propagated into the given function-type value.
///
/// Searches the use-def chain from the given value upward until a partial_apply
/// is reached. Populates `results` with the set of partial_apply instructions.
///
/// `funcVal` may be either a function type or an Optional function type. This
/// might be called on a directly applied value or on a call argument, which may
/// in turn be applied within the callee.
void swift::findClosuresForFunctionValue(
    SILValue funcVal, TinyPtrVector<PartialApplyInst *> &results) {

  SILType funcTy = funcVal->getType();
  // Handle `Optional<@convention(block) @noescape (_)->(_)>`
  if (auto optionalObjTy = funcTy.getOptionalObjectType())
    funcTy = optionalObjTy;
  assert(funcTy.is<SILFunctionType>());

  SmallVector<SILValue, 4> worklist;
  // Avoid exponential path exploration and prevent duplicate results.
  llvm::SmallDenseSet<SILValue, 8> visited;
  auto worklistInsert = [&](SILValue V) {
    if (visited.insert(V).second)
      worklist.push_back(V);
  };
  worklistInsert(funcVal);

  while (!worklist.empty()) {
    SILValue V = worklist.pop_back_val();

    if (auto *I = V->getDefiningInstruction()) {
      // Look through copies, borrows, and conversions.
      //
      // Handle copy_block and copy_block_without_actually_escaping before
      // calling findClosureStoredIntoBlock.
      if (SingleValueInstruction *SVI = getSingleValueCopyOrCast(I)) {
        worklistInsert(SVI->getOperand(0));
        continue;
      }
    }
    // Look through Optionals.
    if (V->getType().getOptionalObjectType()) {
      auto *EI = dyn_cast<EnumInst>(V);
      if (EI && EI->hasOperand()) {
        worklistInsert(EI->getOperand());
      }
      // Ignore the .None case.
      continue;
    }
    // Look through Phis.
    //
    // This should be done before calling findClosureStoredIntoBlock.
    if (auto *arg = dyn_cast<SILPhiArgument>(V)) {
      SmallVector<std::pair<SILBasicBlock *, SILValue>, 2> blockArgs;
      arg->getIncomingPhiValues(blockArgs);
      for (auto &blockAndArg : blockArgs)
        worklistInsert(blockAndArg.second);

      continue;
    }
    // Look through ObjC closures.
    auto fnType = V->getType().getAs<SILFunctionType>();
    if (fnType
        && fnType->getRepresentation() == SILFunctionTypeRepresentation::Block) {
      if (SILValue storedClosure = findClosureStoredIntoBlock(V))
        worklistInsert(storedClosure);

      continue;
    }
    if (auto *PAI = dyn_cast<PartialApplyInst>(V)) {
      SILValue thunkArg = isPartialApplyOfReabstractionThunk(PAI);
      if (thunkArg) {
        // Handle reabstraction thunks recursively. This may reabstract over
        // @convention(block).
        worklistInsert(thunkArg);
        continue;
      }
      results.push_back(PAI);
      continue;
    }
    // Ignore other unrecognized values that feed this applied argument.
  }
}
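The walk in Example 15 is a standard worklist traversal guarded by a visited set, which keeps the search linear even when use-def paths reconverge (for example through phi arguments). The generic standalone sketch below shows the same shape; the Graph representation and the collectRoots helper are invented for illustration, standing in for "look through this value" and "record a partial_apply".

// Generic worklist traversal with a visited set, the same shape as the
// use-def walk in findClosuresForFunctionValue. The graph here is invented.
#include <cstdio>
#include <unordered_map>
#include <unordered_set>
#include <vector>

using Node = int;
using Graph = std::unordered_map<Node, std::vector<Node>>; // node -> predecessors

// Collect every "root" (node with no predecessors) reachable by walking
// backwards from `start`, visiting each node at most once.
static std::vector<Node> collectRoots(const Graph &g, Node start) {
  std::vector<Node> results;
  std::vector<Node> worklist;
  std::unordered_set<Node> visited; // avoids exponential re-exploration
  auto worklistInsert = [&](Node n) {
    if (visited.insert(n).second)
      worklist.push_back(n);
  };
  worklistInsert(start);
  while (!worklist.empty()) {
    Node n = worklist.back();
    worklist.pop_back();
    auto it = g.find(n);
    if (it == g.end() || it->second.empty()) {
      results.push_back(n); // analogous to recording a partial_apply
      continue;
    }
    for (Node pred : it->second) // analogous to looking through copies/phis
      worklistInsert(pred);
  }
  return results;
}

int main() {
  // 4 <- 2 <- 1 and 4 <- 3 <- 1: two paths reconverge on node 1.
  Graph g = {{4, {2, 3}}, {2, {1}}, {3, {1}}, {1, {}}};
  for (Node r : collectRoots(g, 4))
    std::printf("root: %d\n", r); // prints "root: 1" exactly once
  return 0;
}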