This article collects typical usage examples of the C++ LValue class. If you are wondering what the LValue class is for, how it is used, or what real code that uses it looks like, the curated examples below may help.
Fifteen code examples of the LValue class are shown below, sorted by popularity by default. The examples are drawn from real compiler code bases (Clang's CodeGen, Swift's SILGen, and other compiler front ends), so the exact APIs vary with the version of each project.
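Before the examples, here is a minimal sketch of the pattern most of them share in Clang's CodeGen library: obtain an LValue (an address plus type and qualifier information) for an expression, then load from or store through it. This is an illustrative sketch only, not code from the examples below; it assumes access to Clang's in-tree CodeGen headers (CodeGenFunction.h, CGValue.h), which are internal, and member-function signatures differ between Clang versions.

// Hypothetical helper, for orientation only: emit "LHS = RHS" for scalar
// expressions using the LValue/RValue machinery the examples below rely on.
void emitSimpleScalarAssignment(clang::CodeGen::CodeGenFunction &CGF,
                                const clang::Expr *LHSExpr,
                                const clang::Expr *RHSExpr,
                                clang::SourceLocation Loc) {
  // Evaluate both operands as lvalues (address + type info).
  clang::CodeGen::LValue LHS = CGF.EmitLValue(LHSExpr);
  clang::CodeGen::LValue RHS = CGF.EmitLValue(RHSExpr);

  // Load the scalar value out of the source lvalue...
  clang::CodeGen::RValue RV = CGF.EmitLoadOfLValue(RHS, Loc);

  // ...and store it through the destination lvalue.
  CGF.EmitStoreThroughLValue(RV, LHS);
}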
Example 1: EmitOMPAtomicReadExpr
static void EmitOMPAtomicReadExpr(CodeGenFunction &CGF, bool IsSeqCst,
                                  const Expr *X, const Expr *V,
                                  SourceLocation Loc) {
  // v = x;
  assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
  assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
  LValue XLValue = CGF.EmitLValue(X);
  LValue VLValue = CGF.EmitLValue(V);
  RValue Res = XLValue.isGlobalReg() ? CGF.EmitLoadOfLValue(XLValue, Loc)
                                     : CGF.EmitAtomicLoad(XLValue, Loc);
  // OpenMP, 2.12.6, atomic Construct
  // Any atomic construct with a seq_cst clause forces the atomically
  // performed operation to include an implicit flush operation without a
  // list.
  if (IsSeqCst)
    CGF.CGM.getOpenMPRuntime().EmitOMPFlush(CGF, llvm::None, Loc);
  switch (CGF.getEvaluationKind(V->getType())) {
  case TEK_Scalar:
    CGF.EmitStoreOfScalar(
        convertToScalarValue(CGF, Res, X->getType(), V->getType()), VLValue);
    break;
  case TEK_Complex:
    CGF.EmitStoreOfComplex(
        convertToComplexValue(CGF, Res, X->getType(), V->getType()), VLValue,
        /*isInit=*/false);
    break;
  case TEK_Aggregate:
    llvm_unreachable("Must be a scalar or complex.");
  }
}
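For context, a source-level construct that reaches this routine might look like the following hypothetical fragment (compiled with -fopenmp; the emitted IR depends on the target and the Clang version):

int x;   // shared variable, read atomically
int v;   // private destination

void read_x() {
  // 'v = x;' under 'omp atomic read' is lowered via EmitOMPAtomicReadExpr:
  // X is the lvalue for 'x', V is the lvalue for 'v'.
  // The seq_cst clause is what sets IsSeqCst and triggers the implicit flush.
  #pragma omp atomic read seq_cst
  v = x;
}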
Example 2: AggExprEmitter::VisitBinAssign
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType()) &&
         "Invalid assignment");

  // FIXME: __block variables need the RHS evaluated first!
  LValue LHS = CGF.EmitLValue(E->getLHS());

  // We have to special case property setters, otherwise we must have
  // a simple lvalue (no aggregates inside vectors, bitfields).
  if (LHS.isPropertyRef()) {
    AggValueSlot Slot = EnsureSlot(E->getRHS()->getType());
    CGF.EmitAggExpr(E->getRHS(), Slot);
    CGF.EmitStoreThroughPropertyRefLValue(Slot.asRValue(), LHS);
  } else {
    bool GCollection = false;
    if (CGF.getContext().getLangOptions().getGCMode())
      GCollection = TypeRequiresGCollection(E->getLHS()->getType());

    // Codegen the RHS so that it stores directly into the LHS.
    AggValueSlot LHSSlot = AggValueSlot::forLValue(LHS, true, GCollection);
    CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
    EmitFinalDestCopy(E, LHS, true);
  }
}
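A statement that would be emitted through this path is an ordinary struct-to-struct assignment. The hypothetical snippet below is meant to be compiled as C (or Objective-C), where such an assignment is a plain BinaryOperator of aggregate type; in C++ the same statement usually goes through the implicit copy-assignment operator instead.

struct Point3 {   // a plain aggregate with no special members
  double x, y, z;
};

void copyPoint(struct Point3 *dst, const struct Point3 *src) {
  // '*dst = *src' is a BinaryOperator whose operands have the same
  // unqualified aggregate type, so AggExprEmitter::VisitBinAssign computes
  // an LValue for the destination and emits the RHS directly into it.
  *dst = *src;
}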
Example 3: CodeGenFunction::EmitAsmInput
llvm::Value *CodeGenFunction::EmitAsmInput(const AsmStmt &S,
                                           const TargetInfo::ConstraintInfo &Info,
                                           const Expr *InputExpr,
                                           std::string &ConstraintStr) {
  llvm::Value *Arg;
  if (Info.allowsRegister() || !Info.allowsMemory()) {
    const llvm::Type *Ty = ConvertType(InputExpr->getType());

    if (Ty->isSingleValueType()) {
      Arg = EmitScalarExpr(InputExpr);
    } else {
      InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());

      LValue Dest = EmitLValue(InputExpr);
      uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
      if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
        Ty = llvm::IntegerType::get(VMContext, Size);
        Ty = llvm::PointerType::getUnqual(Ty);

        Arg = Builder.CreateLoad(Builder.CreateBitCast(Dest.getAddress(), Ty));
      } else {
        Arg = Dest.getAddress();
        ConstraintStr += '*';
      }
    }
  } else {
    InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
    LValue Dest = EmitLValue(InputExpr);
    Arg = Dest.getAddress();
    ConstraintStr += '*';
  }

  return Arg;
}
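To see both branches, consider a hypothetical translation unit with two asm inputs: a register-sized scalar and a large aggregate bound to a memory constraint. This is only a sketch; whether a particular aggregate is accepted as an "m" operand, and the IR produced, depend on the target and compiler version.

struct Big { char data[128]; };

void asm_inputs(int n, struct Big b) {
  // 'r' allows a register: the int input is emitted with EmitScalarExpr.
  __asm__ volatile("" :: "r"(n));

  // 'm' is memory-only: EmitAsmInput falls back to EmitLValue, passes the
  // aggregate's address, and appends '*' to the constraint string so the
  // operand is treated as indirect.
  __asm__ volatile("" :: "m"(b));
}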
Example 4: AggExprEmitter::EmitFinalDestCopy
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");

  EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(),
                                            Src.isVolatileQualified()),
                    Ignore);
}
Example 5: EmitDeclInit
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         llvm::Constant *DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  ASTContext &Context = CGF.getContext();

  CharUnits alignment = Context.getDeclAlign(&D);
  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);

  const Expr *Init = D.getInit();
  if (!CGF.hasAggregateLLVMType(type)) {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.isThreadSpecified());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(Init, DeclPtr, lv.isVolatile());
  } else {
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased));
  }
}
示例6: context
// If an l-value is found, return it. Otherwise, return an r-value.
bool
SemanticAnalysis::svalue(Expression *expr, SValue *outp)
{
  // Demand only RValues.
  SaveAndSet<ValueContext> context(&value_context_, kLValue);

  LValue lval;

  // Between setting the "outparams" and returning, nothing should call us.
  assert(!outp_ && !hir_);
  outp_ = &lval;
  expr->accept(this);
  outp_ = nullptr;

  // We should not have received both an r-value and an l-value.
  assert(!hir_ || lval.kind() == LValue::Error);

  if (!hir_ && lval.kind() == LValue::Error)
    return false;

  if (hir_)
    *outp = SValue(ReturnAndVoid(hir_));
  else
    *outp = SValue(lval);
  return true;
}
Example 7: EmitDeclInit
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
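The three TEK_* cases correspond to the evaluation kind of the global's type. The hypothetical declarations below would each take a different branch, assuming they require dynamic initialization (here because the initializers call a function that is not constant-foldable):

extern int seed();

int counter = seed();              // TEK_Scalar    -> EmitScalarInit
_Complex double phase = seed();    // TEK_Complex   -> EmitComplexExprIntoLValue

struct Logger { Logger(); int fd; };
Logger logger;                     // TEK_Aggregate -> EmitAggExpr into the slot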
Example 8: AtomicInfo::emitCopyIntoMemory
/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}
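A store that can reach this routine is an assignment into a C11 _Atomic aggregate, as in the hypothetical snippet below (compiled as C11). This is only a sketch of the source shape; whether the value is first materialized into a temporary buffer depends on the type's size and lock-freedom on the target.

struct WidePair { short tag; long payload; };   // likely has internal padding

_Atomic struct WidePair shared;

void publish(struct WidePair p) {
  // Storing a non-atomic r-value into an atomic l-value: codegen may zero
  // the buffer so the padding has a defined bit-pattern, project past the
  // padding, and then store the value before issuing the atomic operation.
  shared = p;
}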
Example 9: AggExprEmitter::EmitInitializationToLValue
void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                               AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
  }
}
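An initializer list with mixed element kinds exercises most of the branches above in one declaration. The types below are hypothetical and shown only to indicate which branch each element would take; the snippet assumes Clang's _Complex extension in C++.

struct Mixed {
  int n;              // scalar element   -> EmitScalarInit
  _Complex float c;   // complex element  -> EmitComplexExprIntoAddr
  int values[4];      // nested aggregate -> recursive EmitAggExpr
  const int &ref;     // reference element-> EmitReferenceBindingToExpr
};

void useMixed(int g) {
  // Zero elements written into an already-zeroed destination can hit the
  // "storing zero to zeroed memory is a noop" fast path.
  Mixed m = { 1, 2.0f, { 0, 0, 0, 0 }, g };
  (void)m;
}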
Example 10: CodeGenFunction::EmitAggExprToLValue
LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
  // Materialize the aggregate into a fresh memory temporary and hand the
  // result back as an LValue over that temporary.
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, Temp, LV.isVolatileQualified());
  return LV;
}
Example 11: CGObjCRuntime::EmitValueForIvarAtOffset
LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
                                               const ObjCInterfaceDecl *OID,
                                               llvm::Value *BaseValue,
                                               const ObjCIvarDecl *Ivar,
                                               unsigned CVRQualifiers,
                                               llvm::Value *Offset) {
  // Compute (type*) ( (char *) BaseValue + Offset)
  QualType IvarTy = Ivar->getType();
  llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
  llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
  V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");

  if (!Ivar->isBitField()) {
    V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
    LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
    LV.getQuals().addCVRQualifiers(CVRQualifiers);
    return LV;
  }

  // We need to compute an access strategy for this bit-field. We are given the
  // offset to the first byte in the bit-field, the sub-byte offset is taken
  // from the original layout. We reuse the normal bit-field access strategy by
  // treating this as an access to a struct where the bit-field is in byte 0,
  // and adjust the containing type size as appropriate.
  //
  // FIXME: Note that currently we make a very conservative estimate of the
  // alignment of the bit-field, because (a) it is not clear what guarantees the
  // runtime makes us, and (b) we don't have a way to specify that the struct is
  // at an alignment plus offset.
  //
  // Note, there is a subtle invariant here: we can only call this routine on
  // non-synthesized ivars but we may be called for synthesized ivars. However,
  // a synthesized ivar can never be a bit-field, so this is safe.
  uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, nullptr, Ivar);
  uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
  uint64_t AlignmentBits = CGF.CGM.getTarget().getCharAlign();
  uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
  CharUnits StorageSize = CGF.CGM.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(BitOffset + BitFieldSize, AlignmentBits));
  CharUnits Alignment = CGF.CGM.getContext().toCharUnitsFromBits(AlignmentBits);

  // Allocate a new CGBitFieldInfo object to describe this access.
  //
  // FIXME: This is incredibly wasteful, these should be uniqued or part of some
  // layout object. However, this is blocked on other cleanups to the
  // Objective-C code, so for now we just live with allocating a bunch of these
  // objects.
  CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
      CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
                               CGF.CGM.getContext().toBits(StorageSize),
                               CharUnits::fromQuantity(0)));

  V = CGF.Builder.CreateBitCast(V,
                                llvm::Type::getIntNPtrTy(CGF.getLLVMContext(),
                                                         Info->StorageSize));
  return LValue::MakeBitfield(V, *Info,
                              IvarTy.withCVRQualifiers(CVRQualifiers),
                              Alignment);
}
Example 12: AggExprEmitter::VisitCastExpr
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  if (!DestPtr && E->getCastKind() != CK_Dynamic) {
    Visit(E->getSubExpr());
    return;
  }

  switch (E->getCastKind()) {
  default: assert(0 && "Unhandled cast kind!");

  case CK_Dynamic: {
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (DestPtr)
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(DestPtr,
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(), CGF.MakeAddrLValue(CastPtr, Ty),
                               Ty);
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
    break;
  }

  // FIXME: Remove the CK_Unknown check here.
  case CK_Unknown:
  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("there are no lvalue bit-casts on aggregates");
    break;
  }
}
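The CK_ToUnion case corresponds to GCC's cast-to-union extension, which Clang also accepts. A minimal, hypothetical use compiled as GNU C might look like this:

union Number {
  int i;
  float f;
};

void store(int v) {
  // GNU extension: cast a value to a union type that has a member of that
  // value's type. Codegen bitcasts the destination slot to 'int *' and
  // initializes it through the CK_ToUnion path above.
  union Number u = (union Number)v;
  (void)u;
}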
Example 13: AtomicInfo::emitMemSetZeroIfNecessary
void AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
}
Example 14: MaterializeForSetEmitter::createSetterCallback
/// Emit a materializeForSet callback that stores the value from the
/// result buffer back into the l-value.
SILFunction *
MaterializeForSetEmitter::createSetterCallback(SILFunction &F,
                                               const TypeLowering *indicesTL,
                                               CanType indicesFormalType) {
  return createCallback(F, [&](SILGenFunction &SGF, SILLocation loc,
                               SILValue value, SILValue callbackBuffer,
                               SILValue self) {
    // If this is a subscript, we need to handle the indices in the
    // callback storage.
    RValue indices;
    if (indicesTL) {
      assert(isa<SubscriptDecl>(WitnessStorage));
      SILType indicesTy = indicesTL->getLoweredType();

      // Enter a cleanup to deallocate the callback storage.
      SGF.Cleanups.pushCleanup<DeallocateValueBuffer>(indicesTy,
                                                      callbackBuffer);

      // Project the value out, loading if necessary, and take
      // ownership of it.
      SILValue indicesV =
          SGF.B.createProjectValueBuffer(loc, indicesTy, callbackBuffer);
      if (indicesTL->isLoadable() || !SGF.silConv.useLoweredAddresses())
        indicesV = indicesTL->emitLoad(SGF.B, loc, indicesV,
                                       LoadOwnershipQualifier::Take);
      ManagedValue mIndices =
          SGF.emitManagedRValueWithCleanup(indicesV, *indicesTL);

      // Explode as an r-value.
      indices = RValue(SGF, loc, indicesFormalType, mIndices);
    }

    // The callback gets the address of 'self' at +0.
    ManagedValue mSelf = ManagedValue::forLValue(self);

    // That's enough to build the l-value.
    LValue lvalue = buildLValue(SGF, loc, mSelf, std::move(indices),
                                AccessKind::Write);

    // The callback gets the value at +1.
    auto &valueTL = SGF.getTypeLowering(lvalue.getTypeOfRValue());
    value = SGF.B.createPointerToAddress(
        loc, value, valueTL.getLoweredType().getAddressType(),
        /*isStrict*/ true, /*isInvariant*/ false);
    if (valueTL.isLoadable() || !SGF.silConv.useLoweredAddresses())
      value = valueTL.emitLoad(SGF.B, loc, value, LoadOwnershipQualifier::Take);
    ManagedValue mValue = SGF.emitManagedRValueWithCleanup(value, valueTL);
    RValue rvalue(SGF, loc, lvalue.getSubstFormalType(), mValue);

    // Finally, call the setter.
    SGF.emitAssignToLValue(loc, std::move(rvalue), std::move(lvalue));
  });
}
示例15: rvalue
void
SemanticAnalysis::visitAssignment(Assignment *node)
{
  // Analyze the left-hand side as an l-value; bail out if it is not one.
  LValue lval;
  if (!lvalue(node->lvalue(), &lval))
    return;

  // Analyze the right-hand side as an r-value and coerce it to the l-value's
  // type under assignment rules.
  HIR *hir = rvalue(node->expression());
  if ((hir = coerce(hir, lval.type(), Coerce_Assign)) == nullptr)
    return;

  // Emit a store of the coerced value through the l-value.
  hir_ = new (pool_) HStore(node, lval.type(), node->token(), lval, hir);
}