This article collects typical usage examples of the C++ method QualType::isAnyComplexType. If you are wondering what exactly QualType::isAnyComplexType does, how to use it, or where to find real-world examples, the hand-picked code samples below should help. You can also read further about the enclosing class, QualType.
Below, 15 code examples of QualType::isAnyComplexType are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: EmitCompoundAssignLValue
LValue ComplexExprEmitter::
EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                         ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&),
                         RValue &Val) {
  TestAndClearIgnoreReal();
  TestAndClearIgnoreImag();
  QualType LHSTy = E->getLHS()->getType();

  BinOpInfo OpInfo;

  // Load the RHS and LHS operands.
  // __block variables need to have the rhs evaluated first, plus this should
  // improve codegen a little.
  OpInfo.Ty = E->getComputationResultType();

  // The RHS should have been converted to the computation type.
  assert(OpInfo.Ty->isAnyComplexType());
  assert(CGF.getContext().hasSameUnqualifiedType(OpInfo.Ty,
                                                 E->getRHS()->getType()));
  OpInfo.RHS = Visit(E->getRHS());

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // Load from the l-value and convert it.
  if (LHSTy->isAnyComplexType()) {
    ComplexPairTy LHSVal = EmitLoadOfLValue(LHS, E->getExprLoc());
    OpInfo.LHS = EmitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
  } else {
    llvm::Value *LHSVal = CGF.EmitLoadOfScalar(LHS, E->getExprLoc());
    OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
  }

  // Expand the binary operator.
  ComplexPairTy Result = (this->*Func)(OpInfo);

  // Truncate the result and store it into the LHS lvalue.
  if (LHSTy->isAnyComplexType()) {
    ComplexPairTy ResVal = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
    EmitStoreOfComplex(ResVal, LHS, /*isInit*/ false);
    Val = RValue::getComplex(ResVal);
  } else {
    llvm::Value *ResVal =
      CGF.EmitComplexToScalarConversion(Result, OpInfo.Ty, LHSTy);
    CGF.EmitStoreOfScalar(ResVal, LHS, /*isInit*/ false);
    Val = RValue::get(ResVal);
  }

  return LHS;
}
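To put these code paths in concrete terms, here is a minimal, hypothetical snippet (not taken from the original source) showing the kind of compound assignments that reach this emitter; it assumes Clang's _Complex extension in C++ mode. A complex left-hand side takes the complex-to-complex cast branch, while a real left-hand side is widened to the complex computation type and truncated back on store.

// Illustrative only: inputs that exercise EmitCompoundAssignLValue.
_Complex double cd = 1.0;   // complex LHS
float f = 2.0f;             // real (scalar) LHS

void compound_assign_demo(void) {
  cd += 3.0;   // complex LHS: loaded as a real/imag pair, operated on, stored back as complex
  f  *= cd;    // scalar LHS: EmitScalarToComplexCast before the op, EmitComplexToScalarConversion after
}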
Example 2: EmitInitializationToLValue
void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(type)) {
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                               AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
  } else if (LV.isSimple()) {
    CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
  }
}
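As a hedged illustration (not from the original source), braced initialization of an aggregate like the following would route each member initializer through the branches above; the type and field names are made up, and _Complex is assumed to be available as a Clang extension in C++ mode.

// Illustrative only: each member initializer goes through EmitInitializationToLValue.
struct Inner { int a, b; };
struct Outer {
  int            n;       // scalar member    -> EmitScalarInit / EmitStoreThroughLValue
  _Complex float cf;      // complex member   -> EmitComplexExprIntoAddr
  Inner          nested;  // aggregate member -> EmitAggExpr into the member's slot
};

Outer make_outer(void) {
  return Outer{1, 2.0f, {3, 4}};
}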
Example 3: EmitDeclInit
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         llvm::Constant *DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  ASTContext &Context = CGF.getContext();

  CharUnits alignment = Context.getDeclAlign(&D);
  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);

  const Expr *Init = D.getInit();
  if (!CGF.hasAggregateLLVMType(type)) {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.isThreadSpecified());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
  } else if (type->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(Init, DeclPtr, lv.isVolatile());
  } else {
    CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
                                                  AggValueSlot::DoesNotNeedGCBarriers,
                                                  AggValueSlot::IsNotAliased));
  }
}
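A hedged sketch (illustrative names, not from the article) of globals whose dynamic initialization would go through a helper like this; the scalar, complex, and aggregate branches each get one global below.

// Illustrative only: globals with non-constant initializers need dynamic initialization.
double seed() { return 1.5; }   // not constexpr, so the initializers below run at program startup

double          g_scalar  = seed();            // scalar branch: EmitScalarInit
_Complex double g_complex = seed();            // complex branch: EmitComplexExprIntoAddr
struct Pair { double x, y; };
Pair            g_pair    = {seed(), seed()};  // aggregate branch: EmitAggExpr into DeclPtr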
Example 4: EmitDeclInit (older variant with destructor registration)
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         llvm::Constant *DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Context = CGF.getContext();

  const Expr *Init = D.getInit();
  QualType T = D.getType();
  bool isVolatile = Context.getCanonicalType(T).isVolatileQualified();

  if (!CGF.hasAggregateLLVMType(T)) {
    llvm::Value *V = CGF.EmitScalarExpr(Init);
    CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
  } else {
    CGF.EmitAggExpr(Init, DeclPtr, isVolatile);

    // Avoid generating destructor(s) for initialized objects.
    if (!isa<CXXConstructExpr>(Init))
      return;

    const ConstantArrayType *Array = Context.getAsConstantArrayType(T);
    if (Array)
      T = Context.getBaseElementType(Array);

    const RecordType *RT = T->getAs<RecordType>();
    if (!RT)
      return;

    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasTrivialDestructor())
      return;

    CXXDestructorDecl *Dtor = RD->getDestructor(Context);

    llvm::Constant *DtorFn;
    if (Array) {
      DtorFn =
        CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor,
                                                             Array,
                                                             DeclPtr);
      const llvm::Type *Int8PtrTy =
        llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
      DeclPtr = llvm::Constant::getNullValue(Int8PtrTy);
    } else
      DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete);

    CGF.EmitCXXGlobalDtorRegistration(DtorFn, DeclPtr);
  }
}
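This older variant additionally registers destructors for global objects of class type. A hedged, hypothetical example of globals that would hit that path:

// Illustrative only: class-type globals with a non-trivial destructor.
struct Logger {
  Logger() {}    // the CXXConstructExpr in the global's initializer triggers the dtor logic above
  ~Logger() {}   // user-provided, hence non-trivial: a cleanup is registered to run at exit
};

Logger g_logger;       // single object: GetAddrOfCXXDestructor + EmitCXXGlobalDtorRegistration
Logger g_loggers[4];   // constant array: GenerateCXXAggrDestructorHelper handles each element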
Example 5: makeZeroVal
DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
  if (Loc::isLocType(type))
    return makeNull();

  if (type->isIntegralOrEnumerationType())
    return makeIntVal(0, type);

  if (type->isArrayType() || type->isRecordType() || type->isVectorType() ||
      type->isAnyComplexType())
    return makeCompoundVal(type, BasicVals.getEmptySValList());

  // FIXME: Handle floats.
  return UnknownVal();
}
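For context, a hedged mapping (not in the original) from source-level declarations to the branch of makeZeroVal the analyzer would use for their zero value; _Complex is assumed to be available as an extension.

// Illustrative only: which makeZeroVal branch models "zero" for each type.
int            *p = 0;     // pointer (Loc) type        -> makeNull()
int             n = 0;     // integral/enumeration type -> makeIntVal(0, type)
int             a[4] = {}; // array type                -> compound value over an empty list
struct P { int x, y; };
P               pt = {};   // record type               -> compound value over an empty list
_Complex double z = 0;     // complex type              -> compound value over an empty list
double          d = 0.0;   // floating point            -> UnknownVal (see the FIXME above)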
Example 6: VisitInitListExpr
void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
                                   ExplodedNode *Pred,
                                   ExplodedNodeSet &Dst) {
  StmtNodeBuilder B(Pred, Dst, *currBldrCtx);

  ProgramStateRef state = Pred->getState();
  const LocationContext *LCtx = Pred->getLocationContext();
  QualType T = getContext().getCanonicalType(IE->getType());
  unsigned NumInitElements = IE->getNumInits();

  if (!IE->isGLValue() &&
      (T->isArrayType() || T->isRecordType() || T->isVectorType() ||
       T->isAnyComplexType())) {
    llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();

    // Handle base case where the initializer has no elements.
    // e.g: static int* myArray[] = {};
    if (NumInitElements == 0) {
      SVal V = svalBuilder.makeCompoundVal(T, vals);
      B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
      return;
    }

    for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
         ei = IE->rend(); it != ei; ++it) {
      SVal V = state->getSVal(cast<Expr>(*it), LCtx);
      vals = getBasicVals().prependSVal(V, vals);
    }

    B.generateNode(IE, Pred,
                   state->BindExpr(IE, LCtx,
                                   svalBuilder.makeCompoundVal(T, vals)));
    return;
  }

  // Handle scalars: int{5} and int{} and GLvalues.
  // Note, if the InitListExpr is a GLvalue, it means that there is an address
  // representing it, so it must have a single init element.
  assert(NumInitElements <= 1);

  SVal V;
  if (NumInitElements == 0)
    V = getSValBuilder().makeZeroVal(T);
  else
    V = state->getSVal(IE->getInit(0), LCtx);

  B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
}
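A few hedged, hypothetical initializers illustrating the two paths above: aggregate initializer lists become compound values built back to front with prependSVal, while scalar braced initializers reduce to a single value or to makeZeroVal.

// Illustrative only: initializers the analyzer evaluates through VisitInitListExpr.
struct Empty {};
Empty e = {};            // aggregate list with no elements -> compound value over the empty list
int pair[2] = {1, 2};    // aggregate list -> compound value built from the elements in reverse
int five = int{5};       // scalar braced init -> the single element's value
int zero = int{};        // empty scalar braced init -> makeZeroVal(int)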
Example 7: VisitInitListExpr (older variant)
void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
                                   ExplodedNode *Pred,
                                   ExplodedNodeSet &Dst) {
  StmtNodeBuilder B(Pred, Dst, *currBldrCtx);

  ProgramStateRef state = Pred->getState();
  const LocationContext *LCtx = Pred->getLocationContext();
  QualType T = getContext().getCanonicalType(IE->getType());
  unsigned NumInitElements = IE->getNumInits();

  if (T->isArrayType() || T->isRecordType() || T->isVectorType() ||
      T->isAnyComplexType()) {
    llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();

    // Handle base case where the initializer has no elements.
    // e.g: static int* myArray[] = {};
    if (NumInitElements == 0) {
      SVal V = svalBuilder.makeCompoundVal(T, vals);
      B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
      return;
    }

    for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
         ei = IE->rend(); it != ei; ++it) {
      SVal V = state->getSVal(cast<Expr>(*it), LCtx);
      if (dyn_cast_or_null<CXXTempObjectRegion>(V.getAsRegion()))
        V = UnknownVal();
      vals = getBasicVals().consVals(V, vals);
    }

    B.generateNode(IE, Pred,
                   state->BindExpr(IE, LCtx,
                                   svalBuilder.makeCompoundVal(T, vals)));
    return;
  }

  // Handle scalars: int{5} and int{}.
  assert(NumInitElements <= 1);

  SVal V;
  if (NumInitElements == 0)
    V = getSValBuilder().makeZeroVal(T);
  else
    V = state->getSVal(IE->getInit(0), LCtx);

  B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
}
Example 8: EmitAggregateCopy
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  // Aggregate assignment turns into llvm.memcpy. This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely. If there is a libc that does not
  // safely handle this, we can add a target hook.
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
  if (SrcPtr->getType() != BP)
    SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);

  // FIXME: Handle variable sized types.
  const llvm::Type *IntPtr =
    llvm::IntegerType::get(VMContext, LLVMPointerWidth);

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  //   volatile struct { int i; } a, b;
  //
  //   int main() {
  //     a = b;
  //     a = b;
  //   }
  //
  // we need to use a different call here. We use isVolatile to indicate when
  // either the source or the destination is volatile.
  Builder.CreateCall4(CGM.getMemCpyFn(),
                      DestPtr, SrcPtr,
                      // TypeInfo.first describes size in bits.
                      llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
                      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                             TypeInfo.second/8));
}
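A hedged example (hypothetical type name) of the plain struct assignment this routine lowers to a single llvm.memcpy call:

// Illustrative only: a trivially copyable aggregate assignment becomes llvm.memcpy.
struct Blob { int data[16]; };

void copy_blob(Blob &dst, const Blob &src) {
  dst = src;   // lowered via EmitAggregateCopy: one memcpy of sizeof(Blob) bytes, no element-wise loads
}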
Example 9: EmitInitializationToLValue (older variant)
void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV, T);
  } else if (T->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(T)) {
    CGF.EmitAnyExpr(E, LV.getAddress(), false);
  } else {
    CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, T);
  }
}
Example 10: EmitInitializationToLValue (older variant)
void
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
  } else if (isa<ImplicitValueInitExpr>(E)) {
    EmitNullInitializationToLValue(LV, T);
  } else if (T->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
    CGF.EmitStoreThroughLValue(RV, LV, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(T)) {
    CGF.EmitAggExpr(E, AggValueSlot::forAddr(LV.getAddress(), false, true,
                                             false, Dest.isZeroed()));
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV, T);
  }
}
Example 11: EmitDeclInit
static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         llvm::Constant *DeclPtr) {
  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  ASTContext &Context = CGF.getContext();

  const Expr *Init = D.getInit();
  QualType T = D.getType();
  bool isVolatile = Context.getCanonicalType(T).isVolatileQualified();

  if (!CGF.hasAggregateLLVMType(T)) {
    llvm::Value *V = CGF.EmitScalarExpr(Init);
    CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
  } else {
    CGF.EmitAggExpr(Init, DeclPtr, isVolatile);
  }
}
Example 12: hasAggregateLLVMType
bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
         T->isObjCObjectType();
}
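For reference, a hedged classification (illustrative declarations, not from the article) of which source types fall on each side of this predicate; note that complex types count as "aggregate" here even though callers typically check isAnyComplexType first and hand them to the complex emitter.

// Illustrative only: how hasAggregateLLVMType classifies a few common types.
struct Rec { int a; };
Rec            r;     // record type  -> aggregate
int            a[8];  // array type   -> aggregate
_Complex float c;     // complex type -> aggregate (but usually dispatched to the complex emitter)
int            i;     // scalar       -> not aggregate
int           *p;     // pointer      -> not aggregate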
Example 13: EmitAggregateCopy (newer variant)
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile, unsigned Alignment) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy "
             "constructor or assignment operator");

      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy. This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely. If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);

  if (!Alignment)
    Alignment = TypeInfo.second.getQuantity();

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  //   volatile struct { int i; } a, b;
  //
  //   int main() {
  //     a = b;
  //     a = b;
  //   }
  //
  // we need to use a different call here. We use isVolatile to indicate when
  // either the source or the destination is volatile.

  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP);

  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOptions().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal =
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy,
                                              TypeInfo.first.getQuantity()),
                       Alignment, isVolatile);
}
Example 14: EmitCompoundAssignLValue (newer variant)
LValue ComplexExprEmitter::
EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                         ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&),
                         RValue &Val) {
  TestAndClearIgnoreReal();
  TestAndClearIgnoreImag();
  QualType LHSTy = E->getLHS()->getType();
  if (const AtomicType *AT = LHSTy->getAs<AtomicType>())
    LHSTy = AT->getValueType();

  BinOpInfo OpInfo;

  // Load the RHS and LHS operands.
  // __block variables need to have the rhs evaluated first, plus this should
  // improve codegen a little.
  OpInfo.Ty = E->getComputationResultType();
  QualType ComplexElementTy = cast<ComplexType>(OpInfo.Ty)->getElementType();

  // The RHS should have been converted to the computation type.
  if (E->getRHS()->getType()->isRealFloatingType()) {
    assert(
        CGF.getContext()
            .hasSameUnqualifiedType(ComplexElementTy, E->getRHS()->getType()));
    OpInfo.RHS = ComplexPairTy(CGF.EmitScalarExpr(E->getRHS()), nullptr);
  } else {
    assert(CGF.getContext()
               .hasSameUnqualifiedType(OpInfo.Ty, E->getRHS()->getType()));
    OpInfo.RHS = Visit(E->getRHS());
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // Load from the l-value and convert it.
  if (LHSTy->isAnyComplexType()) {
    ComplexPairTy LHSVal = EmitLoadOfLValue(LHS, E->getExprLoc());
    OpInfo.LHS = EmitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
  } else {
    llvm::Value *LHSVal = CGF.EmitLoadOfScalar(LHS, E->getExprLoc());
    // For floating point real operands we can directly pass the scalar form
    // to the binary operator emission and potentially get more efficient code.
    if (LHSTy->isRealFloatingType()) {
      if (!CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, LHSTy))
        LHSVal = CGF.EmitScalarConversion(LHSVal, LHSTy, ComplexElementTy);
      OpInfo.LHS = ComplexPairTy(LHSVal, nullptr);
    } else {
      OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
    }
  }

  // Expand the binary operator.
  ComplexPairTy Result = (this->*Func)(OpInfo);

  // Truncate the result and store it into the LHS lvalue.
  if (LHSTy->isAnyComplexType()) {
    ComplexPairTy ResVal = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
    EmitStoreOfComplex(ResVal, LHS, /*isInit*/ false);
    Val = RValue::getComplex(ResVal);
  } else {
    llvm::Value *ResVal =
        CGF.EmitComplexToScalarConversion(Result, OpInfo.Ty, LHSTy);
    CGF.EmitStoreOfScalar(ResVal, LHS, /*isInit*/ false);
    Val = RValue::get(ResVal);
  }

  return LHS;
}
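A hedged sketch of the mixed real/complex compound assignments whose scalar fast paths this newer version adds (it also unwraps an _Atomic-qualified complex left-hand side before the checks above); _Complex is assumed to be available as a Clang extension in C++ mode.

// Illustrative only: mixed real/complex operands take the scalar fast paths in this version.
_Complex double cz = 1.0;
double          re = 2.0;

void mixed_compound_assign(void) {
  cz *= re;   // real floating RHS: kept as a scalar paired with a null imaginary part
  re /= cz;   // real floating LHS: converted only to the complex element type, truncated back on store
}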
Example 15: EmitAggregateClear
void CodeGenFunction::EmitAggregateClear(llvm::Value *DestPtr, QualType Ty) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  EmitMemSetToZero(DestPtr, Ty);
}
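Finally, a hedged example (hypothetical type, not from the article) of the kind of aggregate zero-initialization this helper implements: the destination storage is simply cleared, which is what EmitMemSetToZero produces.

// Illustrative only: zero-initializing a trivial aggregate clears its storage with a memset.
struct Buffer { int words[32]; };

void demo(void) {
  Buffer b = {};   // all members zeroed; CodeGen can emit this as a memset over sizeof(Buffer) bytes
  (void)b;
}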