This article collects typical usage examples of the C++ method QualType::isVariablyModifiedType. If you have been wondering what QualType::isVariablyModifiedType does and how to use it, the curated code examples below should help. You can also explore further usage examples of its containing class, QualType.
The following shows 7 code examples of QualType::isVariablyModifiedType, sorted by popularity by default. All of them are taken from Clang's code generator (lib/CodeGen in the LLVM/Clang source tree) at various points in its history.
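Before looking at the individual examples, here is a minimal sketch of the pattern they all share: test whether a QualType is variably modified (that is, it is or contains a C99 variable-length array) and, if so, emit the VLA size expressions before the type is otherwise used. The sketch assumes access to Clang's internal lib/CodeGen headers; the wrapper name emitSizesIfNeeded is illustrative and not part of Clang, while CodeGenFunction::EmitVariablyModifiedType and EmitVLASize are the internal entry points the examples below actually call.
// Minimal sketch, assuming Clang's internal CodeGen API; not a drop-in implementation.
#include "CodeGenFunction.h"   // Clang-internal header from lib/CodeGen

using namespace clang;
using namespace clang::CodeGen;

static void emitSizesIfNeeded(CodeGenFunction &CGF, QualType Ty) {
  // isVariablyModifiedType() is true if Ty is, or contains, a variable-length
  // array type, e.g. `int a[n]`, `int (*p)[n]`, or a typedef of either.
  if (Ty->isVariablyModifiedType())
    CGF.EmitVariablyModifiedType(Ty); // older Clang versions used EmitVLASize(Ty)
}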
Example 1: EmitDecl
void CodeGenFunction::EmitDecl(const Decl &D) {
switch (D.getKind()) {
default:
CGM.ErrorUnsupported(&D, "decl");
return;
case Decl::ParmVar:
assert(0 && "Parmdecls should not be in declstmts!");
case Decl::Function: // void X();
case Decl::Record: // struct/union/class X;
case Decl::Enum: // enum X;
case Decl::EnumConstant: // enum ? { X = ? }
case Decl::CXXRecord: // struct/union/class X; [C++]
case Decl::Using: // using X; [C++]
case Decl::UsingShadow:
case Decl::UsingDirective: // using namespace X; [C++]
case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
// None of these decls require codegen support.
return;
case Decl::Var: {
const VarDecl &VD = cast<VarDecl>(D);
assert(VD.isBlockVarDecl() &&
"Should not see file-scope variables inside a function!");
return EmitBlockVarDecl(VD);
}
case Decl::Typedef: { // typedef int X;
const TypedefDecl &TD = cast<TypedefDecl>(D);
QualType Ty = TD.getUnderlyingType();
if (Ty->isVariablyModifiedType())
EmitVLASize(Ty);
}
}
}
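The Decl::Typedef case above is easy to overlook: a typedef can hide a variably modified type, and C99 requires the size expression to be evaluated when the declaration is reached, which is why EmitDecl calls EmitVLASize for it. A hedged illustration of the kind of source this handles follows (VLAs are standard C99; Clang also accepts them in C++ as an extension); the snippet is illustrative and not taken from the examples.
void use_typedef(int n) {
  typedef int Row[n]; // variably modified typedef: `n` is evaluated right here
  Row *rows;          // later uses of Row rely on the size recorded above
  (void)rows;
}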
Example 2: EmitLocalBlockVarDecl
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
QualType Ty = D.getType();
bool isByRef = D.hasAttr<BlocksAttr>();
bool needsDispose = false;
unsigned Align = 0;
bool IsSimpleConstantInitializer = false;
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
if (!Target.useGlobalsForAutomaticVariables()) {
// If this value is an array or struct, is POD, and if the initializer is
// a statically determinable constant, try to optimize it.
if (D.getInit() && !isByRef &&
(Ty->isArrayType() || Ty->isRecordType()) &&
Ty->isPODType() &&
D.getInit()->isConstantInitializer(getContext())) {
// If this variable is marked 'const', emit the value as a global.
if (CGM.getCodeGenOpts().MergeAllConstants &&
Ty.isConstant(getContext())) {
EmitStaticBlockVarDecl(D);
return;
}
IsSimpleConstantInitializer = true;
}
// A normal fixed sized variable becomes an alloca in the entry block.
const llvm::Type *LTy = ConvertTypeForMem(Ty);
if (isByRef)
LTy = BuildByRefType(&D);
llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
Alloc->setName(D.getNameAsString());
Align = getContext().getDeclAlignInBytes(&D);
if (isByRef)
Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
Alloc->setAlignment(Align);
DeclPtr = Alloc;
} else {
// Targets that don't support recursion emit locals as globals.
const char *Class =
D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
DeclPtr = CreateStaticBlockVarDecl(D, Class,
llvm::GlobalValue
::InternalLinkage);
}
// FIXME: Can this happen?
if (Ty->isVariablyModifiedType())
EmitVLASize(Ty);
} else {
EnsureInsertPoint();
if (!DidCallStackSave) {
// Save the stack.
const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
llvm::Value *V = Builder.CreateCall(F);
Builder.CreateStore(V, Stack);
DidCallStackSave = true;
{
// Push a cleanup block and restore the stack there.
DelayedCleanupBlock scope(*this);
V = Builder.CreateLoad(Stack, "tmp");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
Builder.CreateCall(F, V);
}
}
// Get the element type.
const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
const llvm::Type *LElemPtrTy =
llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());
llvm::Value *VLASize = EmitVLASize(Ty);
// Downcast the VLA size expression
VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
false, "tmp");
// Allocate memory for the array.
llvm::AllocaInst *VLA =
Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
VLA->setAlignment(getContext().getDeclAlignInBytes(&D));
DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
}
llvm::Value *&DMEntry = LocalDeclMap[&D];
assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
//......... part of the code omitted here .........
Example 3: EmitVariablyModifiedType
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
assert(type->isVariablyModifiedType() &&
"Must pass variably modified type to EmitVLASizes!");
EnsureInsertPoint();
// We're going to walk down into the type and look for VLA
// expressions.
type = type.getCanonicalType();
do {
assert(type->isVariablyModifiedType());
const Type *ty = type.getTypePtr();
switch (ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
llvm_unreachable("unexpected dependent or non-canonical type!");
// These types are never variably-modified.
case Type::Builtin:
case Type::Complex:
case Type::Vector:
case Type::ExtVector:
case Type::Record:
case Type::Enum:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
llvm_unreachable("type class is never variably-modified!");
case Type::Pointer:
type = cast<PointerType>(ty)->getPointeeType();
break;
case Type::BlockPointer:
type = cast<BlockPointerType>(ty)->getPointeeType();
break;
case Type::LValueReference:
case Type::RValueReference:
type = cast<ReferenceType>(ty)->getPointeeType();
break;
case Type::MemberPointer:
type = cast<MemberPointerType>(ty)->getPointeeType();
break;
case Type::ConstantArray:
case Type::IncompleteArray:
// Losing element qualification here is fine.
type = cast<ArrayType>(ty)->getElementType();
break;
case Type::VariableArray: {
// Losing element qualification here is fine.
const VariableArrayType *vat = cast<VariableArrayType>(ty);
// Unknown size indication requires no size computation.
// Otherwise, evaluate and record it.
if (const Expr *size = vat->getSizeExpr()) {
// It's possible that we might have emitted this already,
// e.g. with a typedef and a pointer to it.
llvm::Value *&entry = VLASizeMap[size];
if (!entry) {
// Always zexting here would be wrong if it weren't
// undefined behavior to have a negative bound.
entry = Builder.CreateIntCast(EmitScalarExpr(size), SizeTy,
/*signed*/ false);
}
}
type = vat->getElementType();
break;
}
case Type::FunctionProto:
case Type::FunctionNoProto:
type = cast<FunctionType>(ty)->getResultType();
break;
case Type::Atomic:
type = cast<AtomicType>(ty)->getValueType();
break;
}
} while (type->isVariablyModifiedType());
}
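Example 3 shows why a single check is not enough at emission time: the VLA can be buried arbitrarily deep inside pointers, references, arrays, or function types, so the loop peels one type layer per iteration until no variably modified component remains, caching each size it evaluates in VLASizeMap (keyed by the size Expr, so a type reached twice, e.g. through a typedef and a pointer to it, is only evaluated once). A hedged illustration of the kind of type structure it walks through (again C99, or Clang's VLA extension in C++):
void walk_examples(int n) {
  int a[n];       // VariableArray reached directly
  int (*p)[n];    // walk: Pointer -> VariableArray
  int (*q[4])[n]; // walk: ConstantArray -> Pointer -> VariableArray
  (void)a; (void)p; (void)q;
}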
Example 4: StartFunction
//......... part of the code omitted here .........
CurFn = Fn;
CurFnInfo = &FnInfo;
assert(CurFn->isDeclaration() && "Function already has body?");
// Pass inline keyword to optimizer if it appears explicitly on any
// declaration.
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
RE = FD->redecls_end(); RI != RE; ++RI)
if (RI->isInlineSpecified()) {
Fn->addFnAttr(llvm::Attribute::InlineHint);
break;
}
if (getContext().getLangOptions().OpenCL) {
// Add metadata for a kernel function.
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
if (FD->hasAttr<OpenCLKernelAttr>()) {
llvm::LLVMContext &Context = getLLVMContext();
llvm::NamedMDNode *OpenCLMetadata =
CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
llvm::Value *Op = Fn;
OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Op));
}
}
llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
// Create a marker to make it easy to insert allocas into the entryblock
// later. Don't create this with the builder, because we don't want it
// folded.
llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
if (Builder.isNamePreserving())
AllocaInsertPt->setName("allocapt");
ReturnBlock = getJumpDestInCurrentScope("return");
Builder.SetInsertPoint(EntryBB);
// Emit subprogram debug descriptor.
if (CGDebugInfo *DI = getDebugInfo()) {
// FIXME: what is going on here and why does it ignore all these
// interesting type properties?
QualType FnType =
getContext().getFunctionType(RetTy, 0, 0,
FunctionProtoType::ExtProtoInfo());
DI->setLocation(StartLoc);
DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
}
if (ShouldInstrumentFunction())
EmitFunctionInstrumentation("__cyg_profile_func_enter");
if (CGM.getCodeGenOpts().InstrumentForProfiling)
EmitMCountInstrumentation();
if (RetTy->isVoidType()) {
// Void type; nothing to return.
ReturnValue = 0;
} else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
hasAggregateLLVMType(CurFnInfo->getReturnType())) {
// Indirect aggregate return; emit returned value directly into sret slot.
// This reduces code size, and affects correctness in C++.
ReturnValue = CurFn->arg_begin();
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
// Tell the epilog emitter to autorelease the result. We do this
// now so that various specialized functions can suppress it
// during their IR-generation.
if (getLangOptions().ObjCAutoRefCount &&
!CurFnInfo->isReturnsRetained() &&
RetTy->isObjCRetainableType())
AutoreleaseResult = true;
}
EmitStartEHSpec(CurCodeDecl);
PrologueCleanupDepth = EHStack.stable_begin();
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
// If any of the arguments have a variably modified type, make sure to
// emit the type size.
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i) {
QualType Ty = (*i)->getType();
if (Ty->isVariablyModifiedType())
EmitVariablyModifiedType(Ty);
}
// Emit a location at the end of the prologue.
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitLocation(Builder, StartLoc);
}
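The loop at the end of StartFunction covers parameters: if any argument has a variably modified type, its size expressions are evaluated in the prologue so that later uses of the parameter find the sizes already recorded in VLASizeMap. A hedged illustration of such a parameter (C99, or Clang's VLA extension in C++); the snippet is illustrative only.
void takes_vla(int n, int m, int matrix[n][m]) {
  // `matrix` adjusts to `int (*)[m]`, which is still variably modified, so the
  // prologue must evaluate `m` before the body runs.
  (void)matrix;
}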
Example 5: StartFunction
//......... part of the code omitted here .........
// later. Don't create this with the builder, because we don't want it
// folded.
llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
if (Builder.isNamePreserving())
AllocaInsertPt->setName("allocapt");
ReturnBlock = getJumpDestInCurrentScope("return");
Builder.SetInsertPoint(EntryBB);
// Emit subprogram debug descriptor.
if (CGDebugInfo *DI = getDebugInfo()) {
unsigned NumArgs = 0;
QualType *ArgsArray = new QualType[Args.size()];
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i) {
ArgsArray[NumArgs++] = (*i)->getType();
}
QualType FnType =
getContext().getFunctionType(RetTy, ArgsArray, NumArgs,
FunctionProtoType::ExtProtoInfo());
delete[] ArgsArray;
DI->setLocation(StartLoc);
DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
}
if (ShouldInstrumentFunction())
EmitFunctionInstrumentation("__cyg_profile_func_enter");
if (CGM.getCodeGenOpts().InstrumentForProfiling)
EmitMCountInstrumentation();
if (RetTy->isVoidType()) {
// Void type; nothing to return.
ReturnValue = 0;
} else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
hasAggregateLLVMType(CurFnInfo->getReturnType())) {
// Indirect aggregate return; emit returned value directly into sret slot.
// This reduces code size, and affects correctness in C++.
ReturnValue = CurFn->arg_begin();
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
// Tell the epilog emitter to autorelease the result. We do this
// now so that various specialized functions can suppress it
// during their IR-generation.
if (getLangOpts().ObjCAutoRefCount &&
!CurFnInfo->isReturnsRetained() &&
RetTy->isObjCRetainableType())
AutoreleaseResult = true;
}
EmitStartEHSpec(CurCodeDecl);
PrologueCleanupDepth = EHStack.stable_begin();
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
if (MD->getParent()->isLambda() &&
MD->getOverloadedOperator() == OO_Call) {
// We're in a lambda; figure out the captures.
MD->getParent()->getCaptureFields(LambdaCaptureFields,
LambdaThisCaptureField);
if (LambdaThisCaptureField) {
// If this lambda captures this, load it.
QualType LambdaTagType =
getContext().getTagDeclType(LambdaThisCaptureField->getParent());
LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
LambdaTagType);
LValue ThisLValue = EmitLValueForField(LambdaLV,
LambdaThisCaptureField);
CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
}
} else {
// Not in a lambda; just use 'this' from the method.
// FIXME: Should we generate a new load for each use of 'this'? The
// fast register allocator would be happier...
CXXThisValue = CXXABIThisValue;
}
}
// If any of the arguments have a variably modified type, make sure to
// emit the type size.
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i) {
QualType Ty = (*i)->getType();
if (Ty->isVariablyModifiedType())
EmitVariablyModifiedType(Ty);
}
// Emit a location at the end of the prologue.
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitLocation(Builder, StartLoc);
}
Example 6: StartFunction
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
llvm::Function *Fn,
const FunctionArgList &Args,
SourceLocation StartLoc) {
const Decl *D = GD.getDecl();
DidCallStackSave = false;
CurCodeDecl = CurFuncDecl = D;
FnRetTy = RetTy;
CurFn = Fn;
assert(CurFn->isDeclaration() && "Function already has body?");
// Pass inline keyword to optimizer if it appears explicitly on any
// declaration.
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
RE = FD->redecls_end(); RI != RE; ++RI)
if (RI->isInlineSpecified()) {
Fn->addFnAttr(llvm::Attribute::InlineHint);
break;
}
llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
// Create a marker to make it easy to insert allocas into the entryblock
// later. Don't create this with the builder, because we don't want it
// folded.
llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext));
AllocaInsertPt = new llvm::BitCastInst(Undef,
llvm::Type::getInt32Ty(VMContext), "",
EntryBB);
if (Builder.isNamePreserving())
AllocaInsertPt->setName("allocapt");
ReturnBlock = createBasicBlock("return");
Builder.SetInsertPoint(EntryBB);
QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
false, false, 0, 0,
/*FIXME?*/false,
/*FIXME?*/CC_Default);
// Emit subprogram debug descriptor.
if (CGDebugInfo *DI = getDebugInfo()) {
DI->setLocation(StartLoc);
DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
}
// FIXME: Leaked.
// CC info is ignored, hopefully?
CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
CC_Default, false);
if (RetTy->isVoidType()) {
// Void type; nothing to return.
ReturnValue = 0;
} else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
hasAggregateLLVMType(CurFnInfo->getReturnType())) {
// Indirect aggregate return; emit returned value directly into sret slot.
// This reduces code size, and affects correctness in C++.
ReturnValue = CurFn->arg_begin();
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
}
EmitStartEHSpec(CurCodeDecl);
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
if (CXXThisDecl)
CXXThisValue = Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
if (CXXVTTDecl)
CXXVTTValue = Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");
// If any of the arguments have a variably modified type, make sure to
// emit the type size.
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i) {
QualType Ty = i->second;
if (Ty->isVariablyModifiedType())
EmitVLASize(Ty);
}
}
Example 7: EmitLocalBlockVarDecl
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
QualType Ty = D.getType();
bool isByRef = D.hasAttr<BlocksAttr>();
bool needsDispose = false;
unsigned Align = 0;
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
if (!Target.useGlobalsForAutomaticVariables()) {
// A normal fixed sized variable becomes an alloca in the entry block.
const llvm::Type *LTy = ConvertTypeForMem(Ty);
Align = getContext().getDeclAlignInBytes(&D);
if (isByRef)
LTy = BuildByRefType(Ty, Align);
llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
Alloc->setName(D.getNameAsString().c_str());
if (isByRef)
Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
Alloc->setAlignment(Align);
DeclPtr = Alloc;
} else {
// Targets that don't support recursion emit locals as globals.
const char *Class =
D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
DeclPtr = CreateStaticBlockVarDecl(D, Class,
llvm::GlobalValue
::InternalLinkage);
}
// FIXME: Can this happen?
if (Ty->isVariablyModifiedType())
EmitVLASize(Ty);
} else {
EnsureInsertPoint();
if (!DidCallStackSave) {
// Save the stack.
const llvm::Type *LTy =
llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
llvm::Value *V = Builder.CreateCall(F);
Builder.CreateStore(V, Stack);
DidCallStackSave = true;
{
// Push a cleanup block and restore the stack there.
CleanupScope scope(*this);
V = Builder.CreateLoad(Stack, "tmp");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
Builder.CreateCall(F, V);
}
}
// Get the element type.
const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
const llvm::Type *LElemPtrTy =
llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());
llvm::Value *VLASize = EmitVLASize(Ty);
// Downcast the VLA size expression
VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
false, "tmp");
// Allocate memory for the array.
llvm::Value *VLA = Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext),
VLASize, "vla");
DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
}
llvm::Value *&DMEntry = LocalDeclMap[&D];
assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
DMEntry = DeclPtr;
// Emit debug info for local var declaration.
if (CGDebugInfo *DI = getDebugInfo()) {
assert(HaveInsertPoint() && "Unexpected unreachable point!");
DI->setLocation(D.getLocation());
if (Target.useGlobalsForAutomaticVariables()) {
DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
} else if (isByRef) {
llvm::Value *Loc;
bool needsCopyDispose = BlockRequiresCopying(Ty);
Loc = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
Loc = Builder.CreateLoad(Loc, false);
Loc = Builder.CreateBitCast(Loc, DeclPtr->getType());
Loc = Builder.CreateStructGEP(Loc, needsCopyDispose*2+4, "x");
DI->EmitDeclareOfAutoVariable(&D, Loc, Builder);
} else
DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
//......... part of the code omitted here .........