This article collects typical usage examples of the C++ method RecordDecl::field_iterator::getType, extracted from real-world projects. If you have been wondering how field_iterator::getType is used in practice, the curated examples below may help; you can also explore the enclosing class RecordDecl::field_iterator for related usage.
Fifteen code examples of the field_iterator::getType method are shown below, sorted by popularity by default.
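Before the numbered examples, here is a minimal sketch of the pattern they all share: walk a RecordDecl's fields with RecordDecl::field_iterator and inspect each field's type via getType(). The helper name dumpFieldTypes is hypothetical, and the sketch assumes a working Clang AST setup (for example, inside a libTooling visitor that hands you a RecordDecl).

#include "clang/AST/Decl.h"            // clang::RecordDecl, clang::FieldDecl
#include "clang/AST/Type.h"            // clang::QualType
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: print the name and type of every field in a record.
static void dumpFieldTypes(const clang::RecordDecl *RD) {
  for (clang::RecordDecl::field_iterator I = RD->field_begin(),
                                         E = RD->field_end();
       I != E; ++I) {
    // field_iterator dereferences to a FieldDecl; getType() yields the
    // field's QualType, which the examples below query further
    // (isReferenceType(), isBlockPointerType(), and so on).
    clang::QualType FT = I->getType();
    llvm::outs() << I->getNameAsString() << " : " << FT.getAsString() << "\n";
  }
}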
Example 1: GetNumNonZeroBytesInInit
/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present) elements.
  // If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      for (RecordDecl::field_iterator Field = SD->field_begin(),
           FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getContext().getTargetInfo().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}
Example 2: isCallbackArg
static bool isCallbackArg(SVal V, QualType T) {
  // If the parameter is 0, it's harmless.
  if (V.isZeroConstant())
    return false;

  // If a parameter is a block or a callback, assume it can modify pointer.
  if (T->isBlockPointerType() ||
      T->isFunctionPointerType() ||
      T->isObjCSelType())
    return true;

  // Check if a callback is passed inside a struct (for both, struct passed by
  // reference and by value). Dig just one level into the struct for now.
  if (isa<PointerType>(T) || isa<ReferenceType>(T))
    T = T->getPointeeType();

  if (const RecordType *RT = T->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
         I != E; ++I) {
      QualType FieldT = I->getType();
      if (FieldT->isBlockPointerType() || FieldT->isFunctionPointerType())
        return true;
    }
  }
  return false;
}
Example 3: Find
bool Find(const TypedValueRegion *R) {
  QualType T = R->getValueType();
  if (const RecordType *RT = T->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl()->getDefinition();
    assert(RD && "Referred record has no definition");
    for (RecordDecl::field_iterator I =
         RD->field_begin(), E = RD->field_end(); I != E; ++I) {
      const FieldRegion *FR = MrMgr.getFieldRegion(&*I, R);
      FieldChain.push_back(&*I);
      T = I->getType();
      if (T->getAsStructureType()) {
        if (Find(FR))
          return true;
      } else {
        const SVal &V = StoreMgr.getBinding(store, loc::MemRegionVal(FR));
        if (V.isUndef())
          return true;
      }
      FieldChain.pop_back();
    }
  }
  return false;
}
Example 4: GetFields
bool GetFields(RecordDecl *rd, Obj *entries) {
  // Check the fields of this struct; if any one of them is not understandable,
  // then this struct becomes 'opaque'.  That is, we insert the type and link it
  // to its llvm type so it can be used in terra code, but none of its fields
  // are exposed (since we don't understand the layout).
  bool opaque = false;
  for (RecordDecl::field_iterator it = rd->field_begin(), end = rd->field_end();
       it != end; ++it) {
    if (it->isBitField() || it->isAnonymousStructOrUnion() || !it->getDeclName()) {
      opaque = true;
      continue;
    }
    DeclarationName declname = it->getDeclName();
    std::string declstr = declname.getAsString();
    QualType FT = it->getType();
    Obj fobj;
    if (!GetType(FT, &fobj)) {
      opaque = true;
      continue;
    }
    lua_newtable(L);
    fobj.push();
    lua_setfield(L, -2, "type");
    lua_pushstring(L, declstr.c_str());
    lua_setfield(L, -2, "field");
    entries->addentry();
  }
  return !opaque;
}
Example 5: TypeHasMayAlias
bool
CodeGenTBAA::CollectFields(uint64_t BaseOffset,
                           QualType QTy,
                           SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &
                             Fields,
                           bool MayAlias) {
  /* Things not handled yet include: C++ base classes, bitfields, */

  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();
    if (RD->hasFlexibleArrayMember())
      return false;

    // TODO: Handle C++ base classes.
    if (const CXXRecordDecl *Decl = dyn_cast<CXXRecordDecl>(RD))
      if (Decl->bases_begin() != Decl->bases_end())
        return false;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    unsigned idx = 0;
    const FieldDecl *LastFD = 0;
    bool IsMsStruct = RD->isMsStruct(Context);
    for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i, ++idx) {
      if (IsMsStruct) {
        // Zero-length bitfields following non-bitfield members are ignored.
        if (Context.ZeroBitfieldFollowsNonBitfield(*i, LastFD)) {
          --idx;
          continue;
        }
        LastFD = *i;
      }
      uint64_t Offset = BaseOffset +
                        Layout.getFieldOffset(idx) / Context.getCharWidth();
      QualType FieldQTy = i->getType();
      if (!CollectFields(Offset, FieldQTy, Fields,
                         MayAlias || TypeHasMayAlias(FieldQTy)))
        return false;
    }
    return true;
  }

  /* Otherwise, treat whatever it is as a field. */
  uint64_t Offset = BaseOffset;
  uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
  llvm::MDNode *TBAAInfo = MayAlias ? getChar() : getTBAAInfo(QTy);
  llvm::MDNode *TBAATag = CodeGenOpts.StructPathTBAA ?
      getTBAAScalarTagInfo(TBAAInfo) : TBAAInfo;
  Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size, TBAATag));
  return true;
}
Example 6: Out
llvm::MDNode *
CodeGenTBAA::getTBAAStructTypeInfo(QualType QTy) {
  const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
  assert(isTBAAPathStruct(QTy));

  if (llvm::MDNode *N = StructTypeMetadataCache[Ty])
    return N;

  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    SmallVector<std::pair<uint64_t, llvm::MDNode*>, 4> Fields;
    unsigned idx = 0;
    const FieldDecl *LastFD = 0;
    bool IsMsStruct = RD->isMsStruct(Context);
    for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i, ++idx) {
      if (IsMsStruct) {
        // Zero-length bitfields following non-bitfield members are ignored.
        if (Context.ZeroBitfieldFollowsNonBitfield(*i, LastFD)) {
          --idx;
          continue;
        }
        LastFD = *i;
      }
      QualType FieldQTy = i->getType();
      llvm::MDNode *FieldNode;
      if (isTBAAPathStruct(FieldQTy))
        FieldNode = getTBAAStructTypeInfo(FieldQTy);
      else
        FieldNode = getTBAAInfo(FieldQTy);
      if (!FieldNode)
        return StructTypeMetadataCache[Ty] = NULL;
      Fields.push_back(std::make_pair(
          Layout.getFieldOffset(idx) / Context.getCharWidth(), FieldNode));
    }

    // TODO: This is using the RTTI name. Is there a better way to get
    // a unique string for a type?
    SmallString<256> OutName;
    llvm::raw_svector_ostream Out(OutName);
    MContext.mangleCXXRTTIName(QualType(Ty, 0), Out);
    Out.flush();
    // Create the struct type node with a vector of pairs (offset, type).
    return StructTypeMetadataCache[Ty] =
        MDHelper.createTBAAStructTypeNode(OutName, Fields);
  }

  return StructMetadataCache[Ty] = NULL;
}
Example 7: Out
llvm::MDNode *
CodeGenTBAA::getTBAAStructTypeInfo(QualType QTy) {
  const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
  assert(isTBAAPathStruct(QTy));

  if (llvm::MDNode *N = StructTypeMetadataCache[Ty])
    return N;

  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    SmallVector<std::pair<uint64_t, llvm::MDNode*>, 4> Fields;
    // To reduce the size of MDNode for a given struct type, we only output
    // once for all the fields with the same scalar types.
    // Offsets for scalar fields in the type DAG are not used.
    llvm::SmallSet<llvm::MDNode*, 4> ScalarFieldTypes;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i, ++idx) {
      QualType FieldQTy = i->getType();
      llvm::MDNode *FieldNode;
      if (isTBAAPathStruct(FieldQTy))
        FieldNode = getTBAAStructTypeInfo(FieldQTy);
      else {
        FieldNode = getTBAAInfo(FieldQTy);
        // Ignore this field if the type already exists.
        if (ScalarFieldTypes.count(FieldNode))
          continue;
        ScalarFieldTypes.insert(FieldNode);
      }
      if (!FieldNode)
        return StructTypeMetadataCache[Ty] = NULL;
      Fields.push_back(std::make_pair(
          Layout.getFieldOffset(idx) / Context.getCharWidth(), FieldNode));
    }

    // TODO: This is using the RTTI name. Is there a better way to get
    // a unique string for a type?
    SmallString<256> OutName;
    llvm::raw_svector_ostream Out(OutName);
    MContext.mangleCXXRTTIName(QualType(Ty, 0), Out);
    Out.flush();
    // Create the struct type node with a vector of pairs (offset, type).
    return StructTypeMetadataCache[Ty] =
        MDHelper.createTBAAStructTypeNode(OutName, Fields);
  }

  return StructMetadataCache[Ty] = NULL;
}
Example 8: Out
llvm::MDNode *
CodeGenTBAA::getTBAAStructTypeInfo(QualType QTy) {
  const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
  assert(isTBAAPathStruct(QTy));

  if (llvm::MDNode *N = StructTypeMetadataCache[Ty])
    return N;

  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
    SmallVector<std::pair<llvm::MDNode*, uint64_t>, 4> Fields;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i, ++idx) {
      QualType FieldQTy = i->getType();
      llvm::MDNode *FieldNode;
      if (isTBAAPathStruct(FieldQTy))
        FieldNode = getTBAAStructTypeInfo(FieldQTy);
      else
        FieldNode = getTBAAInfo(FieldQTy);
      if (!FieldNode)
        return StructTypeMetadataCache[Ty] = NULL;
      Fields.push_back(std::make_pair(
          FieldNode, Layout.getFieldOffset(idx) / Context.getCharWidth()));
    }

    // TODO: This is using the RTTI name. Is there a better way to get
    // a unique string for a type?
    SmallString<256> OutName;
    if (Features.CPlusPlus) {
      // Don't use mangleCXXRTTIName for C code.
      llvm::raw_svector_ostream Out(OutName);
      MContext.mangleCXXRTTIName(QualType(Ty, 0), Out);
      Out.flush();
    } else {
      OutName = RD->getName();
    }
    // Create the struct type node with a vector of pairs (type, offset).
    return StructTypeMetadataCache[Ty] =
        MDHelper.createTBAAStructTypeNode(OutName, Fields);
  }

  return StructMetadataCache[Ty] = NULL;
}
Example 9: TypeHasMayAlias
bool
CodeGenTBAA::CollectFields(uint64_t BaseOffset,
                           QualType QTy,
                           SmallVectorImpl<llvm::MDBuilder::TBAAStructField> &
                             Fields,
                           bool MayAlias) {
  /* Things not handled yet include: C++ base classes, bitfields, */

  if (const RecordType *TTy = QTy->getAs<RecordType>()) {
    const RecordDecl *RD = TTy->getDecl()->getDefinition();
    if (RD->hasFlexibleArrayMember())
      return false;

    // TODO: Handle C++ base classes.
    if (const CXXRecordDecl *Decl = dyn_cast<CXXRecordDecl>(RD))
      if (Decl->bases_begin() != Decl->bases_end())
        return false;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(),
         e = RD->field_end(); i != e; ++i, ++idx) {
      uint64_t Offset = BaseOffset +
                        Layout.getFieldOffset(idx) / Context.getCharWidth();
      QualType FieldQTy = i->getType();
      if (!CollectFields(Offset, FieldQTy, Fields,
                         MayAlias || TypeHasMayAlias(FieldQTy)))
        return false;
    }
    return true;
  }

  /* Otherwise, treat whatever it is as a field. */
  uint64_t Offset = BaseOffset;
  uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity();
  llvm::MDNode *TBAAType = MayAlias ? getChar() : getTypeInfo(QTy);
  llvm::MDNode *TBAATag = getAccessTagInfo(TBAAAccessInfo(TBAAType, Size));
  Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size, TBAATag));
  return true;
}
Example 10:
/// isSafeToConvert - Return true if it is safe to convert the specified record
/// decl to IR and lay it out, false if doing so would cause us to get into a
/// recursive compilation mess.
static bool
isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
  // If we have already checked this type (maybe the same type is used by-value
  // multiple times in multiple structure fields), don't check again.
  if (!AlreadyChecked.insert(RD)) return true;

  const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();

  // If this type is already laid out, converting it is a noop.
  if (CGT.isRecordLayoutComplete(Key)) return true;

  // If this type is currently being laid out, we can't recursively compile it.
  if (CGT.isRecordBeingLaidOut(Key))
    return false;

  // If this type would require laying out bases that are currently being laid
  // out, don't do it.  This includes virtual base classes which get laid out
  // when a class is translated, even though they aren't embedded by-value into
  // the class.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator I = CRD->bases_begin(),
         E = CRD->bases_end(); I != E; ++I)
      if (!isSafeToConvert(I->getType()->getAs<RecordType>()->getDecl(),
                           CGT, AlreadyChecked))
        return false;
  }

  // If this type would require laying out members that are currently being
  // laid out, don't do it.
  for (RecordDecl::field_iterator I = RD->field_begin(),
       E = RD->field_end(); I != E; ++I)
    if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
      return false;

  // If there are no problems, lets do it.
  return true;
}
Example 11: LayoutUnion
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;
  bool checkedFirstFieldZeroInit = false;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    if (field->getDeclName() && !checkedFirstFieldZeroInit) {
      CheckZeroInitializable(field->getType());
      checkedFirstFieldZeroInit = true;
    }

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
        Types.getDataLayout().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
        Types.getDataLayout().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    (void)hasOnlyZeroSizedBitFields;
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}
Example 12: getFieldBitOffset
void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields.  FieldEnd is
  // used as a special value to note that we don't have a current run.  A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block.  Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run.  It's
  // used to determine if the ASTRecordLayout is treating these two bitfields as
  // contiguous.  StartBitOffset is offset of the beginning of the Run.
  uint64_t StartBitOffset, Tail = 0;
  if (isDiscreteBitFieldABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->isZeroLengthBitField(Context)) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record.  This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and
      // remain there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }

  // Check if OffsetInRecord is better as a single field run.  When
  // OffsetInRecord has legal integer width, and its bitfield offset is
  // naturally aligned, it is better to make the bitfield a separate storage
  // component so as it can be accessed directly with lower cost.
  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
                                      uint64_t StartBitOffset) {
    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
      return false;
    if (!DataLayout.isLegalInteger(OffsetInRecord))
      return false;
    // Make sure StartBitOffset is naturally aligned if it is treated as an
    // IType integer.
    if (StartBitOffset %
            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
        0)
      return false;
    return true;
  };

  // The start field is better as a single field run.
  bool StartFieldAsSingleRun = false;
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (!Field->isZeroLengthBitField(Context)) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
                                                         StartBitOffset);
      }
      ++Field;
      continue;
    }

    // If the start field of a new run is better as a single run, or
    // if current field (or consecutive fields) is better as a single run, or
    // if current field has zero width bitfield and either
    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
    // true, or
    // if the offset of current field is inconsistent with the offset of
    // previous field plus its offset,
    // skip the block below and go ahead to emit the storage.
    // Otherwise, try to add bitfields to the run.
    if (!StartFieldAsSingleRun && Field != FieldEnd &&
        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
        (!Field->isZeroLengthBitField(Context) ||
         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }
//......... (part of the code omitted here) .........
Example 13: LayoutUnion
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const llvm::Type *Ty = 0;
  uint64_t Size = 0;
  unsigned Align = 0;

  bool HasOnlyZeroSizedBitFields = true;

  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    assert(Layout.getFieldOffset(FieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");

    if (Field->isBitField()) {
      uint64_t FieldSize =
          Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

      // Ignore zero sized bit fields.
      if (FieldSize == 0)
        continue;

      // Add the bit field info.
      Types.addBitFieldInfo(*Field, 0, 0, FieldSize);
    } else
      Types.addFieldInfo(*Field, 0);

    HasOnlyZeroSizedBitFields = false;

    const llvm::Type *FieldTy =
        Types.ConvertTypeForMemRecursive(Field->getType());
    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);

    if (FieldAlign < Align)
      continue;

    if (FieldAlign > Align || FieldSize > Size) {
      Ty = FieldTy;
      Align = FieldAlign;
      Size = FieldSize;
    }
  }

  // Now add our field.
  if (Ty) {
    AppendField(0, Ty);

    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
      // We need a packed struct.
      Packed = true;
      Align = 1;
    }
  }
  if (!Align) {
    assert(HasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    Align = 1;
  }

  // Append tail padding.
  if (Layout.getSize() / 8 > Size)
    AppendPadding(Layout.getSize() / 8, Align);
}
Example 14: VisitInitListExpr
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals?  Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
        new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                                 llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::Value *DestPtr = Dest.getAddr();

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    llvm::PointerType *APType =
        cast<llvm::PointerType>(DestPtr->getType());
    llvm::ArrayType *AType =
        cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    assert(NumInitElements <= NumArrayElements);

    QualType elementType = E->getType().getCanonicalType();
    elementType = CGF.getContext().getQualifiedType(
        cast<ArrayType>(elementType)->getElementType(),
        elementType.getQualifiers() + Dest.getQualifiers());

    // DestPtr is an array*.  Construct an elementType* by drilling
    // down a level.
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
    llvm::Value *indices[] = { zero, zero };
    llvm::Value *begin =
        Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");

    // Exception safety requires us to destroy all the
    // already-constructed members if an initializer throws.
    // For that, we'll need an EH cleanup.
    QualType::DestructionKind dtorKind = elementType.isDestructedType();
    llvm::AllocaInst *endOfInit = 0;
    EHScopeStack::stable_iterator cleanup;
    llvm::Instruction *cleanupDominator = 0;
    if (CGF.needsEHCleanup(dtorKind)) {
      // In principle we could tell the cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                       "arrayinit.endOfInit");
      cleanupDominator = Builder.CreateStore(begin, endOfInit);
      CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                           CGF.getDestroyer(dtorKind));
      cleanup = CGF.EHStack.stable_begin();

    // Otherwise, remember that we didn't need a cleanup.
    } else {
      dtorKind = QualType::DK_none;
    }

    llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

    // The 'current element to initialize'.  The invariants on this
    // variable are complicated.  Essentially, after each iteration of
    // the loop, it points to the last initialized element, except
    // that it points to the beginning of the array before any
    // elements have been initialized.
    llvm::Value *element = begin;

    // Emit the explicit initializers.
    for (uint64_t i = 0; i != NumInitElements; ++i) {
      // Advance to the next element.
      if (i > 0) {
        element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

        // Tell the cleanup that it needs to destroy up to this
        // element.  TODO: some of these stores can be trivially
        // observed to be unnecessary.
        if (endOfInit) Builder.CreateStore(element, endOfInit);
      }

      LValue elementLV = CGF.MakeAddrLValue(element, elementType);
      EmitInitializationToLValue(E->getInit(i), elementLV);
//......... (part of the code omitted here) .........
Example 15: getFieldBitOffset
void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields.  FieldEnd is
  // used as a special value to note that we don't have a current run.  A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block.  Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run.  It's
  // used to determine if the ASTRecordLayout is treating these two bitfields as
  // contiguous.  StartBitOffset is offset of the beginning of the Run.
  uint64_t StartBitOffset, Tail = 0;
  if (useMSABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->getBitWidthValue(Context) == 0) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record.  This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and
      // remain there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (Field->getBitWidthValue(Context) != 0) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
      }
      ++Field;
      continue;
    }
    // Add bitfields to the run as long as they qualify.
    if (Field != FieldEnd && Field->getBitWidthValue(Context) != 0 &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }
    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all
    // of the bitfields in the run.  Bitfields get the offset of their storage
    // but come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
  }
}