This article collects typical usage examples of the C++ method FArchive::Tell. If you have been wondering what FArchive::Tell does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also look further into usage examples of its containing class, FArchive.
The following shows 15 code examples of FArchive::Tell, sorted by popularity by default.
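Most of the examples below share one idiom: remember the current archive position with Tell, write a placeholder, and later Seek back to patch in the real offset once the size of the following data is known. As a warm-up, here is a minimal standalone sketch of that round trip. It is not Unreal Engine code; std::fstream and its tellp/seekp merely stand in for FArchive::Tell and FArchive::Seek.

// Standalone illustration of the "placeholder offset" idiom used with
// FArchive::Tell/Seek in the examples below. std::fstream stands in for FArchive.
#include <cstdint>
#include <fstream>
#include <iostream>

int main()
{
    std::fstream Out("demo.bin", std::ios::binary | std::ios::out | std::ios::trunc);

    // Remember where the placeholder lives, then write a dummy offset.
    std::int32_t SkipOffset = static_cast<std::int32_t>(Out.tellp());
    std::int32_t Placeholder = 0;
    Out.write(reinterpret_cast<const char*>(&Placeholder), sizeof(Placeholder));

    // Serialize an arbitrary payload whose size is not known up front.
    const char Payload[] = "payload bytes";
    Out.write(Payload, sizeof(Payload));

    // Record the end position, seek back, patch the placeholder, then seek forward again.
    std::int32_t EndOffset = static_cast<std::int32_t>(Out.tellp());
    Out.seekp(SkipOffset);
    Out.write(reinterpret_cast<const char*>(&EndOffset), sizeof(EndOffset));
    Out.seekp(EndOffset);

    std::cout << "Payload ends at offset " << EndOffset << "\n";
    return 0;
}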
Example 1: operator<<
bool operator<<(FArchive& Ar,FVertexFactoryParameterRef& Ref)
{
bool bShaderHasOutdatedParameters = false;
Ar << Ref.VertexFactoryType;
uint8 ShaderFrequencyByte = Ref.ShaderFrequency;
Ar << ShaderFrequencyByte;
if(Ar.IsLoading())
{
Ref.ShaderFrequency = (EShaderFrequency)ShaderFrequencyByte;
}
Ar << Ref.VFHash;
if (Ar.IsLoading())
{
delete Ref.Parameters;
if (Ref.VertexFactoryType)
{
Ref.Parameters = Ref.VertexFactoryType->CreateShaderParameters(Ref.ShaderFrequency);
}
else
{
bShaderHasOutdatedParameters = true;
Ref.Parameters = NULL;
}
}
// Need to be able to skip over parameters for no longer existing vertex factories.
int32 SkipOffset = Ar.Tell();
{
FArchive::FScopeSetDebugSerializationFlags S(Ar, DSF_IgnoreDiff);
// Write placeholder.
Ar << SkipOffset;
}
if(Ref.Parameters)
{
Ref.Parameters->Serialize(Ar);
}
else if(Ar.IsLoading())
{
Ar.Seek( SkipOffset );
}
if( Ar.IsSaving() )
{
int32 EndOffset = Ar.Tell();
Ar.Seek( SkipOffset );
Ar << EndOffset;
Ar.Seek( EndOffset );
}
return bShaderHasOutdatedParameters;
}
Example 2: SaveToFile
bool FBuildPatchAppManifest::SaveToFile(const FString& Filename, bool bUseBinary)
{
bool bSuccess = false;
FArchive* FileOut = IFileManager::Get().CreateFileWriter(*Filename);
if (FileOut)
{
if (bUseBinary)
{
Data->ManifestFileVersion = EBuildPatchAppManifestVersion::GetLatestVersion();
FManifestWriter ManifestData;
Serialize(ManifestData);
ManifestData.Finalize();
if (!ManifestData.IsError())
{
int32 DataSize = ManifestData.TotalSize();
TArray<uint8> TempCompressed;
TempCompressed.AddUninitialized(DataSize);
int32 CompressedSize = DataSize;
bool bDataIsCompressed = FCompression::CompressMemory(
static_cast<ECompressionFlags>(COMPRESS_ZLIB | COMPRESS_BiasMemory),
TempCompressed.GetData(),
CompressedSize,
ManifestData.GetBytes().GetData(),
DataSize);
TempCompressed.SetNum(CompressedSize);
TArray<uint8>& FileData = bDataIsCompressed ? TempCompressed : ManifestData.GetBytes();
FManifestFileHeader Header;
*FileOut << Header;
Header.HeaderSize = FileOut->Tell();
Header.StoredAs = bDataIsCompressed ? EManifestFileHeader::STORED_COMPRESSED : EManifestFileHeader::STORED_RAW;
Header.DataSize = DataSize;
Header.CompressedSize = bDataIsCompressed ? CompressedSize : 0;
FSHA1::HashBuffer(FileData.GetData(), FileData.Num(), Header.SHAHash.Hash);
FileOut->Seek(0);
*FileOut << Header;
FileOut->Serialize(FileData.GetData(), FileData.Num());
bSuccess = !FileOut->IsError();
}
}
else
{
Data->ManifestFileVersion = EBuildPatchAppManifestVersion::GetLatestJsonVersion();
FString JSONOutput;
SerializeToJSON(JSONOutput);
FTCHARToUTF8 JsonUTF8(*JSONOutput);
FileOut->Serialize((UTF8CHAR*)JsonUTF8.Get(), JsonUTF8.Length() * sizeof(UTF8CHAR));
}
FileOut->Close();
delete FileOut;
FileOut = nullptr;
}
return bSuccess;
}
Example 3: CopyCompressedFileToPak
bool CopyCompressedFileToPak(FArchive& InPak, const FString& InMountPoint, const FPakInputPair& InFile, const FCompressedFileBuffer& CompressedFile, FPakEntryPair& OutNewEntry)
{
if (CompressedFile.TotalCompressedSize == 0)
{
return false;
}
int64 HeaderTell = InPak.Tell();
OutNewEntry.Info.CompressionMethod = CompressedFile.FileCompressionMethod;
OutNewEntry.Info.CompressionBlocks.AddUninitialized(CompressedFile.CompressedBlocks.Num());
int64 TellPos = InPak.Tell() + OutNewEntry.Info.GetSerializedSize(FPakInfo::PakFile_Version_Latest);
const TArray<FPakCompressedBlock>& Blocks = CompressedFile.CompressedBlocks;
for (int32 BlockIndex = 0, BlockCount = CompressedFile.CompressedBlocks.Num(); BlockIndex < BlockCount; ++BlockIndex)
{
OutNewEntry.Info.CompressionBlocks[BlockIndex].CompressedStart = Blocks[BlockIndex].CompressedStart + TellPos;
OutNewEntry.Info.CompressionBlocks[BlockIndex].CompressedEnd = Blocks[BlockIndex].CompressedEnd + TellPos;
}
if (InFile.bNeedEncryption)
{
FAES::EncryptData(CompressedFile.CompressedBuffer.Get(), CompressedFile.TotalCompressedSize);
}
// Hash the final buffer that's written
FSHA1 Hash;
Hash.Update(CompressedFile.CompressedBuffer.Get(), CompressedFile.TotalCompressedSize);
Hash.Final();
// Update file size & Hash
OutNewEntry.Info.CompressionBlockSize = CompressedFile.FileCompressionBlockSize;
OutNewEntry.Info.UncompressedSize = CompressedFile.OriginalSize;
OutNewEntry.Info.Size = CompressedFile.TotalCompressedSize;
Hash.GetHash(OutNewEntry.Info.Hash);
// Write the header, then the data
OutNewEntry.Filename = InFile.Dest.Mid(InMountPoint.Len());
OutNewEntry.Info.Offset = 0; // Don't serialize offsets here.
OutNewEntry.Info.bEncrypted = InFile.bNeedEncryption;
OutNewEntry.Info.Serialize(InPak,FPakInfo::PakFile_Version_Latest);
InPak.Serialize(CompressedFile.CompressedBuffer.Get(), CompressedFile.TotalCompressedSize);
return true;
}
Example 4: SkipLazyArray
void SkipLazyArray(FArchive &Ar)
{
guard(SkipLazyArray);
assert(Ar.IsLoading);
int pos;
Ar << pos;
assert(Ar.Tell() < pos);
Ar.Seek(pos);
unguard;
}
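Example 4 is the loading-side counterpart of the placeholder idiom: the saver stored an absolute end offset in front of the block, so the loader reads it and jumps past data it does not want to parse. A minimal sketch of the same step, again with standard streams standing in for FArchive:

// Skip a block by jumping to the absolute end offset the saver recorded
// in front of it (std::ifstream stands in for FArchive here).
#include <cassert>
#include <cstdint>
#include <fstream>

void SkipStoredBlock(std::ifstream& In)
{
    std::int32_t EndPos = 0;
    In.read(reinterpret_cast<char*>(&EndPos), sizeof(EndPos));

    // The stored offset must lie ahead of the current position.
    const std::streamoff Here = In.tellg();
    assert(Here <= static_cast<std::streamoff>(EndPos));
    In.seekg(EndPos);
}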
Example 5: LoadDataIntoMemory
/**
* Loads the data from disk into the specified memory block. This requires us still being attached to an
* archive we can use for serialization.
*
* @param Dest Memory to serialize data into
*/
void FUntypedBulkData::LoadDataIntoMemory( void* Dest )
{
#if WITH_EDITOR
checkf( AttachedAr, TEXT( "Attempted to load bulk data without an attached archive. Most likely the bulk data was loaded twice on console, which is not supported" ) );
// Keep track of current position in file so we can restore it later.
int64 PushedPos = AttachedAr->Tell();
// Seek to the beginning of the bulk data in the file.
AttachedAr->Seek( BulkDataOffsetInFile );
SerializeBulkData( *AttachedAr, Dest );
// Restore file pointer.
AttachedAr->Seek( PushedPos );
#else
bool bWasLoadedSuccessfully = false;
if (IsInGameThread() && Linker.IsValid())
{
ULinkerLoad* LinkerLoad = Linker.Get();
if ( LinkerLoad && LinkerLoad->Loader && !LinkerLoad->IsCompressed() )
{
FArchive* Ar = LinkerLoad;
// keep track of current position in this archive
int64 CurPos = Ar->Tell();
// Seek to the beginning of the bulk data in the file.
Ar->Seek( BulkDataOffsetInFile );
// serialize the bulk data
SerializeBulkData( *Ar, Dest );
// seek back to the position the archive was before
Ar->Seek(CurPos);
// note that we loaded it
bWasLoadedSuccessfully = true;
}
}
// if we weren't able to load via linker, load directly by filename
if (!bWasLoadedSuccessfully)
{
// load from the specified filename when the linker has been cleared
checkf( Filename != TEXT(""), TEXT( "Attempted to load bulk data without a proper filename." ) );
FArchive* Ar = IFileManager::Get().CreateFileReader(*Filename, FILEREAD_Silent);
checkf( Ar != NULL, TEXT( "Attempted to load bulk data from an invalid filename '%s'." ), *Filename );
// Seek to the beginning of the bulk data in the file.
Ar->Seek( BulkDataOffsetInFile );
SerializeBulkData( *Ar, Dest );
delete Ar;
}
#endif // WITH_EDITOR
}
示例6: VerifyFile
uint8 FBuildPatchUtils::VerifyFile(const FString& FileToVerify, const FSHAHashData& Hash1, const FSHAHashData& Hash2, FBuildPatchFloatDelegate ProgressDelegate, FBuildPatchBoolRetDelegate ShouldPauseDelegate, double& TimeSpentPaused)
{
uint8 ReturnValue = 0;
FArchive* FileReader = IFileManager::Get().CreateFileReader(*FileToVerify);
ProgressDelegate.ExecuteIfBound(0.0f);
if (FileReader != NULL)
{
FSHA1 HashState;
FSHAHashData HashValue;
const int64 FileSize = FileReader->TotalSize();
uint8* FileReadBuffer = new uint8[FileBufferSize];
while (!FileReader->AtEnd() && !FBuildPatchInstallError::HasFatalError())
{
// Pause if necessary
const double PrePauseTime = FPlatformTime::Seconds();
double PostPauseTime = PrePauseTime;
bool bShouldPause = ShouldPauseDelegate.IsBound() && ShouldPauseDelegate.Execute();
while (bShouldPause && !FBuildPatchInstallError::HasFatalError())
{
FPlatformProcess::Sleep(0.1f);
bShouldPause = ShouldPauseDelegate.Execute();
PostPauseTime = FPlatformTime::Seconds();
}
// Count up pause time
TimeSpentPaused += PostPauseTime - PrePauseTime;
// Read file and update hash state
const int64 SizeLeft = FileSize - FileReader->Tell();
const uint32 ReadLen = FMath::Min< int64 >(FileBufferSize, SizeLeft);
FileReader->Serialize(FileReadBuffer, ReadLen);
HashState.Update(FileReadBuffer, ReadLen);
const double FileSizeTemp = FileSize;
const float Progress = 1.0f - ((SizeLeft - ReadLen) / FileSizeTemp);
ProgressDelegate.ExecuteIfBound(Progress);
}
delete[] FileReadBuffer;
HashState.Final();
HashState.GetHash(HashValue.Hash);
ReturnValue = (HashValue == Hash1) ? 1 : (HashValue == Hash2) ? 2 : 0;
if (ReturnValue == 0)
{
GLog->Logf(TEXT("BuildDataGenerator: Verify failed on %s"), *FPaths::GetCleanFilename(FileToVerify));
}
FileReader->Close();
delete FileReader;
}
else
{
GLog->Logf(TEXT("BuildDataGenerator: ERROR VerifyFile cannot open %s"), *FileToVerify);
}
ProgressDelegate.ExecuteIfBound(1.0f);
return ReturnValue;
}
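Example 6 uses Tell to drive progress reporting: the bytes still to hash are the file size minus the current read position. Below is a condensed standalone sketch of that loop, using plain C++ streams instead of FArchive and leaving out the hashing and pause handling:

// Chunked read loop that reports progress derived from the current stream position.
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <vector>

void ReadWithProgress(const char* Path)
{
    std::ifstream In(Path, std::ios::binary | std::ios::ate);
    const std::streamoff FileSize = In.tellg();   // opened at the end to learn the size
    In.seekg(0);

    std::vector<char> Buffer(64 * 1024);
    std::streamoff Pos = 0;
    while (In && Pos < FileSize)
    {
        const std::streamoff SizeLeft = FileSize - Pos;
        const std::streamsize ReadLen = static_cast<std::streamsize>(
            std::min<std::streamoff>(static_cast<std::streamoff>(Buffer.size()), SizeLeft));
        In.read(Buffer.data(), ReadLen);
        Pos = In.tellg();

        const double Progress = FileSize > 0 ? double(Pos) / double(FileSize) : 1.0;
        std::printf("progress: %.1f%%\n", Progress * 100.0);
    }
}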
Example 7: Save
void FShaderCache::Save(FArchive& Ar, const map<FGuid, FShader*>& InShaders)
{
Ar << m_nPlatform;
// serialize the global shader crc
UINT NumShaderBuilderCRC = m_mapShaderBuilderCRC.size();
Ar << NumShaderBuilderCRC;
map<FShaderBuilder*, DWORD>::iterator it;
for( it = m_mapShaderBuilderCRC.begin(); it != m_mapShaderBuilderCRC.end(); ++it )
{
FShaderBuilder* ShaderBuilder = it->first;
Ar << ShaderBuilder;
Ar << it->second;
}
// serialize the global shaders
UINT NumShaders = InShaders.size();
Ar << NumShaders;
for( map<FGuid, FShader*>::const_iterator it = InShaders.begin(); it != InShaders.end(); ++it )
{
FShader* Shader = it->second;
// Serialize the shader builder; on load it can be used to check whether this shader type still exists
FShaderBuilder* ShaderBuilder = Shader->GetShaderBuilder();
FGuid ShaderId = Shader->GetId();
Ar << ShaderBuilder << ShaderId;
// Write a placeholder for now... it will hold the end position of this shader's serialized data
INT SkipOffset = Ar.Tell();
Ar << SkipOffset;
Shader->Serialize(Ar);
INT EndOffset = Ar.Tell();
Ar.Seek(SkipOffset); // seek back to the placeholder position
Ar << EndOffset; // record where this shader's data ends
Ar.Seek(EndOffset); // jump to the end position and continue with the next shader
}
}
Example 8: SerializeLineageMoves
void UMeshAnimation::SerializeLineageMoves(FArchive &Ar)
{
guard(UMeshAnimation::SerializeLineageMoves);
if (Ar.ArVer < 123 || Ar.ArLicenseeVer < 0x19)
{
// standard UE2 format
Ar << Moves;
return;
}
assert(Ar.IsLoading);
int pos, count; // pos = global skip pos, count = data count
Ar << pos << AR_INDEX(count);
Moves.Empty(count);
for (int i = 0; i < count; i++)
{
int localPos;
Ar << localPos;
MotionChunk *M = new(Moves) MotionChunk;
Ar << *M;
assert(Ar.Tell() == localPos);
}
assert(Ar.Tell() == pos);
unguard;
}
示例9: BytecodeBufferSize
//------------------------------------------------------------------------------
FStructScriptLoader::FStructScriptLoader(UStruct* TargetScriptContainer, FArchive& Ar)
: BytecodeBufferSize(0)
, SerializedScriptSize(0)
, ScriptSerializationOffset(INDEX_NONE)
{
if (!Ar.IsLoading())
{
return;
}
Ar << BytecodeBufferSize;
Ar << SerializedScriptSize;
if (SerializedScriptSize > 0)
{
ScriptSerializationOffset = Ar.Tell();
}
ClearScriptCode(TargetScriptContainer);
}
Example 10: GetCachedData
FCacheEntryMetadata* FRuntimeAssetCacheBackend::GetCachedData(const FName Bucket, const TCHAR* CacheKey, TArray<uint8>& OutData)
{
FCacheEntryMetadata* Result = nullptr;
FArchive* Ar = CreateReadArchive(Bucket, CacheKey);
if (!Ar)
{
return Result;
}
Result = PreloadMetadata(Ar);
int64 TotalSize = Ar->TotalSize();
int64 CurrentPosition = Ar->Tell();
int64 NumberOfBytesToSerialize = TotalSize - CurrentPosition;
OutData.Reset();
OutData.AddUninitialized(NumberOfBytesToSerialize);
Ar->Serialize(OutData.GetData(), NumberOfBytesToSerialize);
Ar->Close();
delete Ar;
return Result;
}
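Example 10 sizes the payload as TotalSize() minus Tell() once the metadata has been read, then pulls the rest of the archive in one call. A standalone sketch of "read everything after the current position" follows; std::ifstream is the stand-in here, and since it has no TotalSize, the total is taken by seeking to the end and back.

// Read everything that follows the current position; the size is computed
// the same way as TotalSize() - Tell() in GetCachedData above.
#include <cstddef>
#include <fstream>
#include <vector>

std::vector<char> ReadRemainder(std::ifstream& In)
{
    const std::streamoff Current = In.tellg();
    In.seekg(0, std::ios::end);
    const std::streamoff Total = In.tellg();   // stand-in for Ar->TotalSize()
    In.seekg(Current);

    std::vector<char> Data(static_cast<std::size_t>(Total - Current));
    In.read(Data.data(), static_cast<std::streamsize>(Data.size()));
    return Data;
}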
Example 11: ReadTimeArray
static void ReadTimeArray(FArchive &Ar, int NumKeys, TArray<float> &Times, int NumFrames)
{
guard(ReadTimeArray);
Times.Empty(NumKeys);
if (NumKeys <= 1) return;
// appPrintf(" pos=%4X keys (max=%X)[ ", Ar.Tell(), NumFrames);
if (NumFrames < 256)
{
for (int k = 0; k < NumKeys; k++)
{
uint8 v;
Ar << v;
Times.Add(v);
// if (k < 4 || k > NumKeys - 5) appPrintf(" %02X ", v);
// else if (k == 4) appPrintf("...");
}
}
else
{
for (int k = 0; k < NumKeys; k++)
{
uint16 v;
Ar << v;
Times.Add(v);
// if (k < 4 || k > NumKeys - 5) appPrintf(" %04X ", v);
// else if (k == 4) appPrintf("...");
}
}
// appPrintf(" ]\n");
// align to 4 bytes
Ar.Seek(Align(Ar.Tell(), 4));
unguard;
}
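The final Ar.Seek(Align(Ar.Tell(), 4)) in Example 11 rounds the read position up to the next 4-byte boundary. The rounding itself is just a power-of-two mask; AlignUp below is a local stand-in written for this sketch, not UE's own Align template:

// Round a stream position up to the next 4-byte boundary, as done with
// Ar.Seek(Align(Ar.Tell(), 4)) above. AlignUp is a local helper for this sketch.
#include <cstdint>

constexpr std::int64_t AlignUp(std::int64_t Value, std::int64_t Alignment)
{
    // Alignment must be a power of two for the mask trick to work.
    return (Value + Alignment - 1) & ~(Alignment - 1);
}

static_assert(AlignUp(0, 4) == 0, "already aligned stays put");
static_assert(AlignUp(5, 4) == 8, "positions round up");
static_assert(AlignUp(8, 4) == 8, "exact multiples are unchanged");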
Example 12: VerifyChunkFile
bool FBuildPatchUtils::VerifyChunkFile( FArchive& ChunkFileData, bool bQuickCheck )
{
const int64 FileSize = ChunkFileData.TotalSize();
bool bSuccess = ChunkFileData.IsLoading();
if ( !bSuccess )
{
GLog->Logf( TEXT( "BuildPatchServices: ERROR: VerifyChunkFile expected readonly archive" ) );
}
else
{
// Read the header
FChunkHeader Header;
ChunkFileData << Header;
// Check header magic
if ( !Header.IsValidMagic() )
{
bSuccess = false;
GLog->Logf( TEXT( "BuildPatchServices: ERROR: VerifyChunkFile corrupt header" ) );
}
// Check Header and data size
if ( bSuccess && ( Header.HeaderSize + Header.DataSize ) != FileSize )
{
bSuccess = false;
GLog->Logf( TEXT( "BuildPatchServices: ERROR: VerifyChunkFile header info does not match file size" ) );
}
if( bSuccess && !bQuickCheck )
{
// Hashes for checking data
FSHA1 SHAHasher;
FSHAHashData SHAHash;
uint64 CycPoly64Hash = 0;
// Load the data to check
uint8* FileReadBuffer = new uint8[ FileBufferSize ];
int64 DataOffset = 0;
switch ( Header.StoredAs )
{
case FChunkHeader::STORED_RAW:
while( !ChunkFileData.AtEnd() )
{
const int64 SizeLeft = FileSize - ChunkFileData.Tell();
const uint32 ReadLen = FMath::Min< int64 >( FileBufferSize, SizeLeft );
ChunkFileData.Serialize( FileReadBuffer, ReadLen );
switch ( Header.HashType )
{
case FChunkHeader::HASH_ROLLING:
CycPoly64Hash = FCycPoly64Hash::GetHashForDataSet(FileReadBuffer, ReadLen, CycPoly64Hash);
break;
case FChunkHeader::HASH_SHA1:
SHAHasher.Update( FileReadBuffer, ReadLen );
break;
default:
check( false ); // @TODO LSwift: Implement other storage methods!
bSuccess = false;
break;
}
DataOffset += ReadLen;
}
if( bSuccess )
{
switch ( Header.HashType )
{
case FChunkHeader::HASH_ROLLING:
bSuccess = Header.RollingHash == CycPoly64Hash;
break;
case FChunkHeader::HASH_SHA1:
SHAHasher.Final();
SHAHasher.GetHash( SHAHash.Hash );
bSuccess = SHAHash == Header.SHAHash;
break;
}
if (!bSuccess)
{
GLog->Logf(TEXT("BuildPatchServices: ERROR: VerifyChunkFile file hashcheck failed"));
}
}
break;
default:
GLog->Logf( TEXT( "BuildPatchServices: ERROR: VerifyChunkFile failed, unknown storage type" ) );
bSuccess = false;
break;
}
delete[] FileReadBuffer;
}
}
return bSuccess;
}
Example 13: ReserveChunkInventorySlotForce
void FBuildPatchChunkCache::ReserveChunkInventorySlotForce( const FGuid& ChunkGuid )
{
// If already reserved, return immediately
if( ChunkCache.HasReservation( ChunkGuid ) || ChunkCache.Contains( ChunkGuid ) )
{
return;
}
// Begin by checking if any slots can be freed
ChunkCache.PurgeUnreferenced();
// Try to add the reservation
bool bReservationAccepted = ChunkCache.TryAddReservation( ChunkGuid );
// If we couldn't reserve, we need to boot out a chunk for this required one
if( bReservationAccepted == false )
{
// We create a unique ref array from the use order so that chunks not needed
// for longer times end up nearer the bottom of the array
TArray< FGuid > ChunkPriorityList;
ChunkInfoLock.Lock();
for( int32 ChunkUseOrderStackIdx = ChunkUseOrderStack.Num() - 1; ChunkUseOrderStackIdx >= 0 ; --ChunkUseOrderStackIdx )
{
ChunkPriorityList.AddUnique( ChunkUseOrderStack[ ChunkUseOrderStackIdx ] );
}
ChunkInfoLock.Unlock();
// Starting at the bottom of the list, we look for a chunk that is contained in the cache
for( int32 ChunkPriorityListIdx = ChunkPriorityList.Num() - 1; ChunkPriorityListIdx >= 0 && !bReservationAccepted; --ChunkPriorityListIdx )
{
const FGuid& LowPriChunk = ChunkPriorityList[ ChunkPriorityListIdx ];
BuildProgress->WaitWhilePaused();
// Check if there were any errors while paused, like canceling
if( FBuildPatchInstallError::HasFatalError() )
{
return;
}
if( ChunkCache.Contains( LowPriChunk ) )
{
GWarn->Logf( TEXT( "FBuildPatchChunkCache: Booting chunk %s" ), *LowPriChunk.ToString() );
// Save chunk to disk so we don't have to download again
bool bSuccess = true;
const FString NewChunkFilename = FBuildPatchUtils::GetChunkOldFilename( ChunkCacheStage, LowPriChunk );
FChunkFile* LowPriChunkFile = ChunkCache.Get( LowPriChunk );
FChunkHeader* LowPriChunkHeader;
uint8* LowPriChunkData;
LowPriChunkFile->GetDataLock( &LowPriChunkData, &LowPriChunkHeader );
FArchive* FileOut = IFileManager::Get().CreateFileWriter( *NewChunkFilename );
bSuccess = FileOut != NULL;
const int32 LastError = FPlatformMisc::GetLastError();
if( bSuccess )
{
// Setup Header
*FileOut << *LowPriChunkHeader;
LowPriChunkHeader->HeaderSize = FileOut->Tell();
LowPriChunkHeader->StoredAs = FChunkHeader::STORED_RAW;
LowPriChunkHeader->DataSize = FBuildPatchData::ChunkDataSize; // This would change if compressing/encrypting
// Write out file
FileOut->Seek( 0 );
*FileOut << *LowPriChunkHeader;
FileOut->Serialize( LowPriChunkData, FBuildPatchData::ChunkDataSize );
FileOut->Close();
delete FileOut;
}
LowPriChunkFile->ReleaseDataLock();
// Setup new chunk origin
if( bSuccess )
{
ChunkOrigins[ LowPriChunk ] = EChunkOrigin::Harddisk;
}
else
{
// Queue download if save failed
ChunkOrigins[ LowPriChunk ] = EChunkOrigin::Download;
FBuildPatchDownloader::Get().AddChunkToDownload( LowPriChunk );
FBuildPatchAnalytics::RecordChunkCacheError( ChunkGuid, NewChunkFilename, LastError, TEXT( "ChunkBooting" ), TEXT( "Chunk Save Failed" ) );
}
// Boot this chunk
ChunkCache.Remove( LowPriChunk );
// Try get the reservation again!
bReservationAccepted = ChunkCache.TryAddReservation( ChunkGuid );
// Count the boot
NumChunksCacheBooted.Increment();
}
}
// We must have been able to make room
check( bReservationAccepted );
}
}
Example 14: Serialize
void UTexture::Serialize(FArchive &Ar)
{
guard(UTexture::Serialize);
Super::Serialize(Ar);
#if BIOSHOCK
TRIBES_HDR(Ar, 0x2E);
if (Ar.Game == GAME_Bioshock && t3_hdrSV >= 1)
Ar << CachedBulkDataSize;
if (Ar.Game == GAME_Bioshock && Format == 12) // remap format; note: Bioshock used 3DC name, but real format is DXT5N
Format = TEXF_DXT5N;
#endif // BIOSHOCK
#if SWRC
if (Ar.Game == GAME_RepCommando)
{
if (Format == 14) Format = TEXF_CxV8U8; //?? not verified
}
#endif // SWRC
#if VANGUARD
if (Ar.Game == GAME_Vanguard && Ar.ArVer >= 128 && Ar.ArLicenseeVer >= 25)
{
// has some table for fast mipmap lookups
Ar.Seek(Ar.Tell() + 142); // skip that table
// serialize mips using AR_INDEX count (this game uses int for array counts in all other places)
int Count;
Ar << AR_INDEX(Count);
Mips.AddDefaulted(Count);
for (int i = 0; i < Count; i++)
Ar << Mips[i];
return;
}
#endif // VANGUARD
#if AA2
if (Ar.Game == GAME_AA2 && Ar.ArLicenseeVer >= 8)
{
int unk; // always 10619
Ar << unk;
}
#endif // AA2
Ar << Mips;
if (Ar.Engine() == GAME_UE1)
{
// UE1
bMasked = false; // ignored by UE1, used surface.PolyFlags instead (but UE2 ignores PolyFlags ...)
if (bHasComp) // skip compressed mipmaps
{
TArray<FMipmap> CompMips;
Ar << CompMips;
}
}
#if XIII
if (Ar.Game == GAME_XIII)
{
if (Ar.ArLicenseeVer >= 42)
{
// serialize palette
if (Format == TEXF_P8 || Format == 13) // 13 == TEXF_P4
{
assert(!Palette);
Palette = new UPalette;
Ar << Palette->Colors;
}
}
if (Ar.ArLicenseeVer >= 55)
Ar.Seek(Ar.Tell() + 3);
}
#endif // XIII
#if EXTEEL
if (Ar.Game == GAME_Exteel)
{
// note: this property is serialized as UObject's property too
byte MaterialType; // enum GFMaterialType
Ar << MaterialType;
}
#endif // EXTEEL
unguard;
}
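Several branches in Example 14 step over fixed-size, game-specific blocks with Ar.Seek(Ar.Tell() + N). The equivalent with a standard stream is simply a relative seek; a tiny sketch:

// Skip a fixed-size block, mirroring Ar.Seek(Ar.Tell() + N) above.
#include <istream>

void SkipBytes(std::istream& In, std::streamoff Count)
{
    // Relative seek from the current position.
    In.seekg(Count, std::ios::cur);
}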
Example 15: ReadXprFile
static bool ReadXprFile(const CGameFileInfo *file)
{
guard(ReadXprFile);
FArchive *Ar = appCreateFileReader(file);
int Tag, FileLen, DataStart, DataCount;
*Ar << Tag << FileLen << DataStart << DataCount;
//?? "XPR0" - xpr variant with a single object (texture) inside
if (Tag != BYTES4('X','P','R','1'))
{
#if XPR_DEBUG
appPrintf("Unknown XPR tag in %s\n", file->RelativeName);
#endif
delete Ar;
return true;
}
#if XPR_DEBUG
appPrintf("Scanning %s ...\n", file->RelativeName);
#endif
XprInfo *Info = new(xprFiles) XprInfo;
Info->File = file;
Info->DataStart = DataStart;
// read filelist
int i;
for (i = 0; i < DataCount; i++)
{
int NameOffset, DataOffset;
*Ar << NameOffset << DataOffset;
int savePos = Ar->Tell();
Ar->Seek(NameOffset + 12);
// read name
char c, buf[256];
int n = 0;
while (true)
{
*Ar << c;
if (n < ARRAY_COUNT(buf))
buf[n++] = c;
if (!c) break;
}
buf[ARRAY_COUNT(buf)-1] = 0; // just in case
// create item
XprEntry *Entry = new(Info->Items) XprEntry;
appStrncpyz(Entry->Name, buf, ARRAY_COUNT(Entry->Name));
Entry->DataOffset = DataOffset + 12;
assert(Entry->DataOffset < DataStart);
// seek back
Ar->Seek(savePos);
// setup size of previous item
if (i >= 1)
{
XprEntry *PrevEntry = &Info->Items[i - 1];
PrevEntry->DataSize = Entry->DataOffset - PrevEntry->DataOffset;
}
// setup size of the last item
if (i == DataCount - 1)
Entry->DataSize = DataStart - Entry->DataOffset;
}
// scan data
// data block is either embedded in this block or followed after DataStart position
for (i = 0; i < DataCount; i++)
{
XprEntry *Entry = &Info->Items[i];
#if XPR_DEBUG
// appPrintf(" %08X [%08X] %s\n", Entry->DataOffset, Entry->DataSize, Entry->Name);
#endif
Ar->Seek(Entry->DataOffset);
int id;
*Ar << id;
switch (id)
{
case 0x80020001:
// header is 4 dwords + immediately followed data
Entry->DataOffset += 4 * 4;
Entry->DataSize -= 4 * 4;
break;
case 0x00040001:
// header is 5 dwords + external data
{
int pos;
*Ar << pos;
Entry->DataOffset = DataStart + pos;
}
break;
case 0x00020001:
// header is 4 dwords + external data
{
int d1, d2, pos;
*Ar << d1 << d2 << pos;
Entry->DataOffset = DataStart + pos;
}
break;
default:
// header is 2 dwords - offset and size + external data
{
//......... the rest of this example is omitted .........