本文整理汇总了C++中STORM_FREE函数的典型用法代码示例。如果您正苦于以下问题:C++ STORM_FREE函数的具体用法?C++ STORM_FREE怎么用?C++ STORM_FREE使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了STORM_FREE函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: LoadFilePatch_BSD0
/* Loads the BSD0 patch payload for the given file.
 * The (decompressed) patch data is placed immediately behind the patch
 * header in memory. Returns ERROR_SUCCESS or an error code.
 */
static int LoadFilePatch_BSD0(TMPQFile * hf, PMPQ_PATCH_HEADER pFullPatch)
{
    unsigned char * pbTarget = (unsigned char *)(pFullPatch + 1);
    unsigned char * pbWorkBuf = NULL;
    size_t cbTarget = pFullPatch->dwSizeOfPatchData - sizeof(MPQ_PATCH_HEADER);
    size_t cbSource = pFullPatch->dwXfrmBlockSize - SIZE_OF_XFRM_HEADER;
    size_t cbTransferred = 0;
    int nError = ERROR_SUCCESS;

    /* If the stored size is not smaller than the output size,
     * the patch data is stored uncompressed: read it straight into place. */
    if(cbSource >= cbTarget)
    {
        SFileReadFile((void *)hf, pbTarget, cbTarget, &cbTransferred);
        return (cbTransferred == cbTarget) ? ERROR_SUCCESS : ERROR_FILE_CORRUPT;
    }

    /* Compressed: pull the raw (RLE-packed) data into a temporary buffer */
    pbWorkBuf = STORM_ALLOC(uint8_t, cbSource);
    if(pbWorkBuf == NULL)
        nError = ERROR_NOT_ENOUGH_MEMORY;

    /* Read the compressed patch data */
    if(nError == ERROR_SUCCESS)
    {
        SFileReadFile((void *)hf, pbWorkBuf, cbSource, &cbTransferred);
        if(cbTransferred != cbSource)
            nError = ERROR_FILE_CORRUPT;
    }

    /* Run-length decode into the target area */
    if(nError == ERROR_SUCCESS)
        Decompress_RLE(pbTarget, cbTarget, pbWorkBuf, cbSource);

    if(pbWorkBuf != NULL)
        STORM_FREE(pbWorkBuf);
    return nError;
}
示例2: FindPatchPrefix_SC2_MatchFiles
static bool FindPatchPrefix_SC2_MatchFiles(TMPQArchive * haBase, TMPQArchive * haPatch, TFileEntry * pBaseEntry)
{
TMPQNamePrefix * pPatchPrefix;
char * szPatchFileName;
char * szPlainName;
size_t cchWorkBuffer = 0x400;
bool bResult = false;
// First-level patches: Find the same file within the patch archive
// and verify by MD5-before-patch
if(haBase->haPatch == NULL)
{
TFileEntry * pFileTableEnd = haPatch->pFileTable + haPatch->dwFileTableSize;
TFileEntry * pFileEntry;
// Allocate working buffer for merging LST file
szPatchFileName = STORM_ALLOC(char, cchWorkBuffer);
if(szPatchFileName != NULL)
{
// Parse the entire file table
for(pFileEntry = haPatch->pFileTable; pFileEntry < pFileTableEnd; pFileEntry++)
{
// Look for "patch_metadata" file
if(IsPatchMetadataFile(pFileEntry))
{
// Construct the name of the MD5 file
strcpy(szPatchFileName, pFileEntry->szFileName);
szPlainName = (char *)GetPlainFileName(szPatchFileName);
strcpy(szPlainName, pBaseEntry->szFileName);
// Check for matching MD5 file
if(IsMatchingPatchFile(haPatch, szPatchFileName, pBaseEntry->md5))
{
bResult = CreatePatchPrefix(haPatch, szPatchFileName, (size_t)(szPlainName - szPatchFileName));
break;
}
}
}
// Delete the merge buffer
STORM_FREE(szPatchFileName);
}
}
示例3: Compress_PKLIB
/* Compresses a buffer using the PKWARE DCL "implode" algorithm.
 * On success, *pcbOutBuffer receives the compressed size; on allocation
 * failure or compression error it is left unchanged.
 */
static void Compress_PKLIB(void * pvOutBuffer, int * pcbOutBuffer, void * pvInBuffer, int cbInBuffer, int * pCmpType, int nCmpLevel)
{
    TDataInfo Info;                         // Data information passed to pklib callbacks
    unsigned int dict_size;                 // Implode dictionary size
    unsigned int ctype = CMP_BINARY;        // Compression type
    char * work_buf;

    // These parameters are not used by the implode codec
    STORMLIB_UNUSED(pCmpType);
    STORMLIB_UNUSED(nCmpLevel);

    // Allocate pklib's work buffer; bail out silently on low memory
    work_buf = STORM_ALLOC(char, CMP_BUFFER_SIZE);
    if(work_buf == NULL)
        return;
    memset(work_buf, 0, CMP_BUFFER_SIZE);

    // Describe the input and output ranges for the pklib callbacks
    Info.pbInBuff = (unsigned char *)pvInBuffer;
    Info.pbInBuffEnd = (unsigned char *)pvInBuffer + cbInBuffer;
    Info.pbOutBuff = (unsigned char *)pvOutBuffer;
    Info.pbOutBuffEnd = (unsigned char *)pvOutBuffer + *pcbOutBuffer;

    //
    // Pick the dictionary size from the input length.
    //
    // Diablo I uses fixed dictionary size of CMP_IMPLODE_DICT_SIZE3;
    // Starcraft I uses the variable dictionary size chosen below.
    //
    if(cbInBuffer >= 0xC00)
        dict_size = CMP_IMPLODE_DICT_SIZE3;
    else if(cbInBuffer >= 0x600)
        dict_size = CMP_IMPLODE_DICT_SIZE2;
    else
        dict_size = CMP_IMPLODE_DICT_SIZE1;

    // Run the compression; report the output size only on success
    if(implode(ReadInputData, WriteOutputData, work_buf, &Info, &ctype, &dict_size) == CMP_NO_ERROR)
        *pcbOutBuffer = (int)(Info.pbOutBuff - (unsigned char *)pvOutBuffer);

    STORM_FREE(work_buf);
}
示例4: Compress_PKLIB
// Compresses a buffer using the PKWARE DCL "implode" algorithm.
// On success, *pcbOutBuffer receives the compressed size; on allocation
// failure or compression error it is left unchanged.
static void Compress_PKLIB(
    char * pbOutBuffer,
    int * pcbOutBuffer,
    char * pbInBuffer,
    int cbInBuffer,
    int * /* pCmpType */,
    int /* nCmpLevel */)
{
    TDataInfo Info;                                      // Data information
    char * work_buf = STORM_ALLOC(char, CMP_BUFFER_SIZE);// Pklib's work buffer
    unsigned int dict_size;                              // Dictionary size
    unsigned int ctype = CMP_BINARY;                     // Compression type

    // BUGFIX: Handle the no-memory condition. The original version passed
    // an unchecked allocation straight to memset(), which is undefined
    // behavior when STORM_ALLOC returns NULL.
    if(work_buf == NULL)
        return;

    // Fill data information structure
    memset(work_buf, 0, CMP_BUFFER_SIZE);
    Info.pbInBuff = pbInBuffer;
    Info.pbInBuffEnd = pbInBuffer + cbInBuffer;
    Info.pbOutBuff = pbOutBuffer;
    Info.pbOutBuffEnd = pbOutBuffer + *pcbOutBuffer;

    //
    // Set the dictionary size
    //
    // Diablo I uses fixed dictionary size of CMP_IMPLODE_DICT_SIZE3
    // Starcraft uses the variable dictionary size based on algorithm below
    //
    if (cbInBuffer < 0x600)
        dict_size = CMP_IMPLODE_DICT_SIZE1;
    else if(0x600 <= cbInBuffer && cbInBuffer < 0xC00)
        dict_size = CMP_IMPLODE_DICT_SIZE2;
    else
        dict_size = CMP_IMPLODE_DICT_SIZE3;

    // Do the compression; only report the output size if implode succeeded
    if(implode(ReadInputData, WriteOutputData, work_buf, &Info, &ctype, &dict_size) == CMP_NO_ERROR)
        *pcbOutBuffer = (int)(Info.pbOutBuff - pbOutBuffer);

    STORM_FREE(work_buf);
}
示例5: ApplyMpqPatch_COPY
// Applies a COPY-type MPQ patch: the patch payload verbatim replaces
// the current file content held in the TMPQFile structure.
static int ApplyMpqPatch_COPY(
    TMPQFile * hf,
    TPatchHeader * pPatchHeader)
{
    DWORD cbReplacement = pPatchHeader->dwXfrmBlockSize - SIZE_OF_XFRM_HEADER;
    LPBYTE pbReplacement = STORM_ALLOC(BYTE, cbReplacement);

    if(pbReplacement == NULL)
        return ERROR_NOT_ENOUGH_MEMORY;

    // The replacement data follows immediately after the patch header
    memcpy(pbReplacement, (LPBYTE)pPatchHeader + sizeof(TPatchHeader), cbReplacement);

    // Release the previous content and swap the new data in
    STORM_FREE(hf->pbFileData);
    hf->pbFileData = pbReplacement;
    hf->cbFileData = cbReplacement;
    return ERROR_SUCCESS;
}
示例6: Decompress_PKLIB
// Decompresses a buffer packed with the PKWARE DCL "implode" algorithm.
// Returns 1 on success (and sets *pcbOutBuffer to the decompressed size),
// or 0 on failure (allocation failure or pklib produced no output).
static int Decompress_PKLIB(char * pbOutBuffer, int * pcbOutBuffer, char * pbInBuffer, int cbInBuffer)
{
    TDataInfo Info;                                      // Data information
    char * work_buf = STORM_ALLOC(char, EXP_BUFFER_SIZE);// Pklib's work buffer
    int nResult = 1;

    // BUGFIX: Check the allocation; the original passed an unchecked
    // pointer to memset(), which is undefined behavior on NULL.
    if(work_buf == NULL)
        return 0;

    // Fill data information structure
    memset(work_buf, 0, EXP_BUFFER_SIZE);
    Info.pbInBuff = pbInBuffer;
    Info.pbInBuffEnd = pbInBuffer + cbInBuffer;
    Info.pbOutBuff = pbOutBuffer;
    Info.pbOutBuffEnd = pbOutBuffer + *pcbOutBuffer;

    // Do the decompression
    explode(ReadInputData, WriteOutputData, work_buf, &Info);

    // If PKLIB is unable to decompress the data, return 0.
    // BUGFIX: The original returned here without freeing work_buf,
    // leaking the work buffer on every failed decompression.
    if(Info.pbOutBuff == pbOutBuffer)
        nResult = 0;
    else
        *pcbOutBuffer = (int)(Info.pbOutBuff - pbOutBuffer);  // Number of decompressed bytes

    STORM_FREE(work_buf);
    return nResult;
}
示例7: ApplyMpqPatch_BSD0
static int ApplyMpqPatch_BSD0(
TMPQFile * hf,
TPatchHeader * pPatchHeader)
{
PBLIZZARD_BSDIFF40_FILE pBsdiff;
LPDWORD pCtrlBlock;
LPBYTE pbPatchData = (LPBYTE)pPatchHeader + sizeof(TPatchHeader);
LPBYTE pDataBlock;
LPBYTE pExtraBlock;
LPBYTE pbNewData = NULL;
LPBYTE pbOldData = (LPBYTE)hf->pbFileData;
DWORD dwNewOffset = 0; // Current position to patch
DWORD dwOldOffset = 0; // Current source position
DWORD dwNewSize; // Patched file size
DWORD dwOldSize = hf->cbFileData; // File size before patch
// Get pointer to the patch header
// Format of BSDIFF header corresponds to original BSDIFF, which is:
// 0000 8 bytes signature "BSDIFF40"
// 0008 8 bytes size of the control block
// 0010 8 bytes size of the data block
// 0018 8 bytes new size of the patched file
pBsdiff = (PBLIZZARD_BSDIFF40_FILE)pbPatchData;
pbPatchData += sizeof(BLIZZARD_BSDIFF40_FILE);
// Get pointer to the 32-bit BSDIFF control block
// The control block follows immediately after the BSDIFF header
// and consists of three 32-bit integers
// 0000 4 bytes Length to copy from the BSDIFF data block the new file
// 0004 4 bytes Length to copy from the BSDIFF extra block
// 0008 4 bytes Size to increment source file offset
pCtrlBlock = (LPDWORD)pbPatchData;
pbPatchData += (size_t)BSWAP_INT64_UNSIGNED(pBsdiff->CtrlBlockSize);
// Get the pointer to the data block
pDataBlock = (LPBYTE)pbPatchData;
pbPatchData += (size_t)BSWAP_INT64_UNSIGNED(pBsdiff->DataBlockSize);
// Get the pointer to the extra block
pExtraBlock = (LPBYTE)pbPatchData;
dwNewSize = (DWORD)BSWAP_INT64_UNSIGNED(pBsdiff->NewFileSize);
// Allocate new buffer
pbNewData = STORM_ALLOC(BYTE, dwNewSize);
if(pbNewData == NULL)
return ERROR_NOT_ENOUGH_MEMORY;
// Now patch the file
while(dwNewOffset < dwNewSize)
{
DWORD dwAddDataLength = BSWAP_INT32_UNSIGNED(pCtrlBlock[0]);
DWORD dwMovDataLength = BSWAP_INT32_UNSIGNED(pCtrlBlock[1]);
DWORD dwOldMoveLength = BSWAP_INT32_UNSIGNED(pCtrlBlock[2]);
DWORD i;
// Sanity check
if((dwNewOffset + dwAddDataLength) > dwNewSize)
{
STORM_FREE(pbNewData);
return ERROR_FILE_CORRUPT;
}
// Read the diff string to the target buffer
memcpy(pbNewData + dwNewOffset, pDataBlock, dwAddDataLength);
pDataBlock += dwAddDataLength;
// Now combine the patch data with the original file
for(i = 0; i < dwAddDataLength; i++)
{
if(dwOldOffset < dwOldSize)
pbNewData[dwNewOffset] = pbNewData[dwNewOffset] + pbOldData[dwOldOffset];
dwNewOffset++;
dwOldOffset++;
}
// Sanity check
if((dwNewOffset + dwMovDataLength) > dwNewSize)
{
STORM_FREE(pbNewData);
return ERROR_FILE_CORRUPT;
}
// Copy the data from the extra block in BSDIFF patch
memcpy(pbNewData + dwNewOffset, pExtraBlock, dwMovDataLength);
pExtraBlock += dwMovDataLength;
dwNewOffset += dwMovDataLength;
// Move the old offset
if(dwOldMoveLength & 0x80000000)
dwOldMoveLength = 0x80000000 - dwOldMoveLength;
dwOldOffset += dwOldMoveLength;
pCtrlBlock += 3;
}
// Free the old file data
STORM_FREE(hf->pbFileData);
// Put the new data to the fil structure
hf->pbFileData = pbNewData;
//.........这里部分代码省略.........
示例8: SFileCompactArchive
//.........这里部分代码省略.........
/* First of all, we have to check of we are able to decrypt all files. */
/* If not, sorry, but the archive cannot be compacted. */
if(nError == ERROR_SUCCESS)
{
/* Initialize the progress variables for compact callback */
FileStream_GetSize(ha->pStream, &(ha->CompactTotalBytes));
ha->CompactBytesProcessed = 0;
nError = CheckIfAllKeysKnown(ha, szListFile, pFileKeys);
}
/* Get the temporary file name and create it */
if(nError == ERROR_SUCCESS)
{
strcpy(szTempFile, FileStream_GetFileName(ha->pStream));
if((szTemp = strrchr(szTempFile, '.')) != NULL)
strcpy(szTemp + 1, "mp_");
else
strcat(szTempFile, "_");
pTempStream = FileStream_CreateFile(szTempFile, STREAM_PROVIDER_FLAT | BASE_PROVIDER_FILE);
if(pTempStream == NULL)
nError = GetLastError();
}
/* Write the data before MPQ user data (if any) */
if(nError == ERROR_SUCCESS && ha->UserDataPos != 0)
{
/* Inform the application about the progress */
if(ha->pfnCompactCB != NULL)
ha->pfnCompactCB(ha->pvCompactUserData, CCB_COPYING_NON_MPQ_DATA, ha->CompactBytesProcessed, ha->CompactTotalBytes);
ByteOffset = 0;
ByteCount = ha->UserDataPos;
nError = CopyNonMpqData(ha, ha->pStream, pTempStream, &ByteOffset, ByteCount);
}
/* Write the MPQ user data (if any) */
if(nError == ERROR_SUCCESS && ha->MpqPos > ha->UserDataPos)
{
/* At this point, we assume that the user data size is equal */
/* to pUserData->dwHeaderOffs. */
/* If this assumption doesn't work, then we have an unknown version of MPQ */
ByteOffset = ha->UserDataPos;
ByteCount = ha->MpqPos - ha->UserDataPos;
assert(ha->pUserData != NULL);
assert(ha->pUserData->dwHeaderOffs == ByteCount);
nError = CopyNonMpqData(ha, ha->pStream, pTempStream, &ByteOffset, ByteCount);
}
/* Write the MPQ header */
if(nError == ERROR_SUCCESS)
{
TMPQHeader SaveMpqHeader;
/* Write the MPQ header to the file */
memcpy(&SaveMpqHeader, ha->pHeader, ha->pHeader->dwHeaderSize);
BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_1);
BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_2);
BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_3);
BSWAP_TMPQHEADER(&SaveMpqHeader, MPQ_FORMAT_VERSION_4);
if(!FileStream_Write(pTempStream, NULL, &SaveMpqHeader, ha->pHeader->dwHeaderSize))
nError = GetLastError();
/* Update the progress */
ha->CompactBytesProcessed += ha->pHeader->dwHeaderSize;
}
/* Now copy all files */
if(nError == ERROR_SUCCESS)
nError = CopyMpqFiles(ha, pFileKeys, pTempStream);
/* If succeeded, switch the streams */
if(nError == ERROR_SUCCESS)
{
ha->dwFlags |= MPQ_FLAG_CHANGED;
if(FileStream_Replace(ha->pStream, pTempStream))
pTempStream = NULL;
else
nError = ERROR_CAN_NOT_COMPLETE;
}
/* Final user notification */
if(nError == ERROR_SUCCESS && ha->pfnCompactCB != NULL)
{
ha->CompactBytesProcessed += (ha->pHeader->dwHashTableSize * sizeof(TMPQHash));
ha->CompactBytesProcessed += (ha->dwFileTableSize * sizeof(TMPQBlock));
ha->pfnCompactCB(ha->pvCompactUserData, CCB_CLOSING_ARCHIVE, ha->CompactBytesProcessed, ha->CompactTotalBytes);
}
/* Cleanup and return */
if(pTempStream != NULL)
FileStream_Close(pTempStream);
if(pFileKeys != NULL)
STORM_FREE(pFileKeys);
if(nError != ERROR_SUCCESS)
SetLastError(nError);
return (nError == ERROR_SUCCESS);
}
示例9: ReadMpqFileSingleUnit
static int ReadMpqFileSingleUnit(TMPQFile * hf, void * pvBuffer, DWORD dwFilePos, DWORD dwToRead, LPDWORD pdwBytesRead)
{
ULONGLONG RawFilePos = hf->RawFilePos;
TMPQArchive * ha = hf->ha;
TFileEntry * pFileEntry = hf->pFileEntry;
LPBYTE pbCompressed = NULL;
LPBYTE pbRawData = NULL;
bool bIsReallyCompressed = false;
int nError = ERROR_SUCCESS;
// If the file buffer is not allocated yet, do it.
if(hf->pbFileSector == NULL)
{
nError = AllocateSectorBuffer(hf);
if(nError != ERROR_SUCCESS)
return nError;
pbRawData = hf->pbFileSector;
}
// If the file is a patch file, adjust raw data offset
if(hf->pPatchInfo != NULL)
RawFilePos += hf->pPatchInfo->dwLength;
// If the file sector is not loaded yet, do it
if(hf->dwSectorOffs != 0)
{
// Is the file compressed?
if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS)
{
// Allocate space for compressed data
pbCompressed = STORM_ALLOC(BYTE, pFileEntry->dwCmpSize);
if(pbCompressed == NULL)
return ERROR_NOT_ENOUGH_MEMORY;
bIsReallyCompressed = true;
pbRawData = pbCompressed;
}
// Load the raw (compressed, encrypted) data
if(!FileStream_Read(ha->pStream, &RawFilePos, pbRawData, pFileEntry->dwCmpSize))
{
STORM_FREE(pbCompressed);
return GetLastError();
}
// If the file is encrypted, we have to decrypt the data first
if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
{
BSWAP_ARRAY32_UNSIGNED(pbRawData, pFileEntry->dwCmpSize);
DecryptMpqBlock(pbRawData, pFileEntry->dwCmpSize, hf->dwFileKey);
BSWAP_ARRAY32_UNSIGNED(pbRawData, pFileEntry->dwCmpSize);
}
//
// In "wow-update-12694.MPQ" from Wow-Cataclysm BETA:
//
// File CmpSize FileSize Data
// -------------------------------------- ------- -------- ---------------
// esES\DBFilesClient\LightSkyBox.dbc 0xBE 0xBC Is compressed
// deDE\DBFilesClient\MountCapability.dbc 0x93 0x77 Is uncompressed
//
// Now tell me how to deal with this mess.
//
if(hf->pPatchInfo != NULL)
{
if(pbRawData[0] == 'P' && pbRawData[1] == 'T' && pbRawData[2] == 'C' && pbRawData[3] == 'H')
{
assert(pFileEntry->dwCmpSize >= hf->dwDataSize);
bIsReallyCompressed = false;
}
}
else
{
if(pFileEntry->dwCmpSize >= hf->dwDataSize)
bIsReallyCompressed = false;
}
// If the file is compressed, we have to decompress it now
if(bIsReallyCompressed)
{
int cbOutBuffer = (int)hf->dwDataSize;
// Note: Single unit files compressed with IMPLODE are not supported by Blizzard
if(pFileEntry->dwFlags & MPQ_FILE_IMPLODE)
{
if(!SCompExplode((char *)hf->pbFileSector, &cbOutBuffer, (char *)pbRawData, (int)pFileEntry->dwCmpSize))
nError = ERROR_FILE_CORRUPT;
}
if(pFileEntry->dwFlags & MPQ_FILE_COMPRESS)
{
if(!SCompDecompress((char *)hf->pbFileSector, &cbOutBuffer, (char *)pbRawData, (int)pFileEntry->dwCmpSize))
nError = ERROR_FILE_CORRUPT;
}
}
else
{
if(pbRawData != hf->pbFileSector)
memcpy(hf->pbFileSector, pbRawData, hf->dwDataSize);
}
//.........这里部分代码省略.........
示例10: RecryptFileData
static int RecryptFileData(
TMPQArchive * ha,
TMPQFile * hf,
const char * szFileName,
const char * szNewFileName)
{
ULONGLONG RawFilePos;
TFileEntry * pFileEntry = hf->pFileEntry;
DWORD dwBytesToRecrypt = pFileEntry->dwCmpSize;
DWORD dwOldKey;
DWORD dwNewKey;
int nError = ERROR_SUCCESS;
// The file must be encrypted
assert(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED);
// File decryption key is calculated from the plain name
szNewFileName = GetPlainFileNameA(szNewFileName);
szFileName = GetPlainFileNameA(szFileName);
// Calculate both file keys
dwOldKey = DecryptFileKey(szFileName, pFileEntry->ByteOffset, pFileEntry->dwFileSize, pFileEntry->dwFlags);
dwNewKey = DecryptFileKey(szNewFileName, pFileEntry->ByteOffset, pFileEntry->dwFileSize, pFileEntry->dwFlags);
// Incase the keys are equal, don't recrypt the file
if(dwNewKey == dwOldKey)
return ERROR_SUCCESS;
hf->dwFileKey = dwOldKey;
// Calculate the raw position of the file in the archive
hf->MpqFilePos = pFileEntry->ByteOffset;
hf->RawFilePos = ha->MpqPos + hf->MpqFilePos;
// Allocate buffer for file transfer
nError = AllocateSectorBuffer(hf);
if(nError != ERROR_SUCCESS)
return nError;
// Also allocate buffer for sector offsets
// Note: Don't load sector checksums, we don't need to recrypt them
nError = AllocateSectorOffsets(hf, true);
if(nError != ERROR_SUCCESS)
return nError;
// If we have sector offsets, recrypt these as well
if(hf->SectorOffsets != NULL)
{
// Allocate secondary buffer for sectors copy
DWORD * SectorOffsetsCopy = (DWORD *)STORM_ALLOC(BYTE, hf->SectorOffsets[0]);
DWORD dwSectorOffsLen = hf->SectorOffsets[0];
if(SectorOffsetsCopy == NULL)
return ERROR_NOT_ENOUGH_MEMORY;
// Recrypt the array of sector offsets
memcpy(SectorOffsetsCopy, hf->SectorOffsets, dwSectorOffsLen);
EncryptMpqBlock(SectorOffsetsCopy, dwSectorOffsLen, dwNewKey - 1);
BSWAP_ARRAY32_UNSIGNED(SectorOffsetsCopy, dwSectorOffsLen);
// Write the recrypted array back
if(!FileStream_Write(ha->pStream, &hf->RawFilePos, SectorOffsetsCopy, dwSectorOffsLen))
nError = GetLastError();
STORM_FREE(SectorOffsetsCopy);
}
// Now we have to recrypt all file sectors. We do it without
// recompression, because recompression is not necessary in this case
if(nError == ERROR_SUCCESS)
{
for(DWORD dwSector = 0; dwSector < hf->dwSectorCount; dwSector++)
{
DWORD dwRawDataInSector = hf->dwSectorSize;
DWORD dwRawByteOffset = dwSector * hf->dwSectorSize;
// Last sector: If there is not enough bytes remaining in the file, cut the raw size
if(dwRawDataInSector > dwBytesToRecrypt)
dwRawDataInSector = dwBytesToRecrypt;
// Fix the raw data length if the file is compressed
if(hf->SectorOffsets != NULL)
{
dwRawDataInSector = hf->SectorOffsets[dwSector+1] - hf->SectorOffsets[dwSector];
dwRawByteOffset = hf->SectorOffsets[dwSector];
}
// Calculate the raw file offset of the file sector
CalculateRawSectorOffset(RawFilePos, hf, dwRawByteOffset);
// Read the file sector
if(!FileStream_Read(ha->pStream, &RawFilePos, hf->pbFileSector, dwRawDataInSector))
{
nError = GetLastError();
break;
}
// If necessary, re-encrypt the sector
// Note: Recompression is not necessary here. Unlike encryption,
// the compression does not depend on the position of the file in MPQ.
BSWAP_ARRAY32_UNSIGNED(hf->pbFileSector, dwRawDataInSector);
DecryptMpqBlock(hf->pbFileSector, dwRawDataInSector, dwOldKey + dwSector);
//.........这里部分代码省略.........
示例11: CopyMpqFileSectors
// Copies all file sectors into another archive.
static int CopyMpqFileSectors(
TMPQArchive * ha,
TMPQFile * hf,
TFileStream * pNewStream)
{
TFileEntry * pFileEntry = hf->pFileEntry;
ULONGLONG RawFilePos; // Used for calculating sector offset in the old MPQ archive
ULONGLONG MpqFilePos; // MPQ file position in the new archive
DWORD dwBytesToCopy = pFileEntry->dwCmpSize;
DWORD dwPatchSize = 0; // Size of patch header
DWORD dwFileKey1 = 0; // File key used for decryption
DWORD dwFileKey2 = 0; // File key used for encryption
DWORD dwCmpSize = 0; // Compressed file size, including patch header
int nError = ERROR_SUCCESS;
// Remember the position in the destination file
FileStream_GetPos(pNewStream, &MpqFilePos);
MpqFilePos -= ha->MpqPos;
// Resolve decryption keys. Note that the file key given
// in the TMPQFile structure also includes the key adjustment
if(nError == ERROR_SUCCESS && (pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED))
{
dwFileKey2 = dwFileKey1 = hf->dwFileKey;
if(pFileEntry->dwFlags & MPQ_FILE_FIX_KEY)
{
dwFileKey2 = (dwFileKey1 ^ pFileEntry->dwFileSize) - (DWORD)pFileEntry->ByteOffset;
dwFileKey2 = (dwFileKey2 + (DWORD)MpqFilePos) ^ pFileEntry->dwFileSize;
}
}
// If we have to save patch header, do it
if(nError == ERROR_SUCCESS && hf->pPatchInfo != NULL)
{
BSWAP_ARRAY32_UNSIGNED(hf->pPatchInfo, sizeof(DWORD) * 3);
if(!FileStream_Write(pNewStream, NULL, hf->pPatchInfo, hf->pPatchInfo->dwLength))
nError = GetLastError();
// Save the size of the patch info
dwPatchSize = hf->pPatchInfo->dwLength;
}
// If we have to save sector offset table, do it.
if(nError == ERROR_SUCCESS && hf->SectorOffsets != NULL)
{
DWORD * SectorOffsetsCopy = STORM_ALLOC(DWORD, hf->SectorOffsets[0] / sizeof(DWORD));
DWORD dwSectorOffsLen = hf->SectorOffsets[0];
assert((pFileEntry->dwFlags & MPQ_FILE_SINGLE_UNIT) == 0);
assert(pFileEntry->dwFlags & MPQ_FILE_COMPRESS_MASK);
if(SectorOffsetsCopy == NULL)
nError = ERROR_NOT_ENOUGH_MEMORY;
// Encrypt the secondary sector offset table and write it to the target file
if(nError == ERROR_SUCCESS)
{
memcpy(SectorOffsetsCopy, hf->SectorOffsets, dwSectorOffsLen);
if(pFileEntry->dwFlags & MPQ_FILE_ENCRYPTED)
EncryptMpqBlock(SectorOffsetsCopy, dwSectorOffsLen, dwFileKey2 - 1);
BSWAP_ARRAY32_UNSIGNED(SectorOffsetsCopy, dwSectorOffsLen);
if(!FileStream_Write(pNewStream, NULL, SectorOffsetsCopy, dwSectorOffsLen))
nError = GetLastError();
dwBytesToCopy -= dwSectorOffsLen;
dwCmpSize += dwSectorOffsLen;
}
// Update compact progress
if(ha->pfnCompactCB != NULL)
{
ha->CompactBytesProcessed += dwSectorOffsLen;
ha->pfnCompactCB(ha->pvCompactUserData, CCB_COMPACTING_FILES, ha->CompactBytesProcessed, ha->CompactTotalBytes);
}
STORM_FREE(SectorOffsetsCopy);
}
// Now we have to copy all file sectors. We do it without
// recompression, because recompression is not necessary in this case
if(nError == ERROR_SUCCESS)
{
for(DWORD dwSector = 0; dwSector < hf->dwSectorCount; dwSector++)
{
DWORD dwRawDataInSector = hf->dwSectorSize;
DWORD dwRawByteOffset = dwSector * hf->dwSectorSize;
// Fix the raw data length if the file is compressed
if(hf->SectorOffsets != NULL)
{
dwRawDataInSector = hf->SectorOffsets[dwSector+1] - hf->SectorOffsets[dwSector];
dwRawByteOffset = hf->SectorOffsets[dwSector];
}
// Last sector: If there is not enough bytes remaining in the file, cut the raw size
if(dwRawDataInSector > dwBytesToCopy)
dwRawDataInSector = dwBytesToCopy;
//.........这里部分代码省略.........
示例12: SFileSetMaxFileCount
//.........这里部分代码省略.........
nError = ERROR_NOT_ENOUGH_MEMORY;
}
// Now reallocate the file table
if(nError == ERROR_SUCCESS)
{
// Save the current file table
dwOldFileTableSize = ha->dwFileTableSize;
pOldFileTable = ha->pFileTable;
// Create new one
ha->pFileTable = STORM_ALLOC(TFileEntry, dwMaxFileCount);
if(ha->pFileTable != NULL)
memset(ha->pFileTable, 0, dwMaxFileCount * sizeof(TFileEntry));
else
nError = ERROR_NOT_ENOUGH_MEMORY;
}
// Now we have to build both classic hash table and HET table.
if(nError == ERROR_SUCCESS)
{
DWORD dwFileIndex = 0;
DWORD dwHashIndex = 0;
// Create new hash and HET entry for each file
pFileEntry = ha->pFileTable;
for(pOldFileEntry = pOldFileTable; pOldFileEntry < pOldFileTableEnd; pOldFileEntry++)
{
if(pOldFileEntry->dwFlags & MPQ_FILE_EXISTS)
{
// Copy the old file entry to the new one
memcpy(pFileEntry, pOldFileEntry, sizeof(TFileEntry));
assert(pFileEntry->szFileName != NULL);
// Create new entry in the hash table
if(ha->pHashTable != NULL)
{
dwHashIndex = AllocateHashEntry(ha, pFileEntry);
if(dwHashIndex == HASH_ENTRY_FREE)
{
nError = ERROR_CAN_NOT_COMPLETE;
break;
}
}
// Create new entry in the HET table, if needed
if(ha->pHetTable != NULL)
{
dwHashIndex = AllocateHetEntry(ha, pFileEntry);
if(dwHashIndex == HASH_ENTRY_FREE)
{
nError = ERROR_CAN_NOT_COMPLETE;
break;
}
}
// Move to the next file entry in the new table
pFileEntry++;
dwFileIndex++;
}
}
}
// Mark the archive as changed
// Note: We always have to rebuild the (attributes) file due to file table change
if(nError == ERROR_SUCCESS)
{
ha->dwMaxFileCount = dwMaxFileCount;
InvalidateInternalFiles(ha);
}
else
{
// Revert the hash table
if(ha->pHashTable != NULL && pOldHashTable != NULL)
{
STORM_FREE(ha->pHashTable);
ha->pHeader->dwHashTableSize = dwOldHashTableSize;
ha->pHashTable = pOldHashTable;
}
// Revert the HET table
if(ha->pHetTable != NULL && pOldHetTable != NULL)
{
FreeHetTable(ha->pHetTable);
ha->pHetTable = pOldHetTable;
}
// Revert the file table
if(pOldFileTable != NULL)
{
STORM_FREE(ha->pFileTable);
ha->pFileTable = pOldFileTable;
}
SetLastError(nError);
}
// Return the result
return (nError == ERROR_SUCCESS);
}
示例13: SListFileSaveToMpq
//.........这里部分代码省略.........
// Note: in MPQs with multiple locale versions of the same file,
// this code causes adding multiple listfile entries.
// Since those MPQs were last time used in Starcraft,
// we leave it as it is.
for(pFileEntry = ha->pFileTable; pFileEntry < pFileTableEnd; pFileEntry++)
{
// Only take existing items
if((pFileEntry->dwFlags & MPQ_FILE_EXISTS) && pFileEntry->szFileName != NULL)
{
// Ignore pseudo-names
if(!IsPseudoFileName(pFileEntry->szFileName, NULL) && !IsInternalMpqFileName(pFileEntry->szFileName))
{
SortTable[nFileNodes++] = pFileEntry->szFileName;
}
}
}
// Sort the table
qsort(SortTable, nFileNodes, sizeof(char *), CompareFileNodes);
// Now parse the table of file names again - remove duplicates
// and count file size.
if(nFileNodes != 0)
{
// Count the 0-th item
dwFileSize += (DWORD)strlen(SortTable[0]) + 2;
szPrevItem = SortTable[0];
// Count all next items
for(i = 1; i < nFileNodes; i++)
{
// If the item is the same like the last one, skip it
if(_stricmp(SortTable[i], szPrevItem))
{
dwFileSize += (DWORD)strlen(SortTable[i]) + 2;
szPrevItem = SortTable[i];
}
}
// Determine the flags for (listfile)
if(ha->dwFileFlags1 == 0)
ha->dwFileFlags1 = GetDefaultSpecialFileFlags(ha, dwFileSize);
// Create the listfile in the MPQ
nError = SFileAddFile_Init(ha, LISTFILE_NAME,
0,
dwFileSize,
LANG_NEUTRAL,
ha->dwFileFlags1 | MPQ_FILE_REPLACEEXISTING,
&hf);
// Add all file names
if(nError == ERROR_SUCCESS)
{
// Each name is followed by newline ("\x0D\x0A")
szPrevItem = SortTable[0];
nError = WriteListFileLine(hf, SortTable[0]);
// Count all next items
for(i = 1; i < nFileNodes; i++)
{
// If the item is the same like the last one, skip it
if(_stricmp(SortTable[i], szPrevItem))
{
WriteListFileLine(hf, SortTable[i]);
szPrevItem = SortTable[i];
}
}
}
}
else
{
// Create the listfile in the MPQ
dwFileSize = (DWORD)strlen(LISTFILE_NAME) + 2;
nError = SFileAddFile_Init(ha, LISTFILE_NAME,
0,
dwFileSize,
LANG_NEUTRAL,
MPQ_FILE_ENCRYPTED | MPQ_FILE_COMPRESS | MPQ_FILE_REPLACEEXISTING,
&hf);
// Just add "(listfile)" there
if(nError == ERROR_SUCCESS)
{
WriteListFileLine(hf, LISTFILE_NAME);
}
}
// Finalize the file in the MPQ
if(hf != NULL)
{
SFileAddFile_Finish(hf);
}
// Free buffers
if(nError == ERROR_SUCCESS)
ha->dwFlags &= ~MPQ_FLAG_INV_LISTFILE;
if(SortTable != NULL)
STORM_FREE(SortTable);
return nError;
}
示例14: SFileGetFileInfo
//.........这里部分代码省略.........
if(hf != NULL && hf->pFileEntry != NULL)
{
pvSrcFileInfo = &hf->pFileEntry->dwFlags;
cbSrcFileInfo = sizeof(DWORD);
nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
}
break;
case SFileInfoEncryptionKey:
hf = IsValidFileHandle(hMpqOrFile);
if(hf != NULL)
{
pvSrcFileInfo = &hf->dwFileKey;
cbSrcFileInfo = sizeof(DWORD);
nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
}
break;
case SFileInfoEncryptionKeyRaw:
hf = IsValidFileHandle(hMpqOrFile);
if(hf != NULL && hf->pFileEntry != NULL)
{
dwInt32Value = hf->dwFileKey;
if(hf->pFileEntry->dwFlags & MPQ_FILE_FIX_KEY)
dwInt32Value = (dwInt32Value ^ hf->pFileEntry->dwFileSize) - (DWORD)hf->MpqFilePos;
pvSrcFileInfo = &dwInt32Value;
cbSrcFileInfo = sizeof(DWORD);
nInfoType = SFILE_INFO_TYPE_DIRECT_POINTER;
}
break;
default: // Invalid info class
SetLastError(ERROR_INVALID_PARAMETER);
return false;
}
// If we validated the handle and info class, give as much info as possible
if(nInfoType >= SFILE_INFO_TYPE_DIRECT_POINTER)
{
// Give the length needed, if wanted
if(pcbLengthNeeded != NULL)
pcbLengthNeeded[0] = cbSrcFileInfo;
// If the caller entered an output buffer, the output size must also be entered
if(pvFileInfo != NULL && cbFileInfo != 0)
{
// Check if there is enough space in the output buffer
if(cbSrcFileInfo <= cbFileInfo)
{
switch(nInfoType)
{
case SFILE_INFO_TYPE_DIRECT_POINTER:
case SFILE_INFO_TYPE_ALLOCATED:
assert(pvSrcFileInfo != NULL);
memcpy(pvFileInfo, pvSrcFileInfo, cbSrcFileInfo);
break;
case SFILE_INFO_TYPE_READ_FROM_FILE:
if(!FileStream_Read(ha->pStream, &ByteOffset, pvFileInfo, cbSrcFileInfo))
nError = GetLastError();
break;
case SFILE_INFO_TYPE_TABLE_POINTER:
assert(pvSrcFileInfo != NULL);
*(void **)pvFileInfo = pvSrcFileInfo;
pvSrcFileInfo = NULL;
break;
case SFILE_INFO_TYPE_FILE_ENTRY:
assert(pFileEntry != NULL);
ConvertFileEntryToSelfRelative((TFileEntry *)pvFileInfo, pFileEntry);
break;
}
}
else
{
nError = ERROR_INSUFFICIENT_BUFFER;
}
}
// Free the file info if needed
if(nInfoType == SFILE_INFO_TYPE_ALLOCATED && pvSrcFileInfo != NULL)
STORM_FREE(pvSrcFileInfo);
if(nInfoType == SFILE_INFO_TYPE_TABLE_POINTER && pvSrcFileInfo != NULL)
SFileFreeFileInfo(pvSrcFileInfo, InfoClass);
}
else
{
// Handle error cases
if(nInfoType == SFILE_INFO_TYPE_INVALID_HANDLE)
nError = ERROR_INVALID_HANDLE;
if(nInfoType == SFILE_INFO_TYPE_NOT_FOUND)
nError = ERROR_FILE_NOT_FOUND;
}
// Set the last error value, if needed
if(nError != ERROR_SUCCESS)
SetLastError(nError);
return (nError == ERROR_SUCCESS);
}
示例15: SFileAddFileEx
//.........这里部分代码省略.........
// Allocate data buffer for reading from the source file
if(nError == ERROR_SUCCESS)
{
dwBytesRemaining = (DWORD)FileSize;
pbFileData = STORM_ALLOC(BYTE, dwSectorSize);
if(pbFileData == NULL)
nError = ERROR_NOT_ENOUGH_MEMORY;
}
// Deal with various combination of compressions
if(nError == ERROR_SUCCESS)
{
// When the compression for next blocks is set to default,
// we will copy the compression for the first sector
if(dwCompressionNext == MPQ_COMPRESSION_NEXT_SAME)
dwCompressionNext = dwCompression;
// If the caller wants ADPCM compression, we make sure
// that the first sector is not compressed with lossy compression
if(dwCompressionNext & (MPQ_COMPRESSION_ADPCM_MONO | MPQ_COMPRESSION_ADPCM_STEREO))
{
// The compression of the first file sector must not be ADPCM
// in order not to corrupt the headers
if(dwCompression & (MPQ_COMPRESSION_ADPCM_MONO | MPQ_COMPRESSION_ADPCM_STEREO))
dwCompression = MPQ_COMPRESSION_PKWARE;
// Remove both flag mono and stereo flags.
// They will be re-added according to WAVE type
dwCompressionNext &= ~(MPQ_COMPRESSION_ADPCM_MONO | MPQ_COMPRESSION_ADPCM_STEREO);
bIsAdpcmCompression = true;
}
// Initiate adding file to the MPQ
if(!SFileCreateFile(hMpq, szArchivedName, FileTime, (DWORD)FileSize, lcFileLocale, dwFlags, &hMpqFile))
nError = GetLastError();
}
// Write the file data to the MPQ
while(nError == ERROR_SUCCESS && dwBytesRemaining != 0)
{
// Get the number of bytes remaining in the source file
dwBytesToRead = dwBytesRemaining;
if(dwBytesToRead > dwSectorSize)
dwBytesToRead = dwSectorSize;
// Read data from the local file
if(!FileStream_Read(pStream, NULL, pbFileData, dwBytesToRead))
{
nError = GetLastError();
break;
}
// If the file being added is a WAVE file, we check number of channels
if(bIsFirstSector && bIsAdpcmCompression)
{
// The file must really be a WAVE file with at least 16 bits per sample,
// otherwise the ADPCM compression will corrupt it
if(IsWaveFile_16BitsPerAdpcmSample(pbFileData, dwBytesToRead, &dwChannels))
{
// Setup the compression of next sectors according to number of channels
dwCompressionNext |= (dwChannels == 1) ? MPQ_COMPRESSION_ADPCM_MONO : MPQ_COMPRESSION_ADPCM_STEREO;
}
else
{
// Setup the compression of next sectors to a lossless compression
dwCompressionNext = (dwCompression & MPQ_LOSSY_COMPRESSION_MASK) ? MPQ_COMPRESSION_PKWARE : dwCompression;
}
bIsFirstSector = false;
}
// Add the file sectors to the MPQ
if(!SFileWriteFile(hMpqFile, pbFileData, dwBytesToRead, dwCompression))
{
nError = GetLastError();
break;
}
// Set the next data compression
dwBytesRemaining -= dwBytesToRead;
dwCompression = dwCompressionNext;
}
// Finish the file writing
if(hMpqFile != NULL)
{
if(!SFileFinishFile(hMpqFile))
nError = GetLastError();
}
// Cleanup and exit
if(pbFileData != NULL)
STORM_FREE(pbFileData);
if(pStream != NULL)
FileStream_Close(pStream);
if(nError != ERROR_SUCCESS)
SetLastError(nError);
return (nError == ERROR_SUCCESS);
}