aboutsummaryrefslogtreecommitdiff
path: root/src/SBaseFileTable.cpp
diff options
context:
space:
mode:
authorunknown <E:\Ladik\Mail>2015-05-01 07:06:29 +0200
committerunknown <E:\Ladik\Mail>2015-05-01 07:06:29 +0200
commit46930855f500c1b494e3b16bb7a3323c07d4d5fb (patch)
tree6220cc643761137a930841d8ec828db9f3db53cf /src/SBaseFileTable.cpp
parenta205159d004871efbedd7cbfb686b8fe82bfb532 (diff)
+ Removed back reference of FileTable -> HashTable, as it is logically incorrect
+ Optimized patching process so it consumes less memory + Added hash table and block table defragmenting for malformed War3 maps
Diffstat (limited to 'src/SBaseFileTable.cpp')
-rw-r--r--src/SBaseFileTable.cpp1032
1 files changed, 490 insertions, 542 deletions
diff --git a/src/SBaseFileTable.cpp b/src/SBaseFileTable.cpp
index 97ecb53..9e6d4b6 100644
--- a/src/SBaseFileTable.cpp
+++ b/src/SBaseFileTable.cpp
@@ -372,9 +372,9 @@ int ConvertMpqHeaderToFormat4(
//
Label_ArchiveVersion1:
- if(pHeader->dwHashTablePos <= pHeader->dwHeaderSize)
+ if(pHeader->dwHashTablePos <= pHeader->dwHeaderSize || (pHeader->dwHashTablePos & 0x80000000))
ha->dwFlags |= MPQ_FLAG_MALFORMED;
- if(pHeader->dwBlockTablePos <= pHeader->dwHeaderSize)
+ if(pHeader->dwBlockTablePos <= pHeader->dwHeaderSize || (pHeader->dwBlockTablePos & 0x80000000))
ha->dwFlags |= MPQ_FLAG_MALFORMED;
// Fill the rest of the header
@@ -568,42 +568,35 @@ int ConvertMpqHeaderToFormat4(
//-----------------------------------------------------------------------------
// Support for hash table
-static bool IsValidHashEntry(TMPQArchive * ha, TMPQHash * pHash)
+// Hash entry verification when the file table does not exist yet
+static bool IsValidHashEntry1(TMPQArchive * ha, TMPQHash * pHash, TMPQBlock * pBlockTable)
{
- // The block table index must be smaller than file table size
- // The block table entry's flags must have MPQ_FILE_EXISTS set
- return (
- (pHash->dwBlockIndex < ha->dwFileTableSize) &&
- (ha->pFileTable[pHash->dwBlockIndex].dwFlags & MPQ_FILE_EXISTS_MASK) == MPQ_FILE_EXISTS
- );
-}
-
-// Returns a hash table entry in the following order:
-// 1) A hash table entry with the neutral locale
-// 2) A hash table entry with any other locale
-// 3) NULL
-static TMPQHash * GetHashEntryAny(TMPQArchive * ha, const char * szFileName)
-{
- TMPQHash * pHashNeutral = NULL;
- TMPQHash * pFirstHash = GetFirstHashEntry(ha, szFileName);
- TMPQHash * pHashAny = NULL;
- TMPQHash * pHash = pFirstHash;
+ ULONGLONG ByteOffset;
+ TMPQBlock * pBlock = pBlockTable + pHash->dwBlockIndex;
- // Parse the found hashes
- while(pHash != NULL)
+    // Storm.dll does not perform this check. However, if there were
+    // an entry with (dwBlockIndex > dwBlockTableSize), the game would crash
+ // Hence we assume that dwBlockIndex must be less than dwBlockTableSize
+ if(pHash->dwBlockIndex < ha->pHeader->dwBlockTableSize)
{
- // If we found neutral hash, remember it
- if(pHash->lcLocale == 0)
- pHashNeutral = pHash;
- if(pHashAny == NULL)
- pHashAny = pHash;
-
- // Get the next hash entry for that file
- pHash = GetNextHashEntry(ha, pFirstHash, pHash);
+ // Check whether this is an existing file
+    // Also, we do not allow the file size to be greater than 2GB
+    // NOTE(review): the mask 0x8000000 tests bit 27 (~128 MB), not bit 31 (0x80000000, 2 GB) — confirm intended limit
+ if((pBlock->dwFlags & MPQ_FILE_EXISTS) && (pBlock->dwFSize & 0x8000000) == 0)
+ {
+ // The begin of the file must be within the archive
+ ByteOffset = FileOffsetFromMpqOffset(ha, pBlock->dwFilePos);
+ return (ByteOffset < ha->FileSize);
+ }
}
- // At the end, return neutral hash (if found), otherwise NULL
- return (pHashNeutral != NULL) ? pHashNeutral : pHashAny;
+ return false;
+}
+
+// Hash entry verification when the file table does exist
+static bool IsValidHashEntry2(TMPQArchive * ha, TMPQHash * pHash)
+{
+ TFileEntry * pFileEntry = ha->pFileTable + pHash->dwBlockIndex;
+ return ((pHash->dwBlockIndex < ha->dwFileTableSize) && (pFileEntry->dwFlags & MPQ_FILE_EXISTS)) ? true : false;
}
// Returns a hash table entry in the following order:
@@ -620,7 +613,7 @@ static TMPQHash * GetHashEntryLocale(TMPQArchive * ha, const char * szFileName,
while(pHash != NULL)
{
// If the locales match, return it
- if(pHash->lcLocale == lcLocale)
+ if(lcLocale == 0 && lcLocale == pHash->lcLocale)
return pHash;
// If we found neutral hash, remember it
@@ -658,78 +651,172 @@ static TMPQHash * GetHashEntryExact(TMPQArchive * ha, const char * szFileName, L
return NULL;
}
-static int BuildFileTableFromBlockTable(
+// Defragment the file table so it does not contain any gaps
+// Note: As long as all values of all TMPQHash::dwBlockIndex
+// are not HASH_ENTRY_FREE, the startup search index does not matter.
+// Hash table is circular, so as long as there is no terminator,
+// all entries will be found.
+static TMPQHash * DefragmentHashTable(
TMPQArchive * ha,
- TFileEntry * pFileTable,
+ TMPQHash * pHashTable,
TMPQBlock * pBlockTable)
{
- TFileEntry * pFileEntry;
TMPQHeader * pHeader = ha->pHeader;
- TMPQBlock * pBlock;
- TMPQHash * pHashEnd = ha->pHashTable + ha->dwHashTableSize;
- TMPQHash * pHash;
+ TMPQHash * pHashTableEnd = pHashTable + pHeader->dwHashTableSize;
+ TMPQHash * pSource = pHashTable;
+ TMPQHash * pTarget = pHashTable;
+ DWORD dwFirstFreeEntry;
+ DWORD dwNewTableSize;
- // Parse the entire hash table
- for(pHash = ha->pHashTable; pHash < pHashEnd; pHash++)
- {
- //
- // Only if the block index is less than block table size.
- // An MPQ protector can place multiple invalid entries
- // into the hash table:
- //
- // a6d79af0 e61a0932 001e0000 0000770b <== Fake valid
- // a6d79af0 e61a0932 0000d761 0000dacb <== Fake valid
- // a6d79af0 e61a0932 00000000 0000002f <== Real file entry (block index is valid)
- // a6d79af0 e61a0932 00005a4f 000093bc <== Fake valid
- //
+ // Sanity checks
+ assert(pHeader->wFormatVersion == MPQ_FORMAT_VERSION_1);
+ assert(pHeader->HiBlockTablePos64 == 0);
- if(pHash->dwBlockIndex < pHeader->dwBlockTableSize)
+ // Parse the hash table and move the entries to the begin of it
+ for(pSource = pHashTable; pSource < pHashTableEnd; pSource++)
+ {
+ // Check whether this is a valid hash table entry
+ if(IsValidHashEntry1(ha, pSource, pBlockTable))
{
- // Get the pointer to the file entry and the block entry
- pFileEntry = pFileTable + pHash->dwBlockIndex;
- pBlock = pBlockTable + pHash->dwBlockIndex;
+ // Copy the hash table entry back
+ if(pSource > pTarget)
+ pTarget[0] = pSource[0];
- // Only if the block table entry contains valid file
- if((pBlock->dwFlags & MPQ_FILE_EXISTS) && !(pBlock->dwFlags & ~MPQ_FILE_VALID_FLAGS))
- {
- // ByteOffset is only valid if file size is not zero
- pFileEntry->ByteOffset = pBlock->dwFilePos;
- if(pFileEntry->ByteOffset == 0 && pBlock->dwFSize == 0)
- pFileEntry->ByteOffset = ha->pHeader->dwHeaderSize;
-
- pFileEntry->dwHashIndex = (DWORD)(pHash - ha->pHashTable);
- pFileEntry->dwFileSize = pBlock->dwFSize;
- pFileEntry->dwCmpSize = pBlock->dwCSize;
- pFileEntry->dwFlags = pBlock->dwFlags;
- pFileEntry->lcLocale = pHash->lcLocale;
- pFileEntry->wPlatform = pHash->wPlatform;
- }
+ // Move the target
+ pTarget++;
}
}
- return ERROR_SUCCESS;
+ // Calculate how many entries in the hash table we really need
+ dwFirstFreeEntry = (DWORD)(pTarget - pHashTable);
+ dwNewTableSize = GetHashTableSizeForFileCount(dwFirstFreeEntry);
+
+ // Fill the rest with entries that look like deleted
+ pHashTableEnd = pHashTable + dwNewTableSize;
+ pSource = pHashTable + dwFirstFreeEntry;
+ memset(pSource, 0xFF, (dwNewTableSize - dwFirstFreeEntry) * sizeof(TMPQHash));
+
+ // Mark the block indexes as deleted
+ for(; pSource < pHashTableEnd; pSource++)
+ pSource->dwBlockIndex = HASH_ENTRY_DELETED;
+
+ // Free some of the space occupied by the hash table
+ if(dwNewTableSize < pHeader->dwHashTableSize)
+ {
+ pHashTable = STORM_REALLOC(TMPQHash, pHashTable, dwNewTableSize);
+ ha->pHeader->dwHashTableSize = dwNewTableSize;
+ }
+
+ return pHashTable;
}
-static int UpdateFileTableFromHashTable(
+static int BuildFileTableFromBlockTable(
TMPQArchive * ha,
- TFileEntry * pFileTable)
+ TMPQBlock * pBlockTable)
{
TFileEntry * pFileEntry;
- TMPQHash * pHashEnd = ha->pHashTable + ha->dwHashTableSize;
+ TMPQHeader * pHeader = ha->pHeader;
+ TMPQBlock * pBlock;
+ TMPQHash * pHashTableEnd;
TMPQHash * pHash;
+ LPDWORD DefragmentTable = NULL;
+ DWORD dwItemCount = 0;
- for(pHash = ha->pHashTable; pHash < pHashEnd; pHash++)
+ // Sanity checks
+ assert(ha->pFileTable != NULL);
+ assert(ha->dwFileTableSize >= ha->dwMaxFileCount);
+
+ // Defragment the hash table, if needed
+ if(ha->dwFlags & MPQ_FLAG_HASH_TABLE_CUT)
{
- if(pHash->dwBlockIndex < ha->dwFileTableSize)
+ ha->pHashTable = DefragmentHashTable(ha, ha->pHashTable, pBlockTable);
+ ha->dwMaxFileCount = pHeader->dwHashTableSize;
+ }
+
+ // If the hash table or block table is cut,
+ // we will defragment the block table
+ if(ha->dwFlags & (MPQ_FLAG_HASH_TABLE_CUT | MPQ_FLAG_BLOCK_TABLE_CUT))
+ {
+ // Sanity checks
+ assert(pHeader->wFormatVersion == MPQ_FORMAT_VERSION_1);
+ assert(pHeader->HiBlockTablePos64 == 0);
+
+ // Allocate the translation table
+ DefragmentTable = STORM_ALLOC(DWORD, pHeader->dwBlockTableSize);
+ if(DefragmentTable == NULL)
+ return ERROR_NOT_ENOUGH_MEMORY;
+
+ // Fill the translation table
+ memset(DefragmentTable, 0xFF, pHeader->dwBlockTableSize * sizeof(DWORD));
+ }
+
+ // Parse the entire hash table
+ pHashTableEnd = ha->pHashTable + pHeader->dwHashTableSize;
+ for(pHash = ha->pHashTable; pHash < pHashTableEnd; pHash++)
+ {
+ DWORD dwBlockIndex = pHash->dwBlockIndex;
+ DWORD dwNewIndex = pHash->dwBlockIndex;
+
+ //
+ // We need to properly handle these cases:
+ // - Multiple hash entries (same file name) point to the same block entry
+ // - Multiple hash entries (different file name) point to the same block entry
+ //
+ // Ignore all hash table entries where:
+ // - dwBlockIndex >= BlockTableSize
+ // - Flags of the appropriate block table entry
+
+ if(IsValidHashEntry1(ha, pHash, pBlockTable))
{
- pFileEntry = pFileTable + pHash->dwBlockIndex;
- if(pFileEntry->dwFlags & MPQ_FILE_EXISTS)
+ // Determine the new block index
+ if(DefragmentTable != NULL)
{
- pFileEntry->dwHashIndex = (DWORD)(pHash - ha->pHashTable);
- pFileEntry->lcLocale = pHash->lcLocale;
- pFileEntry->wPlatform = pHash->wPlatform;
+            // Need to handle case when multiple hash
+ // entries point to the same block entry
+ if(DefragmentTable[dwBlockIndex] == HASH_ENTRY_FREE)
+ {
+ DefragmentTable[dwBlockIndex] = dwItemCount;
+ dwNewIndex = dwItemCount++;
+ }
+ else
+ {
+ dwNewIndex = DefragmentTable[dwBlockIndex];
+ }
+
+ // Fix the pointer in the hash entry
+ pHash->dwBlockIndex = dwNewIndex;
}
+
+ // Get the pointer to the file entry and the block entry
+ pFileEntry = ha->pFileTable + dwNewIndex;
+ pBlock = pBlockTable + dwNewIndex;
+
+ // ByteOffset is only valid if file size is not zero
+ pFileEntry->ByteOffset = pBlock->dwFilePos;
+ if(pFileEntry->ByteOffset == 0 && pBlock->dwFSize == 0)
+ pFileEntry->ByteOffset = ha->pHeader->dwHeaderSize;
+
+ // Fill the rest of the file entry
+ pFileEntry->dwFileSize = pBlock->dwFSize;
+ pFileEntry->dwCmpSize = pBlock->dwCSize;
+ pFileEntry->dwFlags = pBlock->dwFlags;
+ }
+ }
+
+ // Free the translation table
+ if(DefragmentTable != NULL)
+ {
+ // If we defragmented the block table in the process,
+ // free some memory by shrinking the file table
+ if(ha->dwFileTableSize > ha->dwMaxFileCount)
+ {
+ ha->pFileTable = STORM_REALLOC(TFileEntry, ha->pFileTable, ha->dwMaxFileCount);
+ ha->pHeader->dwBlockTableSize = ha->dwMaxFileCount;
+ ha->dwFileTableSize = ha->dwMaxFileCount;
}
+
+ // Free the translation table
+ STORM_FREE(DefragmentTable);
}
return ERROR_SUCCESS;
@@ -743,11 +830,11 @@ static TMPQHash * TranslateHashTable(
size_t HashTableSize;
// Allocate copy of the hash table
- pHashTable = STORM_ALLOC(TMPQHash, ha->dwHashTableSize);
+ pHashTable = STORM_ALLOC(TMPQHash, ha->pHeader->dwHashTableSize);
if(pHashTable != NULL)
{
// Copy the hash table
- HashTableSize = sizeof(TMPQHash) * ha->dwHashTableSize;
+ HashTableSize = sizeof(TMPQHash) * ha->pHeader->dwHashTableSize;
memcpy(pHashTable, ha->pHashTable, HashTableSize);
// Give the size to the caller
@@ -769,18 +856,17 @@ TMPQBlock * TranslateBlockTable(
TFileEntry * pFileEntry = ha->pFileTable;
TMPQBlock * pBlockTable;
TMPQBlock * pBlock;
- size_t NeedHiBlockTable = 0;
- size_t BlockTableSize;
+ DWORD dwBlockTableSize = ha->pHeader->dwBlockTableSize;
+ DWORD NeedHiBlockTable = 0;
// Allocate copy of the hash table
- pBlockTable = pBlock = STORM_ALLOC(TMPQBlock, ha->dwFileTableSize);
+ pBlockTable = pBlock = STORM_ALLOC(TMPQBlock, dwBlockTableSize);
if(pBlockTable != NULL)
{
- // Copy the block table
- BlockTableSize = sizeof(TMPQBlock) * ha->dwFileTableSize;
- for(DWORD i = 0; i < ha->dwFileTableSize; i++)
+ // Convert the block table
+ for(DWORD i = 0; i < dwBlockTableSize; i++)
{
- NeedHiBlockTable |= (pFileEntry->ByteOffset >> 32);
+ NeedHiBlockTable |= (DWORD)(pFileEntry->ByteOffset >> 32);
pBlock->dwFilePos = (DWORD)pFileEntry->ByteOffset;
pBlock->dwFSize = pFileEntry->dwFileSize;
pBlock->dwCSize = pFileEntry->dwCmpSize;
@@ -792,7 +878,7 @@ TMPQBlock * TranslateBlockTable(
// Give the size to the caller
if(pcbTableSize != NULL)
- *pcbTableSize = (ULONGLONG)BlockTableSize;
+ *pcbTableSize = (ULONGLONG)dwBlockTableSize * sizeof(TMPQBlock);
if(pbNeedHiBlockTable != NULL)
*pbNeedHiBlockTable = NeedHiBlockTable ? true : false;
@@ -808,20 +894,19 @@ static USHORT * TranslateHiBlockTable(
TFileEntry * pFileEntry = ha->pFileTable;
USHORT * pHiBlockTable;
USHORT * pHiBlock;
- size_t HiBlockTableSize;
+ DWORD dwBlockTableSize = ha->pHeader->dwBlockTableSize;
// Allocate copy of the hash table
- pHiBlockTable = pHiBlock = STORM_ALLOC(USHORT, ha->dwFileTableSize);
+ pHiBlockTable = pHiBlock = STORM_ALLOC(USHORT, dwBlockTableSize);
if(pHiBlockTable != NULL)
{
// Copy the block table
- HiBlockTableSize = sizeof(USHORT) * ha->dwFileTableSize;
- for(DWORD i = 0; i < ha->dwFileTableSize; i++)
+ for(DWORD i = 0; i < dwBlockTableSize; i++)
pHiBlock[i] = (USHORT)(pFileEntry[i].ByteOffset >> 0x20);
// Give the size to the caller
if(pcbTableSize != NULL)
- *pcbTableSize = (ULONGLONG)HiBlockTableSize;
+ *pcbTableSize = (ULONGLONG)dwBlockTableSize * sizeof(USHORT);
}
return pHiBlockTable;
@@ -1267,7 +1352,7 @@ static TMPQExtHeader * TranslateHetTable(TMPQHetTable * pHetTable, ULONGLONG * p
return &pHetHeader->ExtHdr;
}
-DWORD GetFileIndex_Het(TMPQArchive * ha, const char * szFileName)
+static DWORD GetFileIndex_Het(TMPQArchive * ha, const char * szFileName)
{
TMPQHetTable * pHetTable = ha->pHetTable;
ULONGLONG FileNameHash;
@@ -1306,17 +1391,13 @@ DWORD GetFileIndex_Het(TMPQArchive * ha, const char * szFileName)
GetBits(pHetTable->pBetIndexes, pHetTable->dwIndexSizeTotal * Index,
pHetTable->dwIndexSize,
&dwFileIndex,
- 4);
- //
- // TODO: This condition only happens when we are opening a MPQ
- // where some files were deleted by StormLib. Perhaps
- // we should not allow shrinking of the file table in MPQs v 4.0?
- // assert(dwFileIndex <= ha->dwFileTableSize);
- //
+ sizeof(DWORD));
// Verify the FileNameHash against the entry in the table of name hashes
if(dwFileIndex <= ha->dwFileTableSize && ha->pFileTable[dwFileIndex].FileNameHash == FileNameHash)
+ {
return dwFileIndex;
+ }
}
// Move to the next entry in the HET table
@@ -1368,6 +1449,7 @@ static void CreateBetHeader(
pBetHeader->ExtHdr.dwDataSize = 0;
// Get the maximum values for the BET table
+ pFileTableEnd = ha->pFileTable + ha->pHeader->dwBlockTableSize;
for(pFileEntry = ha->pFileTable; pFileEntry < pFileTableEnd; pFileEntry++)
{
//
@@ -1416,7 +1498,7 @@ static void CreateBetHeader(
pBetHeader->dwBitCount_Unknown;
// Save the file count and flag count
- pBetHeader->dwEntryCount = ha->dwFileTableSize;
+ pBetHeader->dwEntryCount = ha->pHeader->dwBlockTableSize;
pBetHeader->dwFlagCount = dwMaxFlagIndex + 1;
pBetHeader->dwUnknown08 = 0x10;
@@ -1577,6 +1659,7 @@ TMPQExtHeader * TranslateBetTable(
DWORD nBitOffset = 0;
// Construct the bit-based file table
+ pFileTableEnd = ha->pFileTable + BetHeader.dwEntryCount;
for(pFileEntry = ha->pFileTable; pFileEntry < pFileTableEnd; pFileEntry++)
{
//
@@ -1678,11 +1761,25 @@ void FreeBetTable(TMPQBetTable * pBetTable)
//-----------------------------------------------------------------------------
// Support for file table
-TFileEntry * GetFileEntryAny(TMPQArchive * ha, const char * szFileName)
+TFileEntry * GetFileEntryLocale2(TMPQArchive * ha, const char * szFileName, LCID lcLocale, LPDWORD PtrHashIndex)
{
TMPQHash * pHash;
DWORD dwFileIndex;
+ // First, we have to search the classic hash table
+ // This is because on renaming, deleting, or changing locale,
+ // we will need the pointer to hash table entry
+ if(ha->pHashTable != NULL)
+ {
+ pHash = GetHashEntryLocale(ha, szFileName, lcLocale);
+ if(pHash != NULL && pHash->dwBlockIndex < ha->dwFileTableSize)
+ {
+ if(PtrHashIndex != NULL)
+ PtrHashIndex[0] = (DWORD)(pHash - ha->pHashTable);
+ return ha->pFileTable + pHash->dwBlockIndex;
+ }
+ }
+
// If we have HET table in the MPQ, try to find the file in HET table
if(ha->pHetTable != NULL)
{
@@ -1691,76 +1788,48 @@ TFileEntry * GetFileEntryAny(TMPQArchive * ha, const char * szFileName)
return ha->pFileTable + dwFileIndex;
}
- // Otherwise, perform the file search in the classic hash table
- if(ha->pHashTable != NULL)
- {
- pHash = GetHashEntryAny(ha, szFileName);
- if(pHash != NULL && pHash->dwBlockIndex < ha->dwFileTableSize)
- return ha->pFileTable + pHash->dwBlockIndex;
- }
-
// Not found
return NULL;
}
TFileEntry * GetFileEntryLocale(TMPQArchive * ha, const char * szFileName, LCID lcLocale)
{
+ return GetFileEntryLocale2(ha, szFileName, lcLocale, NULL);
+}
+
+TFileEntry * GetFileEntryExact(TMPQArchive * ha, const char * szFileName, LCID lcLocale, LPDWORD PtrHashIndex)
+{
TMPQHash * pHash;
DWORD dwFileIndex;
- // If we have HET table in the MPQ, try to find the file in HET table
- if(ha->pHetTable != NULL)
- {
- dwFileIndex = GetFileIndex_Het(ha, szFileName);
- if(dwFileIndex != HASH_ENTRY_FREE)
- return ha->pFileTable + dwFileIndex;
- }
-
- // Otherwise, perform the file search in the classic hash table
+ // If the hash table is present, find the entry from hash table
if(ha->pHashTable != NULL)
{
- pHash = GetHashEntryLocale(ha, szFileName, lcLocale);
+ pHash = GetHashEntryExact(ha, szFileName, lcLocale);
if(pHash != NULL && pHash->dwBlockIndex < ha->dwFileTableSize)
+ {
+ if(PtrHashIndex != NULL)
+ PtrHashIndex[0] = (DWORD)(pHash - ha->pHashTable);
return ha->pFileTable + pHash->dwBlockIndex;
+ }
}
-
- // Not found
- return NULL;
-}
-
-TFileEntry * GetFileEntryExact(TMPQArchive * ha, const char * szFileName, LCID lcLocale)
-{
- TMPQHash * pHash;
- DWORD dwFileIndex;
// If we have HET table in the MPQ, try to find the file in HET table
if(ha->pHetTable != NULL)
{
dwFileIndex = GetFileIndex_Het(ha, szFileName);
if(dwFileIndex != HASH_ENTRY_FREE)
+ {
+ if(PtrHashIndex != NULL)
+ PtrHashIndex[0] = HASH_ENTRY_FREE;
return ha->pFileTable + dwFileIndex;
+ }
}
-
- // Otherwise, perform the file search in the classic hash table
- if(ha->pHashTable != NULL)
- {
- pHash = GetHashEntryExact(ha, szFileName, lcLocale);
- if(pHash != NULL && pHash->dwBlockIndex < ha->dwFileTableSize)
- return ha->pFileTable + pHash->dwBlockIndex;
- }
-
+
// Not found
return NULL;
}
-TFileEntry * GetFileEntryByIndex(TMPQArchive * ha, DWORD dwIndex)
-{
- // For MPQs with classic hash table
- if(dwIndex < ha->dwFileTableSize)
- return ha->pFileTable + dwIndex;
- return NULL;
-}
-
void AllocateFileName(TMPQArchive * ha, TFileEntry * pFileEntry, const char * szFileName)
{
// Sanity check
@@ -1792,99 +1861,103 @@ void AllocateFileName(TMPQArchive * ha, TFileEntry * pFileEntry, const char * sz
}
}
-TFileEntry * AllocateFileEntry(TMPQArchive * ha, const char * szFileName, LCID lcLocale)
+TFileEntry * AllocateFileEntry(TMPQArchive * ha, const char * szFileName, LCID lcLocale, LPDWORD PtrHashIndex)
{
- TFileEntry * pFileTableEnd;
- TFileEntry * pFileTablePtr;
- TFileEntry * pFileTable = ha->pFileTable;
- TFileEntry * pLastEntry = ha->pFileTable;
- TFileEntry * pFileEntry = NULL;
- TMPQHash * pHash;
- DWORD dwFreeNeeded = ha->dwReservedFiles;
+ TFileEntry * pFileTableEnd = ha->pFileTable + ha->dwFileTableSize;
+ TFileEntry * pFreeEntry = NULL;
+ TFileEntry * pFileEntry;
+ TMPQHash * pHash = NULL;
+ DWORD dwReservedFiles = ha->dwReservedFiles;
DWORD dwFreeCount = 0;
- // The entry in the hash table should not be there.
- // This case must be handled by the caller
- assert(ha->pHashTable == NULL || GetHashEntryExact(ha, szFileName, lcLocale) == NULL);
- assert(ha->pHetTable == NULL || GetFileIndex_Het(ha, szFileName) == HASH_ENTRY_FREE);
+ // Sanity check: File table size must be greater or equal to max file count
+ assert(ha->dwFileTableSize >= ha->dwMaxFileCount);
- // Determine the end of the file table
- pFileTableEnd = ha->pFileTable + STORMLIB_MAX(ha->dwFileTableSize, ha->dwMaxFileCount);
+    // If we are saving MPQ tables, we don't take the number of reserved files into account
+ dwReservedFiles = (ha->dwFlags & MPQ_FLAG_SAVING_TABLES) ? 0 : ha->dwReservedFiles;
- // Find the first free file entry
- for(pFileTablePtr = pFileTable; pFileTablePtr < pFileTableEnd; pFileTablePtr++)
+ // Now find a free entry in the file table.
+ // Note that in the case when free entries are in the middle,
+ // we need to use these
+ for(pFileEntry = ha->pFileTable; pFileEntry < pFileTableEnd; pFileEntry++)
{
- // Is that entry free?
- // Note: Files with "delete marker" are not deleted.
- // Don't treat them as available
- if((pFileTablePtr->dwFlags & MPQ_FILE_EXISTS) == 0)
+ if((pFileEntry->dwFlags & MPQ_FILE_EXISTS) == 0)
{
- if(pFileEntry == NULL)
- pFileEntry = pFileTablePtr;
+ // Remember the first free entry
+ if(pFreeEntry == NULL)
+ pFreeEntry = pFileEntry;
dwFreeCount++;
- }
- else
- {
- // Update the last used entry
- if(pFileTablePtr > pLastEntry)
- pLastEntry = pFileTablePtr;
+
+ // If the number of free items is greater than number
+ // of reserved items, We can add the file
+ if(dwFreeCount > dwReservedFiles)
+ break;
}
}
- // Did we find an usable file entry?
- if(pFileEntry != NULL)
+ // If the total number of free entries is less than number of reserved files,
+ // we cannot add the file to the archive
+ if(pFreeEntry == NULL || dwFreeCount <= dwReservedFiles)
+ return NULL;
+
+ // Initialize the file entry and set its file name
+ memset(pFreeEntry, 0, sizeof(TFileEntry));
+ AllocateFileName(ha, pFreeEntry, szFileName);
+
+    // If the archive has a hash table, we need to find a free entry there
+ if(ha->pHashTable != NULL)
{
- // Is there enough free entries?
- if(!(ha->dwFlags & MPQ_FLAG_SAVING_TABLES))
- dwFreeNeeded++;
- if(dwFreeCount < dwFreeNeeded)
- return NULL;
+ // Make sure that the entry is not there yet
+ assert(GetHashEntryExact(ha, szFileName, lcLocale) == NULL);
- // Make sure that the entry is properly initialized
- memset(pFileEntry, 0, sizeof(TFileEntry));
- pFileEntry->lcLocale = (USHORT)lcLocale;
- AllocateFileName(ha, pFileEntry, szFileName);
+ // Find a free hash table entry for the name
+ pHash = AllocateHashEntry(ha, pFreeEntry, lcLocale);
+ if(pHash == NULL)
+ return NULL;
- // If the MPQ has hash table, we have to insert the new entry into the hash table
- // We expect it to succeed because there must be a free hash entry if there is a free file entry
- // Note: Don't bother with the HET table. It will be rebuilt anyway
- if(ha->pHashTable != NULL)
- {
- pHash = AllocateHashEntry(ha, pFileEntry);
- assert(pHash != NULL);
- }
+ // Set the file index to the hash table
+ pHash->dwBlockIndex = (DWORD)(pFreeEntry - ha->pFileTable);
+ PtrHashIndex[0] = (DWORD)(pHash - ha->pHashTable);
+ }
- // Update the file table size
- if(pFileEntry >= pLastEntry)
- pLastEntry = pFileEntry;
- ha->dwFileTableSize = (DWORD)(pLastEntry - pFileTable) + 1;
+ // If the archive has a HET table, just do some checks
+    // Note: Don't bother modifying the HET table. It will be rebuilt from scratch afterwards, anyway
+ if(ha->pHetTable != NULL)
+ {
+ assert(GetFileIndex_Het(ha, szFileName) == HASH_ENTRY_FREE);
}
- // Return the file entry
- return pFileEntry;
+ // Return the free table entry
+ return pFreeEntry;
}
int RenameFileEntry(
TMPQArchive * ha,
- TFileEntry * pFileEntry,
+ TMPQFile * hf,
const char * szNewFileName)
{
- TMPQHash * pHash;
+ TFileEntry * pFileEntry = hf->pFileEntry;
+ TMPQHash * pHashEntry = hf->pHashEntry;
+ LCID lcLocale = 0;
- // Mark the entry as deleted in the hash table
+    // If the archive has a hash table, we need to free the hash table entry
if(ha->pHashTable != NULL)
{
- assert(pFileEntry->dwHashIndex < ha->dwHashTableSize);
+ // The file must have hash table entry assigned
+ // Will exit if there are multiple HASH entries pointing to the same file entry
+ if(pHashEntry == NULL)
+ return ERROR_NOT_SUPPORTED;
- pHash = ha->pHashTable + pFileEntry->dwHashIndex;
- memset(pHash, 0xFF, sizeof(TMPQHash));
- pHash->dwBlockIndex = HASH_ENTRY_DELETED;
- }
+ // Save the locale
+ lcLocale = pHashEntry->lcLocale;
- //
- // Note: Don't bother with the HET table.
- // It will be rebuilt from scratch anyway
- //
+ // Mark the hash table entry as deleted
+ pHashEntry->dwName1 = 0xFFFFFFFF;
+ pHashEntry->dwName2 = 0xFFFFFFFF;
+ pHashEntry->lcLocale = 0xFFFF;
+ pHashEntry->wPlatform = 0xFFFF;
+ pHashEntry->dwBlockIndex = HASH_ENTRY_DELETED;
+ }
// Free the old file name
if(pFileEntry->szFileName != NULL)
@@ -1894,44 +1967,36 @@ int RenameFileEntry(
// Allocate new file name
AllocateFileName(ha, pFileEntry, szNewFileName);
- // Now find a hash entry for the new file name
+ // Allocate new hash entry
if(ha->pHashTable != NULL)
{
- // Try to find the hash table entry for the new file name
- // Note: Since we deleted one hash entry, this will always succeed
- pHash = AllocateHashEntry(ha, pFileEntry);
- assert(pHash != NULL);
+ // Since we freed one hash entry before, this must succeed
+ hf->pHashEntry = AllocateHashEntry(ha, pFileEntry, lcLocale);
+ assert(hf->pHashEntry != NULL);
}
- // Invalidate the entries for (listfile) and (attributes)
- // After we are done with MPQ changes, we need to re-create them
- InvalidateInternalFiles(ha);
return ERROR_SUCCESS;
}
-void DeleteFileEntry(
- TMPQArchive * ha,
- TFileEntry * pFileEntry)
+int DeleteFileEntry(TMPQArchive * ha, TMPQFile * hf)
{
- TMPQHash * pHash;
+ TFileEntry * pFileEntry = hf->pFileEntry;
+ TMPQHash * pHashEntry = hf->pHashEntry;
- // If the MPQ has classic hash table, clear the entry there
+    // If the archive has a hash table, we need to free the hash table entry
if(ha->pHashTable != NULL)
{
- // Only if the file entry is still an existing one
- if(pFileEntry->dwFlags & MPQ_FILE_EXISTS)
- {
- // We expect dwHashIndex to be within the hash table
- pHash = ha->pHashTable + pFileEntry->dwHashIndex;
- assert(pFileEntry->dwHashIndex < ha->dwHashTableSize);
-
- // Set the hash table entry as deleted
- pHash->dwName1 = 0xFFFFFFFF;
- pHash->dwName2 = 0xFFFFFFFF;
- pHash->lcLocale = 0xFFFF;
- pHash->wPlatform = 0xFFFF;
- pHash->dwBlockIndex = HASH_ENTRY_DELETED;
- }
+ // The file must have hash table entry assigned
+ // Will exit if there are multiple HASH entries pointing to the same file entry
+ if(pHashEntry == NULL)
+ return ERROR_NOT_SUPPORTED;
+
+ // Mark the hash table entry as deleted
+ pHashEntry->dwName1 = 0xFFFFFFFF;
+ pHashEntry->dwName2 = 0xFFFFFFFF;
+ pHashEntry->lcLocale = 0xFFFF;
+ pHashEntry->wPlatform = 0xFFFF;
+ pHashEntry->dwBlockIndex = HASH_ENTRY_DELETED;
}
// Free the file name, and set the file entry as deleted
@@ -1940,67 +2005,73 @@ void DeleteFileEntry(
pFileEntry->szFileName = NULL;
//
- // Don't modify the HET table, because it gets recreated from scratch at every modification operation
+ // Don't modify the HET table, because it gets recreated by the caller
// Don't decrement the number of entries in the file table
- // Keep Byte Offset, file size and compressed size in the file table
- // Also keep the CRC32 and MD5 in the file attributes
- // Clear the file name hash
- // Clear the MPQ_FILE_EXISTS bit.
+ // Keep Byte Offset, file size, compressed size, CRC32 and MD5
+ // Clear the file name hash and the MPQ_FILE_EXISTS bit
//
- pFileEntry->FileNameHash = 0;
pFileEntry->dwFlags &= ~MPQ_FILE_EXISTS;
+ pFileEntry->FileNameHash = 0;
+ return ERROR_SUCCESS;
}
-void InvalidateInternalFiles(TMPQArchive * ha)
+DWORD InvalidateInternalFile(TMPQArchive * ha, const char * szFileName, DWORD dwFlagNone, DWORD dwFlagNew)
{
- TFileEntry * pFileEntry1 = NULL;
- TFileEntry * pFileEntry2 = NULL;
- TFileEntry * pFileEntry3 = NULL;
+ TMPQFile * hf = NULL;
+ DWORD dwFileFlags = 0;
+ int nError = ERROR_FILE_NOT_FOUND;
+
+ // Open the file from the MPQ
+ if(SFileOpenFileEx((HANDLE)ha, szFileName, SFILE_OPEN_BASE_FILE, (HANDLE *)&hf))
+ {
+ // Remember the file flags
+ dwFileFlags = hf->pFileEntry->dwFlags;
+
+ // Delete the file entry
+ nError = DeleteFileEntry(ha, hf);
+ if(nError == ERROR_SUCCESS)
+ {
+ ha->dwFlags |= dwFlagNew;
+ ha->dwReservedFiles++;
+ }
+ // Free the file entry
+ FreeFileHandle(hf);
+ }
+
+ // If the deletion failed, set the "none" flag
+ ha->dwFlags |= (nError != ERROR_SUCCESS) ? dwFlagNone : 0;
+ return dwFileFlags;
+}
+
+void InvalidateInternalFiles(TMPQArchive * ha)
+{
// Do nothing if we are in the middle of saving internal files
if(!(ha->dwFlags & MPQ_FLAG_SAVING_TABLES))
{
//
- // We clear the file entries of (listfile) and (attributes)
+ // We clear the file entries for (listfile), (attributes) and (signature)
// For each internal file cleared, we increment the number
// of reserved entries in the file table.
//
// Invalidate the (listfile), if not done yet
- if((ha->dwFlags & MPQ_FLAG_LISTFILE_INVALID) == 0)
+ if((ha->dwFlags & (MPQ_FLAG_LISTFILE_NONE | MPQ_FLAG_LISTFILE_NEW)) == 0)
{
- // If the (listfile) is not there but will be, reserve one entry
- pFileEntry1 = GetFileEntryExact(ha, LISTFILE_NAME, LANG_NEUTRAL);
- if(pFileEntry1 == NULL && ha->dwFileFlags1)
- ha->dwReservedFiles++;
-
- // Mark the (listfile) as invalid
- ha->dwFlags |= MPQ_FLAG_LISTFILE_INVALID;
+ ha->dwFileFlags1 = InvalidateInternalFile(ha, LISTFILE_NAME, MPQ_FLAG_LISTFILE_NONE, MPQ_FLAG_LISTFILE_NEW);
}
// Invalidate the (attributes), if not done yet
- if((ha->dwFlags & MPQ_FLAG_ATTRIBUTES_INVALID) == 0)
+ if((ha->dwFlags & (MPQ_FLAG_ATTRIBUTES_NONE | MPQ_FLAG_ATTRIBUTES_NEW)) == 0)
{
- // If the (attributes) is not there but will be, reserve one entry
- pFileEntry2 = GetFileEntryExact(ha, ATTRIBUTES_NAME, LANG_NEUTRAL);
- if(pFileEntry2 == NULL && ha->dwFileFlags2)
- ha->dwReservedFiles++;
-
- // Mark (attributes) as invalid
- ha->dwFlags |= MPQ_FLAG_ATTRIBUTES_INVALID;
+ ha->dwFileFlags2 = InvalidateInternalFile(ha, ATTRIBUTES_NAME, MPQ_FLAG_ATTRIBUTES_NONE, MPQ_FLAG_ATTRIBUTES_NEW);
}
// Invalidate the (signature), if not done yet
- if((ha->dwFlags & MPQ_FLAG_SIGNATURE_INVALID) == 0)
+ if((ha->dwFlags & (MPQ_FLAG_SIGNATURE_NONE | MPQ_FLAG_SIGNATURE_NEW)) == 0)
{
- // If the (signature) is not there but will be, reserve one entry
- pFileEntry3 = GetFileEntryExact(ha, SIGNATURE_NAME, LANG_NEUTRAL);
- if(pFileEntry3 == NULL && ha->dwFileFlags3)
- ha->dwReservedFiles++;
-
- // Mark (signature) as invalid
- ha->dwFlags |= MPQ_FLAG_SIGNATURE_INVALID;
+ ha->dwFileFlags3 = InvalidateInternalFile(ha, SIGNATURE_NAME, MPQ_FLAG_SIGNATURE_NONE, MPQ_FLAG_SIGNATURE_NEW);
}
// Remember that the MPQ has been changed
@@ -2030,7 +2101,7 @@ int CreateHashTable(TMPQArchive * ha, DWORD dwHashTableSize)
// Fill it
memset(pHashTable, 0xFF, dwHashTableSize * sizeof(TMPQHash));
- ha->dwHashTableSize = dwHashTableSize;
+ ha->pHeader->dwHashTableSize = dwHashTableSize;
ha->dwMaxFileCount = dwHashTableSize;
ha->pHashTable = pHashTable;
return ERROR_SUCCESS;
@@ -2065,8 +2136,9 @@ static TMPQHash * LoadHashTable(TMPQArchive * ha)
// Read, decrypt and uncompress the hash table
pHashTable = (TMPQHash *)LoadMpqTable(ha, ByteOffset, dwCmpSize, dwTableSize, MPQ_KEY_HASH_TABLE, &bHashTableIsCut);
+// DumpHashTable(pHashTable, pHeader->dwHashTableSize);
- // If the hash table was cut, we need to remember it
+ // If the hash table was cut, we can/have to defragment it
if(pHashTable != NULL && bHashTableIsCut)
ha->dwFlags |= (MPQ_FLAG_MALFORMED | MPQ_FLAG_HASH_TABLE_CUT);
break;
@@ -2084,6 +2156,17 @@ static TMPQHash * LoadHashTable(TMPQArchive * ha)
return pHashTable;
}
+int CreateFileTable(TMPQArchive * ha, DWORD dwFileTableSize)
+{
+ ha->pFileTable = STORM_ALLOC(TFileEntry, dwFileTableSize);
+ if(ha->pFileTable == NULL)
+ return ERROR_NOT_ENOUGH_MEMORY;
+
+ memset(ha->pFileTable, 0x00, sizeof(TFileEntry) * dwFileTableSize);
+ ha->dwFileTableSize = dwFileTableSize;
+ return ERROR_SUCCESS;
+}
+
TMPQBlock * LoadBlockTable(TMPQArchive * ha, bool /* bDontFixEntries */)
{
TMPQHeader * pHeader = ha->pHeader;
@@ -2202,42 +2285,30 @@ int LoadAnyHashTable(TMPQArchive * ha)
// Note: We don't care about HET table limits, because HET table is rebuilt
// after each file add/rename/delete.
ha->dwMaxFileCount = (ha->pHashTable != NULL) ? pHeader->dwHashTableSize : HASH_TABLE_SIZE_MAX;
- ha->dwHashTableSize = pHeader->dwHashTableSize;
return ERROR_SUCCESS;
}
-static int BuildFileTable_Classic(
- TMPQArchive * ha,
- TFileEntry * pFileTable)
+static int BuildFileTable_Classic(TMPQArchive * ha)
{
- TFileEntry * pFileEntry;
TMPQHeader * pHeader = ha->pHeader;
TMPQBlock * pBlockTable;
int nError = ERROR_SUCCESS;
// Sanity checks
assert(ha->pHashTable != NULL);
+ assert(ha->pFileTable != NULL);
// If the MPQ has no block table, do nothing
- if(ha->pHeader->dwBlockTableSize == 0)
+ if(pHeader->dwBlockTableSize == 0)
return ERROR_SUCCESS;
+ assert(ha->dwFileTableSize >= pHeader->dwBlockTableSize);
// Load the block table
+ // WARNING! ha->pFileTable can change in the process!!
pBlockTable = (TMPQBlock *)LoadBlockTable(ha);
if(pBlockTable != NULL)
{
- // If we don't have HET table, we build the file entries from the hash&block tables
- // If we do, we only update it from the hash table
- if(ha->pHetTable == NULL)
- {
- nError = BuildFileTableFromBlockTable(ha, pFileTable, pBlockTable);
- }
- else
- {
- nError = UpdateFileTableFromHashTable(ha, pFileTable);
- }
-
- // Free the block table
+ nError = BuildFileTableFromBlockTable(ha, pBlockTable);
STORM_FREE(pBlockTable);
}
else
@@ -2265,15 +2336,14 @@ static int BuildFileTable_Classic(
// Now merge the hi-block table to the file table
if(nError == ERROR_SUCCESS)
{
+ TFileEntry * pFileEntry = ha->pFileTable;
+
+ // Swap the hi-block table
BSWAP_ARRAY16_UNSIGNED(pHiBlockTable, dwTableSize);
- pFileEntry = pFileTable;
// Add the high file offset to the base file offset.
- for(DWORD i = 0; i < pHeader->dwBlockTableSize; i++)
- {
+ for(DWORD i = 0; i < pHeader->dwBlockTableSize; i++, pFileEntry++)
pFileEntry->ByteOffset = MAKE_OFFSET64(pHiBlockTable[i], pFileEntry->ByteOffset);
- pFileEntry++;
- }
}
// Free the hi-block table
@@ -2285,18 +2355,14 @@ static int BuildFileTable_Classic(
}
}
- // Set the current size of the file table
- ha->dwFileTableSize = pHeader->dwBlockTableSize;
return nError;
}
-static int BuildFileTable_HetBet(
- TMPQArchive * ha,
- TFileEntry * pFileTable)
+static int BuildFileTable_HetBet(TMPQArchive * ha)
{
TMPQHetTable * pHetTable = ha->pHetTable;
TMPQBetTable * pBetTable;
- TFileEntry * pFileEntry = pFileTable;
+ TFileEntry * pFileEntry = ha->pFileTable;
TBitArray * pBitArray;
DWORD dwBitPosition = 0;
DWORD i;
@@ -2317,7 +2383,7 @@ static int BuildFileTable_HetBet(
DWORD dwFileIndex = 0;
// Is the entry in the HET table occupied?
- if(pHetTable->pNameHashes[i] != 0)
+ if(pHetTable->pNameHashes[i] != HET_ENTRY_FREE)
{
// Load the index to the BET table
GetBits(pHetTable->pBetIndexes, pHetTable->dwIndexSizeTotal * i,
@@ -2337,14 +2403,14 @@ static int BuildFileTable_HetBet(
8);
// Combine both part of the name hash and put it to the file table
- pFileEntry = pFileTable + dwFileIndex;
+ pFileEntry = ha->pFileTable + dwFileIndex;
pFileEntry->FileNameHash = (NameHash1 << pBetTable->dwBitCount_NameHash2) | NameHash2;
}
}
}
// Go through the entire BET table and convert it to the file table.
- pFileEntry = pFileTable;
+ pFileEntry = ha->pFileTable;
pBitArray = pBetTable->pFileTable;
for(i = 0; i < pBetTable->dwEntryCount; i++)
{
@@ -2389,7 +2455,6 @@ static int BuildFileTable_HetBet(
}
// Set the current size of the file table
- ha->dwFileTableSize = pBetTable->dwEntryCount;
FreeBetTable(pBetTable);
nError = ERROR_SUCCESS;
}
@@ -2403,11 +2468,11 @@ static int BuildFileTable_HetBet(
int BuildFileTable(TMPQArchive * ha)
{
- TFileEntry * pFileTable;
DWORD dwFileTableSize;
bool bFileTableCreated = false;
// Sanity checks
+ assert(ha->pFileTable == NULL);
assert(ha->dwFileTableSize == 0);
assert(ha->dwMaxFileCount != 0);
@@ -2415,18 +2480,19 @@ int BuildFileTable(TMPQArchive * ha)
dwFileTableSize = STORMLIB_MAX(ha->pHeader->dwBlockTableSize, ha->dwMaxFileCount);
// Allocate the file table with size determined before
- pFileTable = STORM_ALLOC(TFileEntry, dwFileTableSize);
- if(pFileTable == NULL)
+ ha->pFileTable = STORM_ALLOC(TFileEntry, dwFileTableSize);
+ if(ha->pFileTable == NULL)
return ERROR_NOT_ENOUGH_MEMORY;
// Fill the table with zeros
- memset(pFileTable, 0, dwFileTableSize * sizeof(TFileEntry));
+ memset(ha->pFileTable, 0, dwFileTableSize * sizeof(TFileEntry));
+ ha->dwFileTableSize = dwFileTableSize;
// If we have HET table, we load file table from the BET table
// Note: If BET table is corrupt or missing, we set the archive as read only
if(ha->pHetTable != NULL)
{
- if(BuildFileTable_HetBet(ha, pFileTable) != ERROR_SUCCESS)
+ if(BuildFileTable_HetBet(ha) != ERROR_SUCCESS)
ha->dwFlags |= MPQ_FLAG_READ_ONLY;
else
bFileTableCreated = true;
@@ -2436,209 +2502,105 @@ int BuildFileTable(TMPQArchive * ha)
// Note: If block table is corrupt or missing, we set the archive as read only
if(ha->pHashTable != NULL)
{
- if(BuildFileTable_Classic(ha, pFileTable) != ERROR_SUCCESS)
+ if(BuildFileTable_Classic(ha) != ERROR_SUCCESS)
ha->dwFlags |= MPQ_FLAG_READ_ONLY;
else
bFileTableCreated = true;
}
-
- // If something failed, we free the file table entry
- if(bFileTableCreated == false)
+
+ // Return result
+ return bFileTableCreated ? ERROR_SUCCESS : ERROR_FILE_CORRUPT;
+}
+
+/*
+void UpdateBlockTableSize(TMPQArchive * ha)
+{
+ TFileEntry * pFileTableEnd = ha->pFileTable + ha->dwFileTableSize;
+ TFileEntry * pFileEntry;
+ DWORD dwBlockTableSize = 0;
+
+ // Calculate the number of files
+ for(pFileEntry = ha->pFileTable; pFileEntry < pFileTableEnd; pFileEntry++)
{
- STORM_FREE(pFileTable);
- return ERROR_FILE_CORRUPT;
+ // If the source table entry is valid,
+ if(pFileEntry->dwFlags & MPQ_FILE_EXISTS)
+ dwBlockTableSize = (DWORD)(pFileEntry - ha->pFileTable) + 1;
}
- // Assign it to the archive structure
- ha->pFileTable = pFileTable;
- return ERROR_SUCCESS;
+ // Save the block table size to the MPQ header
+ ha->pHeader->dwBlockTableSize = ha->dwReservedFiles + dwBlockTableSize;
}
+*/
// Defragment the file table so it does not contain any gaps
int DefragmentFileTable(TMPQArchive * ha)
{
- TFileEntry * pNewTable;
- TMPQHash * pHashEnd = ha->pHashTable + ha->dwHashTableSize;
- TMPQHash * pHash;
- PDWORD NewIndexes;
- DWORD dwNewTableSize = 0;
- DWORD dwNewBlockIndex;
+ TFileEntry * pFileTableEnd = ha->pFileTable + ha->dwFileTableSize;
+ TFileEntry * pSource = ha->pFileTable;
+ TFileEntry * pTarget = ha->pFileTable;
+ LPDWORD DefragmentTable;
+ DWORD dwBlockTableSize = 0;
+ DWORD dwSrcIndex;
+ DWORD dwTrgIndex;
// Allocate brand new file table
- NewIndexes = STORM_ALLOC(DWORD, ha->dwFileTableSize);
- if(NewIndexes != NULL)
+ DefragmentTable = STORM_ALLOC(DWORD, ha->dwFileTableSize);
+ if(DefragmentTable != NULL)
{
// Clear the file table
- memset(NewIndexes, 0xFF, sizeof(DWORD) * ha->dwFileTableSize);
+ memset(DefragmentTable, 0xFF, sizeof(DWORD) * ha->dwFileTableSize);
- // Create map of OldBlockIndex => NewBlockIndex
- for(pHash = ha->pHashTable; pHash < pHashEnd; pHash++)
+ // Parse the entire file table and defragment it
+ for(; pSource < pFileTableEnd; pSource++)
{
- // If the entry is valid, move it
- if(IsValidHashEntry(ha, pHash))
+ // If the source table entry is valid,
+ if(pSource->dwFlags & MPQ_FILE_EXISTS)
{
- // Was the new index already resolved?
- // Note: Multiple hash table entries can point to the same block table entry
- if(NewIndexes[pHash->dwBlockIndex] == 0xFFFFFFFF)
- NewIndexes[pHash->dwBlockIndex] = dwNewTableSize++;
+ // Remember the index conversion
+ dwSrcIndex = (DWORD)(pSource - ha->pFileTable);
+ dwTrgIndex = (DWORD)(pTarget - ha->pFileTable);
+ DefragmentTable[dwSrcIndex] = dwTrgIndex;
+
+ // Move the entry, if needed
+ if(pTarget != pSource)
+ pTarget[0] = pSource[0];
+ pTarget++;
+
+ // Update the block table size
+ dwBlockTableSize = (DWORD)(pSource - ha->pFileTable) + 1;
}
}
- // We have to append reserved files
- dwNewTableSize += ha->dwReservedFiles;
-
- // Allocate new file table
- pNewTable = STORM_ALLOC(TFileEntry, ha->dwMaxFileCount);
- if(pNewTable != NULL)
+ // Did we defragment something?
+ if(pTarget < pFileTableEnd)
{
- // Fill the new table with zeros
- memset(pNewTable, 0, sizeof(TFileEntry) * ha->dwMaxFileCount);
+ // Clear the remaining file entries
+ memset(pTarget, 0, (pFileTableEnd - pTarget) * sizeof(TFileEntry));
- // Create map of OldBlockIndex => NewBlockIndex
- for(pHash = ha->pHashTable; pHash < pHashEnd; pHash++)
+ // Go through the hash table and relocate the block indexes
+ if(ha->pHashTable != NULL)
{
- // If the entry is valid, move it
- if(IsValidHashEntry(ha, pHash))
- {
- // The new index should be resolved by now
- assert(NewIndexes[pHash->dwBlockIndex] != 0xFFFFFFFF);
- dwNewBlockIndex = NewIndexes[pHash->dwBlockIndex];
+ TMPQHash * pHashTableEnd = ha->pHashTable + ha->pHeader->dwHashTableSize;
+ TMPQHash * pHash;
- // Remap the old entry to the new entry
- pNewTable[dwNewBlockIndex] = ha->pFileTable[pHash->dwBlockIndex];
- pHash->dwBlockIndex = dwNewBlockIndex;
- }
- }
-
- // Replace the old file table with the new table
- STORM_FREE(ha->pFileTable);
- ha->pFileTable = pNewTable;
- ha->dwFileTableSize = dwNewTableSize;
- }
-
- // Free the conversion table
- STORM_FREE(NewIndexes);
- }
-
- return ERROR_SUCCESS;
-}
-
-// Defragment the file table so it does not contain any gaps
-// Note: As long as all values of all TMPQHash::dwBlockIndex
-// are not HASH_ENTRY_FREE, the startup search index does not matter.
-// Hash table is circular, so as long as there is no terminator,
-// all entries will be found.
-int DefragmentHashTable(TMPQArchive * ha)
-{
- TMPQHash * pNewTable;
- TMPQHash * pHashEnd = ha->pHashTable + ha->pHeader->dwHashTableSize;
- TMPQHash * pNewHash;
- TMPQHash * pHash;
- DWORD dwNewTableSize = 0;
- DWORD dwNewIndex = 0;
-
- // Calculate how many entries in the hash table we really need
- for(pHash = ha->pHashTable; pHash < pHashEnd; pHash++)
- dwNewTableSize += IsValidHashEntry(ha, pHash) ? 1 : 0;
- dwNewTableSize = GetHashTableSizeForFileCount(dwNewTableSize);
-
- // Could the hash table be shrinked?
- if(dwNewTableSize < ha->dwHashTableSize)
- {
- pNewTable = STORM_ALLOC(TMPQHash, dwNewTableSize);
- if(pNewTable != NULL)
- {
- // Initialize the new table
- memset(pNewTable, 0xFF, dwNewTableSize * sizeof(TMPQHash));
- pNewHash = pNewTable;
-
- // Copy the entries from the current hash table to new hash table
- for(pHash = ha->pHashTable; pHash < pHashEnd; pHash++)
- {
- if(IsValidHashEntry(ha, pHash))
+ for(pHash = ha->pHashTable; pHash < pHashTableEnd; pHash++)
{
- assert(dwNewIndex < dwNewTableSize);
- pNewHash[dwNewIndex++] = pHash[0];
+ if(pHash->dwBlockIndex < ha->dwFileTableSize)
+ {
+ assert(DefragmentTable[pHash->dwBlockIndex] != HASH_ENTRY_FREE);
+ pHash->dwBlockIndex = DefragmentTable[pHash->dwBlockIndex];
+ }
}
}
-
- // Fill the remaining entries so they look like deleted
- while(dwNewIndex < dwNewTableSize)
- pNewHash[dwNewIndex++].dwBlockIndex = HASH_ENTRY_DELETED;
-
- // Swap the hash tables
- STORM_FREE(ha->pHashTable);
- ha->pHashTable = pNewTable;
- ha->dwHashTableSize = dwNewTableSize;
}
- }
- return ERROR_SUCCESS;
-}
-// Performs final validation (and possible defragmentation)
-// of malformed hash+block tables
-int ShrinkMalformedMpqTables(TMPQArchive * ha)
-{
- // Sanity checks
- assert(ha->pHeader->wFormatVersion == MPQ_FORMAT_VERSION_1);
- assert(ha->pHashTable != NULL);
- assert(ha->pFileTable != NULL);
+ // Save the block table size
+ ha->pHeader->dwBlockTableSize = ha->dwReservedFiles + dwBlockTableSize;
- // Now perform the defragmentation of the block table
- if(ha->dwFlags & MPQ_FLAG_BLOCK_TABLE_CUT)
- {
- assert(ha->dwFileTableSize == ha->pHeader->dwBlockTableSize);
- DefragmentFileTable(ha);
- }
-
- // If the hash table has been cut, we need to defragment it
- // This will greatly improve performance on searching and loading listfile
- if(ha->dwFlags & MPQ_FLAG_HASH_TABLE_CUT)
- {
- assert(ha->dwHashTableSize == ha->pHeader->dwHashTableSize);
- DefragmentHashTable(ha);
+ // Free the defragment table
+ STORM_FREE(DefragmentTable);
}
-/*
- TMPQHash * pSrcEntry = ha->pHashTable;
- TMPQHash * pTrgEntry = ha->pHashTable;
- DWORD dwNewEntryCount;
- DWORD dwNewTableSize;
-
- assert(ha->dwHashTableSize == ha->pHeader->dwHashTableSize);
-
- // Defragment the hash table
- for(i = 0; i < ha->dwHashTableSize; i++, pSrcEntry++)
- {
- // If the entry is valid, move it
- if(pSrcEntry->dwBlockIndex < ha->dwFileTableSize)
- {
- // Fix the hash table index in the file table and move the entry
- if(pTrgEntry != pSrcEntry)
- {
- pFileTable[pSrcEntry->dwBlockIndex].dwHashIndex = (DWORD)(pTrgEntry - pHashTable);
- *pTrgEntry = *pSrcEntry;
- }
-
- // Increment the number of used entries
- pTrgEntry++;
- }
- }
-
- // Calculate the new hash table size
- dwNewEntryCount = (DWORD)(pTrgEntry - pHashTable);
- dwNewTableSize = GetHashTableSizeForFileCount(dwNewEntryCount);
-
- // Mark the extra entries in the hash table as deleted
- // This causes every hash search to go over the entire hash table,
- // but if the hash table is cut, it would do it anyway
- memset(pHashTable + dwNewEntryCount, 0xFF, (dwNewTableSize - dwNewEntryCount) * sizeof(TMPQHash));
- for(i = dwNewEntryCount; i < dwNewTableSize; i++)
- pHashTable[i].dwBlockIndex = HASH_ENTRY_DELETED;
- // Change the hash table size in the header
- ha->dwHashTableSize = dwNewTableSize;
- }
-*/
return ERROR_SUCCESS;
}
@@ -2647,23 +2609,32 @@ int ShrinkMalformedMpqTables(TMPQArchive * ha)
int RebuildHetTable(TMPQArchive * ha)
{
TMPQHetTable * pOldHetTable = ha->pHetTable;
- ULONGLONG FileNameHash;
- DWORD i;
+ TFileEntry * pFileTableEnd;
+ TFileEntry * pFileEntry;
+ DWORD dwBlockTableSize = ha->dwFileTableSize;
int nError = ERROR_SUCCESS;
+ // If we are in the state of saving MPQ tables, the real size of block table
+ // must already have been calculated. Use that value instead
+ if(ha->dwFlags & MPQ_FLAG_SAVING_TABLES)
+ {
+ assert(ha->pHeader->dwBlockTableSize != 0);
+ dwBlockTableSize = ha->pHeader->dwBlockTableSize;
+ }
+
// Create new HET table based on the total number of entries in the file table
// Note that if we fail to create it, we just stop using HET table
- ha->pHetTable = CreateHetTable(ha->dwFileTableSize, 0, 0x40, NULL);
+ ha->pHetTable = CreateHetTable(dwBlockTableSize, 0, 0x40, NULL);
if(ha->pHetTable != NULL)
{
// Go through the file table again and insert all existing files
- for(i = 0; i < ha->dwFileTableSize; i++)
+ pFileTableEnd = ha->pFileTable + dwBlockTableSize;
+ for(pFileEntry = ha->pFileTable; pFileEntry < pFileTableEnd; pFileEntry++)
{
- if(ha->pFileTable[i].dwFlags & MPQ_FILE_EXISTS)
+ if(pFileEntry->dwFlags & MPQ_FILE_EXISTS)
{
// Get the high
- FileNameHash = ha->pFileTable[i].FileNameHash;
- nError = InsertHetEntry(ha->pHetTable, FileNameHash, i);
+ nError = InsertHetEntry(ha->pHetTable, pFileEntry->FileNameHash, (DWORD)(pFileEntry - ha->pFileTable));
if(nError != ERROR_SUCCESS)
break;
}
@@ -2677,31 +2648,33 @@ int RebuildHetTable(TMPQArchive * ha)
// Rebuilds the file table, removing all deleted file entries.
// Used when compacting the archive
-int RebuildFileTable(TMPQArchive * ha, DWORD dwNewHashTableSize, DWORD dwNewMaxFileCount)
+int RebuildFileTable(TMPQArchive * ha, DWORD dwNewHashTableSize)
{
- TFileEntry * pOldFileTableEnd = ha->pFileTable + ha->dwFileTableSize;
- TFileEntry * pOldFileTable = ha->pFileTable;
- TFileEntry * pFileTable;
TFileEntry * pFileEntry;
- TFileEntry * pOldEntry;
+ TMPQHash * pHashTableEnd = ha->pHashTable + ha->pHeader->dwHashTableSize;
TMPQHash * pOldHashTable = ha->pHashTable;
TMPQHash * pHashTable = NULL;
TMPQHash * pHash;
int nError = ERROR_SUCCESS;
// The new hash table size must be greater or equal to the current hash table size
- assert(dwNewHashTableSize >= ha->dwHashTableSize);
-
- // The new hash table size must be a power of two
+ assert(dwNewHashTableSize >= ha->pHeader->dwHashTableSize);
+ assert(dwNewHashTableSize >= ha->dwMaxFileCount);
assert((dwNewHashTableSize & (dwNewHashTableSize - 1)) == 0);
+ assert(ha->pHashTable != NULL);
- // Allocate the new file table
- pFileTable = pFileEntry = STORM_ALLOC(TFileEntry, dwNewMaxFileCount);
- if(pFileTable == NULL)
- nError = ERROR_NOT_ENOUGH_MEMORY;
+ // Reallocate the new file table, if needed
+ if(dwNewHashTableSize > ha->dwFileTableSize)
+ {
+ ha->pFileTable = STORM_REALLOC(TFileEntry, ha->pFileTable, dwNewHashTableSize);
+ if(ha->pFileTable == NULL)
+ return ERROR_NOT_ENOUGH_MEMORY;
+
+ memset(ha->pFileTable + ha->dwFileTableSize, 0, (dwNewHashTableSize - ha->dwFileTableSize) * sizeof(TFileEntry));
+ }
// Allocate new hash table
- if(nError == ERROR_SUCCESS && ha->pHashTable != NULL)
+ if(nError == ERROR_SUCCESS)
{
pHashTable = STORM_ALLOC(TMPQHash, dwNewHashTableSize);
if(pHashTable == NULL)
@@ -2712,56 +2685,31 @@ int RebuildFileTable(TMPQArchive * ha, DWORD dwNewHashTableSize, DWORD dwNewMaxF
if(nError == ERROR_SUCCESS)
{
// Make sure that the hash table is properly filled
- memset(pFileTable, 0x00, sizeof(TFileEntry) * dwNewMaxFileCount);
memset(pHashTable, 0xFF, sizeof(TMPQHash) * dwNewHashTableSize);
-
- // Set the new tables to the MPQ archive
- ha->pFileTable = pFileTable;
ha->pHashTable = pHashTable;
// Set the new limits to the MPQ archive
- ha->dwHashTableSize = dwNewHashTableSize;
- ha->dwMaxFileCount = dwNewMaxFileCount;
+ ha->pHeader->dwHashTableSize = dwNewHashTableSize;
- // Now copy all the file entries
- for(pOldEntry = pOldFileTable; pOldEntry < pOldFileTableEnd; pOldEntry++)
+ // Parse the old hash table and copy all entries to the new table
+ for(pHash = pOldHashTable; pHash < pHashTableEnd; pHash++)
{
- // If the file entry exists, we copy it to the new table
- // Otherwise, we skip it
- if(pOldEntry->dwFlags & MPQ_FILE_EXISTS)
+ if(IsValidHashEntry2(ha, pHash))
{
- // Copy the file entry
- *pFileEntry = *pOldEntry;
-
- // Create hash entry for it
- if(ha->pHashTable != NULL)
- {
- pHash = AllocateHashEntry(ha, pFileEntry);
- if(pHash == NULL)
- {
- nError = ERROR_DISK_FULL;
- break;
- }
- }
-
- // Move the file entry by one
- pFileEntry++;
+ pFileEntry = ha->pFileTable + pHash->dwBlockIndex;
+ AllocateHashEntry(ha, pFileEntry, pHash->lcLocale);
}
}
- // Update the file table size
- ha->dwFileTableSize = (DWORD)(pFileEntry - pFileTable);
+ // Increment the max file count for the file
+ ha->dwFileTableSize = dwNewHashTableSize;
+ ha->dwMaxFileCount = dwNewHashTableSize;
ha->dwFlags |= MPQ_FLAG_CHANGED;
- pFileTable = NULL;
}
// Now free the remaining entries
- if(pOldFileTable != NULL)
- STORM_FREE(pOldFileTable);
if(pOldHashTable != NULL)
STORM_FREE(pOldHashTable);
- if(pFileTable != NULL)
- STORM_FREE(pFileTable);
return nError;
}