about summary refs log tree commit diff
path: root/src/SBaseFileTable.cpp
diff options
context:
space:
mode:
author unknown <E:\Ladik\Mail> 2015-04-18 23:12:47 +0200
committer unknown <E:\Ladik\Mail> 2015-04-18 23:12:47 +0200
commit 68e2c2527db10dcfdd9fdc2219c7fe9784abf738 (patch)
tree ad00acb4ce162d3a780eabdc67ebcc804591b471 /src/SBaseFileTable.cpp
parent d47864c13d4d5015445343f652ef3d3b3b73cb25 (diff)
+ SFileGetFileInfo(SFileMpqHashTable) now returns updated hash table instead of raw table loaded from the file
Diffstat (limited to 'src/SBaseFileTable.cpp')
-rw-r--r--  src/SBaseFileTable.cpp  90
1 files changed, 68 insertions, 22 deletions
diff --git a/src/SBaseFileTable.cpp b/src/SBaseFileTable.cpp
index 635db08..7209de2 100644
--- a/src/SBaseFileTable.cpp
+++ b/src/SBaseFileTable.cpp
@@ -568,6 +568,20 @@ int ConvertMpqHeaderToFormat4(
//-----------------------------------------------------------------------------
// Support for hash table
+static DWORD IsValidHashEntry(TMPQArchive * ha, TMPQHash * pHash)
+{
+ // The block table index must be smaller than file table size
+ if(pHash->dwBlockIndex < ha->dwFileTableSize)
+ {
+ // The block table entry must contain a file
+ if(ha->pFileTable[pHash->dwBlockIndex].dwFlags & MPQ_FILE_EXISTS)
+ return pHash->dwBlockIndex;
+ }
+
+ // Return an invalid index
+ return HASH_ENTRY_FREE;
+}
+
// Returns a hash table entry in the following order:
// 1) A hash table entry with the neutral locale
// 2) A hash table entry with any other locale
@@ -2034,7 +2048,7 @@ int CreateHashTable(TMPQArchive * ha, DWORD dwHashTableSize)
return ERROR_SUCCESS;
}
-TMPQHash * LoadHashTable(TMPQArchive * ha)
+static TMPQHash * LoadHashTable(TMPQArchive * ha)
{
TMPQHeader * pHeader = ha->pHeader;
ULONGLONG ByteOffset;
@@ -2455,35 +2469,67 @@ int BuildFileTable(TMPQArchive * ha)
// Defragment the file table so it does not contain any gaps
int DefragmentFileTable(TMPQArchive * ha)
{
- TFileEntry * pFileTable = ha->pFileTable;
- TFileEntry * pSrcEntry = pFileTable;
- TFileEntry * pTrgEntry = pFileTable;
+ TFileEntry * pNewTable;
+ TMPQHash * pHashEnd = ha->pHashTable + ha->pHeader->dwHashTableSize;
+ TMPQHash * pHash;
+ PDWORD NewIndexes;
+ DWORD dwNewTableSize = 0;
+ DWORD dwNewBlockIndex;
+ DWORD dwBlockIndex = 0;
- // Parse the file entries
- for(DWORD i = 0; i < ha->dwFileTableSize; i++, pSrcEntry++)
+ // Allocate brand new file table
+ NewIndexes = STORM_ALLOC(DWORD, ha->dwFileTableSize);
+ if(NewIndexes != NULL)
{
- // Is the source file entry does not contain anything valid, we skip it
- if(pSrcEntry->dwFlags & MPQ_FILE_EXISTS)
+ // Clear the file table
+ memset(NewIndexes, 0xFF, sizeof(DWORD) * ha->dwFileTableSize);
+
+ // Create map of OldBlockIndex => NewBlockIndex
+ for(pHash = ha->pHashTable; pHash < pHashEnd; pHash++)
{
- // Fix the block table index and move the entry
- if(pTrgEntry != pSrcEntry)
+ // If the entry is valid, move it
+ if((dwBlockIndex = IsValidHashEntry(ha, pHash)) != HASH_ENTRY_FREE)
{
- if(ha->pHashTable != NULL)
- ha->pHashTable[pSrcEntry->dwHashIndex].dwBlockIndex = (DWORD)(pTrgEntry - pFileTable);
- memmove(pTrgEntry, pSrcEntry, sizeof(TFileEntry));
+ // Was the new index already resolved?
+ // Note: More hash entries can point to the same file
+ if(NewIndexes[dwBlockIndex] == 0xFFFFFFFF)
+ NewIndexes[dwBlockIndex] = dwNewTableSize++;
}
+ }
+
+ // Allocate new file table
+ pNewTable = STORM_ALLOC(TFileEntry, dwNewTableSize);
+ if(pNewTable != NULL)
+ {
+ // Fill the new table with zeros
+ memset(pNewTable, 0, sizeof(TFileEntry) * dwNewTableSize);
+
+ // Create map of OldBlockIndex => NewBlockIndex
+ for(pHash = ha->pHashTable; pHash < pHashEnd; pHash++)
+ {
+ // If the entry is valid, move it
+ if((dwBlockIndex = IsValidHashEntry(ha, pHash)) != HASH_ENTRY_FREE)
+ {
+ // The new index should be resolved by now
+ assert(NewIndexes[dwBlockIndex] != 0xFFFFFFFF);
+ dwNewBlockIndex = NewIndexes[dwBlockIndex];
- // Move the target pointer by one entry
- pTrgEntry++;
+ // Remap the old entry to the new entry
+ pNewTable[dwNewBlockIndex] = ha->pFileTable[dwBlockIndex];
+ pHash->dwBlockIndex = dwNewBlockIndex;
+ }
+ }
+
+ // Replace the old file table with the new table
+ STORM_FREE(ha->pFileTable);
+ ha->pFileTable = pNewTable;
+ ha->dwFileTableSize = dwNewTableSize;
}
- }
- // Store the new size of the file table
- ha->dwFileTableSize = (DWORD)(pTrgEntry - pFileTable);
+ // Free the conversion table
+ STORM_FREE(NewIndexes);
+ }
- // Fill the rest with zeros
- if(ha->dwFileTableSize < ha->dwMaxFileCount)
- memset(pFileTable + ha->dwFileTableSize, 0, (ha->dwMaxFileCount - ha->dwFileTableSize) * sizeof(TFileEntry));
return ERROR_SUCCESS;
}
@@ -2502,7 +2548,7 @@ int ShrinkMalformedMpqTables(TMPQArchive * ha)
//
// Note: Unhandled possible case where multiple hash table entries
- // point to the same block tabl entry.
+ // point to the same block table entry.
//
// Now perform the defragmentation of the block table