From bab5fd87a34d92737e92d0850be05890a5ce8e24 Mon Sep 17 00:00:00 2001
From: Peter Keresztes Schmidt
Date: Tue, 23 Jun 2020 08:54:12 +0200
Subject: Core/Misc: Replace Trinity::make_unique with std (#24869)

---
 src/common/DataStores/DB2FileLoader.cpp | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/src/common/DataStores/DB2FileLoader.cpp b/src/common/DataStores/DB2FileLoader.cpp
index 824f3e47945..46c3f3d6b9c 100644
--- a/src/common/DataStores/DB2FileLoader.cpp
+++ b/src/common/DataStores/DB2FileLoader.cpp
@@ -348,7 +348,7 @@ bool DB2FileLoaderRegularImpl::LoadTableData(DB2FileSource* source, uint32 secti
 {
     if (!_data)
     {
-        _data = Trinity::make_unique<uint8[]>(_header->RecordSize * _header->RecordCount + _header->StringTableSize + 8);
+        _data = std::make_unique<uint8[]>(_header->RecordSize * _header->RecordCount + _header->StringTableSize + 8);
         _stringTable = &_data[_header->RecordSize * _header->RecordCount];
     }
 
@@ -1002,7 +1002,7 @@ DB2FileLoaderSparseImpl::DB2FileLoaderSparseImpl(char const* fileName, DB2FileLo
     _source(source),
     _totalRecordSize(0),
     _maxRecordSize(0),
-    _fieldAndArrayOffsets(loadInfo ? (Trinity::make_unique<std::size_t[]>(loadInfo->Meta->FieldCount + loadInfo->FieldCount - (!loadInfo->Meta->HasIndexFieldInData() ? 1 : 0))) : nullptr)
+    _fieldAndArrayOffsets(loadInfo ? (std::make_unique<std::size_t[]>(loadInfo->Meta->FieldCount + loadInfo->FieldCount - (!loadInfo->Meta->HasIndexFieldInData() ? 1 : 0))) : nullptr)
 {
 }
 
@@ -1057,7 +1057,7 @@ bool DB2FileLoaderSparseImpl::LoadCatalogData(DB2FileSource* source, uint32 sect
 void DB2FileLoaderSparseImpl::SetAdditionalData(std::vector<uint32> /*idTable*/, std::vector<DB2RecordCopy> /*copyTable*/, std::vector<std::vector<DB2IndexData>> parentIndexes)
 {
     _parentIndexes = std::move(parentIndexes);
-    _recordBuffer = Trinity::make_unique<uint8[]>(_maxRecordSize);
+    _recordBuffer = std::make_unique<uint8[]>(_maxRecordSize);
 }
 
 char* DB2FileLoaderSparseImpl::AutoProduceData(uint32& maxId, char**& indexTable, std::vector<char*>& stringPool)
@@ -1777,7 +1777,7 @@ bool DB2FileLoader::LoadHeaders(DB2FileSource* source, DB2FileLoadInfo const* lo
     if (loadInfo && (_header.ParentLookupCount && loadInfo->Meta->ParentIndexField == -1))
         return false;
 
-    std::unique_ptr<DB2SectionHeader[]> sections = Trinity::make_unique<DB2SectionHeader[]>(_header.SectionCount);
+    std::unique_ptr<DB2SectionHeader[]> sections = std::make_unique<DB2SectionHeader[]>(_header.SectionCount);
     if (_header.SectionCount && !source->Read(sections.get(), sizeof(DB2SectionHeader) * _header.SectionCount))
         return false;
 
@@ -1808,7 +1808,7 @@ bool DB2FileLoader::LoadHeaders(DB2FileSource* source, DB2FileLoadInfo const* lo
             return false;
     }
 
-    std::unique_ptr<DB2FieldEntry[]> fieldData = Trinity::make_unique<DB2FieldEntry[]>(_header.FieldCount);
+    std::unique_ptr<DB2FieldEntry[]> fieldData = std::make_unique<DB2FieldEntry[]>(_header.FieldCount);
     if (!source->Read(fieldData.get(), sizeof(DB2FieldEntry) * _header.FieldCount))
         return false;
 
@@ -1818,7 +1818,7 @@ bool DB2FileLoader::LoadHeaders(DB2FileSource* source, DB2FileLoadInfo const* lo
     std::unique_ptr<std::unique_ptr<DB2CommonValue[]>[]> commonValues;
     if (_header.ColumnMetaSize)
     {
-        columnMeta = Trinity::make_unique<DB2ColumnMeta[]>(_header.TotalFieldCount);
+        columnMeta = std::make_unique<DB2ColumnMeta[]>(_header.TotalFieldCount);
         if (!source->Read(columnMeta.get(), _header.ColumnMetaSize))
             return false;
 
@@ -1830,30 +1830,30 @@ bool DB2FileLoader::LoadHeaders(DB2FileSource* source, DB2FileLoadInfo const* lo
                 columnMeta[loadInfo->Meta->IndexField].CompressionType == DB2ColumnCompression::SignedImmediate);
         }
 
-        palletValues = Trinity::make_unique<std::unique_ptr<DB2PalletValue[]>[]>(_header.TotalFieldCount);
+        palletValues = std::make_unique<std::unique_ptr<DB2PalletValue[]>[]>(_header.TotalFieldCount);
         for (uint32 i = 0; i < _header.TotalFieldCount; ++i)
         {
             if (columnMeta[i].CompressionType != DB2ColumnCompression::Pallet)
                 continue;
 
-            palletValues[i] = Trinity::make_unique<DB2PalletValue[]>(columnMeta[i].AdditionalDataSize / sizeof(DB2PalletValue));
+            palletValues[i] = std::make_unique<DB2PalletValue[]>(columnMeta[i].AdditionalDataSize / sizeof(DB2PalletValue));
             if (!source->Read(palletValues[i].get(), columnMeta[i].AdditionalDataSize))
                 return false;
         }
 
-        palletArrayValues = Trinity::make_unique<std::unique_ptr<DB2PalletValue[]>[]>(_header.TotalFieldCount);
+        palletArrayValues = std::make_unique<std::unique_ptr<DB2PalletValue[]>[]>(_header.TotalFieldCount);
         for (uint32 i = 0; i < _header.TotalFieldCount; ++i)
         {
             if (columnMeta[i].CompressionType != DB2ColumnCompression::PalletArray)
                 continue;
 
-            palletArrayValues[i] = Trinity::make_unique<DB2PalletValue[]>(columnMeta[i].AdditionalDataSize / sizeof(DB2PalletValue));
+            palletArrayValues[i] = std::make_unique<DB2PalletValue[]>(columnMeta[i].AdditionalDataSize / sizeof(DB2PalletValue));
             if (!source->Read(palletArrayValues[i].get(), columnMeta[i].AdditionalDataSize))
                 return false;
         }
 
-        std::unique_ptr<std::unique_ptr<DB2CommonValue[]>[]> commonData = Trinity::make_unique<std::unique_ptr<DB2CommonValue[]>[]>(_header.TotalFieldCount);
-        commonValues = Trinity::make_unique<std::unique_ptr<DB2CommonValue[]>[]>(_header.TotalFieldCount);
+        std::unique_ptr<std::unique_ptr<DB2CommonValue[]>[]> commonData = std::make_unique<std::unique_ptr<DB2CommonValue[]>[]>(_header.TotalFieldCount);
+        commonValues = std::make_unique<std::unique_ptr<DB2CommonValue[]>[]>(_header.TotalFieldCount);
         for (uint32 i = 0; i < _header.TotalFieldCount; ++i)
         {
             if (columnMeta[i].CompressionType != DB2ColumnCompression::CommonData)
@@ -1862,7 +1862,7 @@ bool DB2FileLoader::LoadHeaders(DB2FileSource* source, DB2FileLoadInfo const* lo
             if (!columnMeta[i].AdditionalDataSize)
                 continue;
 
-            commonData[i] = Trinity::make_unique<DB2CommonValue[]>(columnMeta[i].AdditionalDataSize / sizeof(DB2CommonValue));
+            commonData[i] = std::make_unique<DB2CommonValue[]>(columnMeta[i].AdditionalDataSize / sizeof(DB2CommonValue));
             if (!source->Read(commonData[i].get(), columnMeta[i].AdditionalDataSize))
                 return false;
 
--
cgit v1.2.3
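
A note on why the swap is mechanical: every call site keeps the same array element type and element count, only the namespace of the helper changes. The sketch below is not TrinityCore code; it assumes the removed Trinity::make_unique was a thin array-allocating helper that value-initializes its elements, which is exactly what the C++14 std::make_unique<T[]> overload guarantees, so each rewritten call allocates the same zero-filled buffer as before.

    #include <cstdint>
    #include <memory>
    #include <type_traits>

    // Hypothetical stand-in for the removed Trinity::make_unique (array overload only);
    // the real definition is not shown in this patch.
    namespace Trinity
    {
        template <typename T>
        typename std::enable_if<std::is_array<T>::value, std::unique_ptr<T>>::type
        make_unique(std::size_t count)
        {
            // Value-initializes the elements, matching std::make_unique<T[]>(count).
            return std::unique_ptr<T>(new typename std::remove_extent<T>::type[count]());
        }
    }

    int main()
    {
        // Before: Trinity::make_unique<uint8[]>(n); after: std::make_unique<uint8[]>(n).
        std::unique_ptr<std::uint8_t[]> before = Trinity::make_unique<std::uint8_t[]>(64);
        std::unique_ptr<std::uint8_t[]> after = std::make_unique<std::uint8_t[]>(64);
        return before[0] + after[0]; // both buffers are zero-initialized
    }

Even if the old helper used plain new T[n] without value-initialization, the standard version would only add zero-filling, which is harmless for buffers that are immediately overwritten by source->Read.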