From 64e70ac1027d6086583e6561df7ea80014704e74 Mon Sep 17 00:00:00 2001 From: Terry Kim Date: Sat, 13 Jul 2019 20:19:07 -0700 Subject: [PATCH] Apply clang-format (Chromium) (#13) --- .gitignore | 4 +- Benchmark/main.cpp | 1148 ++++++++-------- Examples/main.cpp | 123 +- Unittests/CacheHashTableTest.cpp | 1018 +++++++------- Unittests/CheckedAllocator.h | 86 +- Unittests/ConnectionMonitorTest.cpp | 120 +- Unittests/EpochManagerTest.cpp | 259 ++-- Unittests/HashTableManagerTest.cpp | 181 ++- Unittests/HashTableRecordTest.cpp | 240 ++-- Unittests/HashTableServiceTest.cpp | 78 +- Unittests/Mocks.h | 37 +- Unittests/PerfInfoTest.cpp | 148 +- .../ReadWriteHashTableSerializerTest.cpp | 239 ++-- Unittests/ReadWriteHashTableTest.cpp | 1186 ++++++++--------- Unittests/SettingAdapterTest.cpp | 48 +- Unittests/Utils.cpp | 50 +- Unittests/Utils.h | 135 +- Unittests/UtilsTest.cpp | 79 +- inc/L4/Epoch/Config.h | 44 +- inc/L4/Epoch/EpochActionManager.h | 72 +- inc/L4/Epoch/EpochQueue.h | 229 ++-- inc/L4/Epoch/EpochRefPolicy.h | 53 +- inc/L4/Epoch/IEpochActionManager.h | 20 +- inc/L4/HashTable/Cache/HashTable.h | 600 ++++----- inc/L4/HashTable/Cache/Metadata.h | 166 +-- inc/L4/HashTable/Common/Record.h | 323 ++--- inc/L4/HashTable/Common/SettingAdapter.h | 40 +- inc/L4/HashTable/Common/SharedHashTable.h | 333 +++-- inc/L4/HashTable/Config.h | 122 +- inc/L4/HashTable/IHashTable.h | 99 +- inc/L4/HashTable/ReadWrite/HashTable.h | 844 ++++++------ inc/L4/HashTable/ReadWrite/Serializer.h | 302 ++--- .../Connection/ConnectionMonitor.h | 115 +- inc/L4/Interprocess/Connection/EndPointInfo.h | 48 +- .../Connection/EndPointInfoUtils.h | 30 +- inc/L4/Interprocess/Container/List.h | 17 +- inc/L4/Interprocess/Container/String.h | 20 +- inc/L4/Interprocess/Container/Vector.h | 17 +- inc/L4/Interprocess/Utils/Handle.h | 41 +- inc/L4/LocalMemory/Context.h | 68 +- inc/L4/LocalMemory/EpochManager.h | 172 ++- inc/L4/LocalMemory/HashTableManager.h | 144 +- inc/L4/LocalMemory/HashTableService.h | 55 +- inc/L4/LocalMemory/Memory.h | 60 +- inc/L4/Log/IPerfLogger.h | 37 +- inc/L4/Log/PerfCounter.h | 336 +++-- inc/L4/Log/PerfLogger.h | 57 +- inc/L4/Serialization/SerializerHelper.h | 74 +- inc/L4/Utils/AtomicOffsetPtr.h | 76 +- inc/L4/Utils/Clock.h | 27 +- inc/L4/Utils/ComparerHasher.h | 86 +- inc/L4/Utils/Containers.h | 46 +- inc/L4/Utils/Exception.h | 22 +- inc/L4/Utils/Lock.h | 162 +-- inc/L4/Utils/Math.h | 81 +- inc/L4/Utils/Properties.h | 68 +- inc/L4/Utils/RunningThread.h | 101 +- inc/L4/Utils/Windows.h | 61 +- inc/L4/detail/ToRawPointer.h | 12 +- src/EpochActionManager.cpp | 104 +- .../Connection/ConnectionMonitor.cpp | 230 ++-- .../Connection/EndPointInfoUtils.cpp | 35 +- src/Interprocess/Utils/Handle.cpp | 53 +- src/PerfLogger.cpp | 28 +- 64 files changed, 5013 insertions(+), 5896 deletions(-) diff --git a/.gitignore b/.gitignore index c35c234..02ad03a 100644 --- a/.gitignore +++ b/.gitignore @@ -66,4 +66,6 @@ ipch/ *.psess *.vsp *.vspx -*.sap \ No newline at end of file +*.sap +*.htm +*.user diff --git a/Benchmark/main.cpp b/Benchmark/main.cpp index 560c1aa..21c31c1 100644 --- a/Benchmark/main.cpp +++ b/Benchmark/main.cpp @@ -2,709 +2,661 @@ #include "L4/Log/PerfCounter.h" #include +#include +#include #include #include #include #include #include -#include -#include -class Timer -{ -public: - Timer() - : m_start{ std::chrono::high_resolution_clock::now() } - {} +class Timer { + public: + Timer() : m_start{std::chrono::high_resolution_clock::now()} {} - void Reset() - { - m_start = std::chrono::high_resolution_clock::now(); 
- } + void Reset() { m_start = std::chrono::high_resolution_clock::now(); } - std::chrono::microseconds GetElapsedTime() - { - return std::chrono::duration_cast( - std::chrono::high_resolution_clock::now() - m_start); - } + std::chrono::microseconds GetElapsedTime() { + return std::chrono::duration_cast( + std::chrono::high_resolution_clock::now() - m_start); + } -private: - std::chrono::time_point m_start; + private: + std::chrono::time_point m_start; }; +class SynchronizedTimer { + public: + SynchronizedTimer() = default; -class SynchronizedTimer -{ -public: - SynchronizedTimer() = default; - - void Start() - { - if (m_isStarted) - { - return; - } - m_isStarted = true; - m_startCount = std::chrono::high_resolution_clock::now().time_since_epoch().count(); - + void Start() { + if (m_isStarted) { + return; } + m_isStarted = true; + m_startCount = + std::chrono::high_resolution_clock::now().time_since_epoch().count(); + } - void End() - { - m_endCount = std::chrono::high_resolution_clock::now().time_since_epoch().count(); - } + void End() { + m_endCount = + std::chrono::high_resolution_clock::now().time_since_epoch().count(); + } - std::chrono::microseconds GetElapsedTime() - { - std::chrono::nanoseconds start{ m_startCount }; - std::chrono::nanoseconds end{ m_endCount }; - - return std::chrono::duration_cast(end - start); - } + std::chrono::microseconds GetElapsedTime() { + std::chrono::nanoseconds start{m_startCount}; + std::chrono::nanoseconds end{m_endCount}; -private: - std::atomic_bool m_isStarted = false; - std::atomic_uint64_t m_startCount; - std::atomic_uint64_t m_endCount; + return std::chrono::duration_cast(end - start); + } + + private: + std::atomic_bool m_isStarted = false; + std::atomic_uint64_t m_startCount; + std::atomic_uint64_t m_endCount; }; - -struct PerThreadInfoForWriteTest -{ - std::thread m_thread; - std::size_t m_dataSetSize = 0; - std::chrono::microseconds m_totalTime; +struct PerThreadInfoForWriteTest { + std::thread m_thread; + std::size_t m_dataSetSize = 0; + std::chrono::microseconds m_totalTime; }; - -struct PerThreadInfoForReadTest -{ - std::thread m_thread; - std::size_t m_dataSetSize = 0; - std::chrono::microseconds m_totalTime; +struct PerThreadInfoForReadTest { + std::thread m_thread; + std::size_t m_dataSetSize = 0; + std::chrono::microseconds m_totalTime; }; +struct CommandLineOptions { + static constexpr std::size_t c_defaultDataSetSize = 1000000; + static constexpr std::uint32_t c_defaultNumBuckets = 1000000; + static constexpr std::uint16_t c_defaultKeySize = 16; + static constexpr std::uint32_t c_defaultValueSize = 100; + static constexpr bool c_defaultRandomizeValueSize = false; + static constexpr std::uint32_t c_defaultNumIterationsPerGetContext = 1; + static constexpr std::uint16_t c_defaultNumThreads = 1; + static constexpr std::uint32_t c_defaultEpochProcessingIntervalInMilli = 10; + static constexpr std::uint16_t c_defaultNumActionsQueue = 1; + static constexpr std::uint32_t c_defaultRecordTimeToLiveInSeconds = 300; + static constexpr std::uint64_t c_defaultCacheSizeInBytes = 1024 * 1024 * 1024; + static constexpr bool c_defaultForceTimeBasedEviction = false; -struct CommandLineOptions -{ - static constexpr std::size_t c_defaultDataSetSize = 1000000; - static constexpr std::uint32_t c_defaultNumBuckets = 1000000; - static constexpr std::uint16_t c_defaultKeySize = 16; - static constexpr std::uint32_t c_defaultValueSize = 100; - static constexpr bool c_defaultRandomizeValueSize = false; - static constexpr std::uint32_t 
c_defaultNumIterationsPerGetContext = 1; - static constexpr std::uint16_t c_defaultNumThreads = 1; - static constexpr std::uint32_t c_defaultEpochProcessingIntervalInMilli = 10; - static constexpr std::uint16_t c_defaultNumActionsQueue = 1; - static constexpr std::uint32_t c_defaultRecordTimeToLiveInSeconds = 300; - static constexpr std::uint64_t c_defaultCacheSizeInBytes = 1024 * 1024 * 1024; - static constexpr bool c_defaultForceTimeBasedEviction = false; - - std::string m_module; - std::size_t m_dataSetSize = 0; - std::uint32_t m_numBuckets = 0; - std::uint16_t m_keySize = 0; - std::uint32_t m_valueSize = 0; - bool m_randomizeValueSize = false; - std::uint32_t m_numIterationsPerGetContext = 0; - std::uint16_t m_numThreads = 0; - std::uint32_t m_epochProcessingIntervalInMilli; - std::uint8_t m_numActionsQueue = 0; + std::string m_module; + std::size_t m_dataSetSize = 0; + std::uint32_t m_numBuckets = 0; + std::uint16_t m_keySize = 0; + std::uint32_t m_valueSize = 0; + bool m_randomizeValueSize = false; + std::uint32_t m_numIterationsPerGetContext = 0; + std::uint16_t m_numThreads = 0; + std::uint32_t m_epochProcessingIntervalInMilli; + std::uint8_t m_numActionsQueue = 0; - // The followings are specific for cache hash tables. - std::uint32_t m_recordTimeToLiveInSeconds = 0U; - std::uint64_t m_cacheSizeInBytes = 0U; - bool m_forceTimeBasedEviction = false; + // The followings are specific for cache hash tables. + std::uint32_t m_recordTimeToLiveInSeconds = 0U; + std::uint64_t m_cacheSizeInBytes = 0U; + bool m_forceTimeBasedEviction = false; - bool IsCachingModule() const - { - static const std::string c_cachingModulePrefix{ "cache" }; - return m_module.substr(0, c_cachingModulePrefix.size()) == c_cachingModulePrefix; - } + bool IsCachingModule() const { + static const std::string c_cachingModulePrefix{"cache"}; + return m_module.substr(0, c_cachingModulePrefix.size()) == + c_cachingModulePrefix; + } }; - -class DataGenerator -{ -public: - DataGenerator( - std::size_t dataSetSize, - std::uint16_t keySize, - std::uint32_t valueSize, - bool randomizeValueSize, - bool isDebugMode = false) - : m_dataSetSize{ dataSetSize } - , m_keySize{ keySize } - { - if (isDebugMode) - { - std::cout << "Generating data set with size = " << dataSetSize << std::endl; - } - - Timer timer; - - // Populate keys. - m_keys.resize(m_dataSetSize); - m_keysBuffer.resize(m_dataSetSize); - for (std::size_t i = 0; i < m_dataSetSize; ++i) - { - m_keysBuffer[i].resize(keySize); - std::generate(m_keysBuffer[i].begin(), m_keysBuffer[i].end(), std::rand); - std::snprintf(reinterpret_cast(m_keysBuffer[i].data()), keySize, "%llu", i); - m_keys[i].m_data = m_keysBuffer[i].data(); - m_keys[i].m_size = m_keySize; - } - - // Populate values buffer. Assumes srand() is already called. - std::generate(m_valuesBuffer.begin(), m_valuesBuffer.end(), std::rand); - - // Populate values. - m_values.resize(m_dataSetSize); - std::size_t currentIndex = 0; - for (std::size_t i = 0; i < m_dataSetSize; ++i) - { - m_values[i].m_data = &m_valuesBuffer[currentIndex % c_valuesBufferSize]; - m_values[i].m_size = randomizeValueSize ? 
rand() % valueSize : valueSize; - currentIndex += valueSize; - } - - if (isDebugMode) - { - std::cout << "Finished generating data in " - << timer.GetElapsedTime().count() << " microseconds" << std::endl; - } +class DataGenerator { + public: + DataGenerator(std::size_t dataSetSize, + std::uint16_t keySize, + std::uint32_t valueSize, + bool randomizeValueSize, + bool isDebugMode = false) + : m_dataSetSize{dataSetSize}, m_keySize{keySize} { + if (isDebugMode) { + std::cout << "Generating data set with size = " << dataSetSize + << std::endl; } - L4::IReadOnlyHashTable::Key GetKey(std::size_t index) const - { - return m_keys[index % m_dataSetSize]; + Timer timer; + + // Populate keys. + m_keys.resize(m_dataSetSize); + m_keysBuffer.resize(m_dataSetSize); + for (std::size_t i = 0; i < m_dataSetSize; ++i) { + m_keysBuffer[i].resize(keySize); + std::generate(m_keysBuffer[i].begin(), m_keysBuffer[i].end(), std::rand); + std::snprintf(reinterpret_cast(m_keysBuffer[i].data()), keySize, + "%llu", i); + m_keys[i].m_data = m_keysBuffer[i].data(); + m_keys[i].m_size = m_keySize; } - L4::IReadOnlyHashTable::Value GetValue(std::size_t index) const - { - return m_values[index % m_dataSetSize]; + // Populate values buffer. Assumes srand() is already called. + std::generate(m_valuesBuffer.begin(), m_valuesBuffer.end(), std::rand); + + // Populate values. + m_values.resize(m_dataSetSize); + std::size_t currentIndex = 0; + for (std::size_t i = 0; i < m_dataSetSize; ++i) { + m_values[i].m_data = &m_valuesBuffer[currentIndex % c_valuesBufferSize]; + m_values[i].m_size = randomizeValueSize ? rand() % valueSize : valueSize; + currentIndex += valueSize; } -private: - std::size_t m_dataSetSize; - std::uint16_t m_keySize; + if (isDebugMode) { + std::cout << "Finished generating data in " + << timer.GetElapsedTime().count() << " microseconds" + << std::endl; + } + } - std::vector> m_keysBuffer; - std::vector m_keys; - std::vector m_values; + L4::IReadOnlyHashTable::Key GetKey(std::size_t index) const { + return m_keys[index % m_dataSetSize]; + } - static const std::size_t c_valuesBufferSize = 64 * 1024; - std::array m_valuesBuffer; + L4::IReadOnlyHashTable::Value GetValue(std::size_t index) const { + return m_values[index % m_dataSetSize]; + } + + private: + std::size_t m_dataSetSize; + std::uint16_t m_keySize; + + std::vector> m_keysBuffer; + std::vector m_keys; + std::vector m_values; + + static const std::size_t c_valuesBufferSize = 64 * 1024; + std::array m_valuesBuffer; }; +void PrintHardwareInfo() { + SYSTEM_INFO sysInfo; + GetSystemInfo(&sysInfo); -void PrintHardwareInfo() -{ - SYSTEM_INFO sysInfo; - GetSystemInfo(&sysInfo); - - printf("\n"); - printf("Hardware information: \n"); - printf("-------------------------------------\n"); - printf("%22s | %10u |\n", "OEM ID", sysInfo.dwOemId); - printf("%22s | %10u |\n", "Number of processors", sysInfo.dwNumberOfProcessors); - printf("%22s | %10u |\n", "Page size", sysInfo.dwPageSize); - printf("%22s | %10u |\n", "Processor type", sysInfo.dwProcessorType); - printf("-------------------------------------\n"); - printf("\n"); + printf("\n"); + printf("Hardware information: \n"); + printf("-------------------------------------\n"); + printf("%22s | %10u |\n", "OEM ID", sysInfo.dwOemId); + printf("%22s | %10u |\n", "Number of processors", + sysInfo.dwNumberOfProcessors); + printf("%22s | %10u |\n", "Page size", sysInfo.dwPageSize); + printf("%22s | %10u |\n", "Processor type", sysInfo.dwProcessorType); + printf("-------------------------------------\n"); + printf("\n"); } 
+void PrintOptions(const CommandLineOptions& options) { + printf("------------------------------------------------------\n"); -void PrintOptions(const CommandLineOptions& options) -{ - printf("------------------------------------------------------\n"); + printf("%39s | %10llu |\n", "Data set size", options.m_dataSetSize); + printf("%39s | %10lu |\n", "Number of hash table buckets", + options.m_numBuckets); + printf("%39s | %10lu |\n", "Key size", options.m_keySize); + printf("%39s | %10lu |\n", "Value type", options.m_valueSize); + printf("%39s | %10lu |\n", "Number of iterations per GetContext()", + options.m_numIterationsPerGetContext); + printf("%39s | %10lu |\n", "Epoch processing interval (ms)", + options.m_epochProcessingIntervalInMilli); + printf("%39s | %10lu |\n", "Number of actions queue", + options.m_numActionsQueue); - printf("%39s | %10llu |\n", "Data set size", options.m_dataSetSize); - printf("%39s | %10lu |\n", "Number of hash table buckets", options.m_numBuckets); - printf("%39s | %10lu |\n", "Key size", options.m_keySize); - printf("%39s | %10lu |\n", "Value type", options.m_valueSize); - printf("%39s | %10lu |\n", "Number of iterations per GetContext()", options.m_numIterationsPerGetContext); - printf("%39s | %10lu |\n", "Epoch processing interval (ms)", options.m_epochProcessingIntervalInMilli); - printf("%39s | %10lu |\n", "Number of actions queue", options.m_numActionsQueue); + if (options.IsCachingModule()) { + printf("%39s | %10lu |\n", "Record time to live (s)", + options.m_recordTimeToLiveInSeconds); + printf("%39s | %10llu |\n", "Cache size in bytes", + options.m_cacheSizeInBytes); + printf("%39s | %10lu |\n", "Force time-based eviction", + options.m_forceTimeBasedEviction); + } - if (options.IsCachingModule()) - { - printf("%39s | %10lu |\n", "Record time to live (s)", options.m_recordTimeToLiveInSeconds); - printf("%39s | %10llu |\n", "Cache size in bytes", options.m_cacheSizeInBytes); - printf("%39s | %10lu |\n", "Force time-based eviction", options.m_forceTimeBasedEviction); - } - - printf("------------------------------------------------------\n\n"); + printf("------------------------------------------------------\n\n"); } - -void PrintHashTableCounters(const L4::HashTablePerfData& perfData) -{ - printf("HashTableCounter:\n"); - printf("----------------------------------------------------\n"); - for (auto i = 0; i < static_cast(L4::HashTablePerfCounter::Count); ++i) - { - printf("%35s | %12llu |\n", - L4::c_hashTablePerfCounterNames[i], - perfData.Get(static_cast(i))); - } - printf("----------------------------------------------------\n\n"); +void PrintHashTableCounters(const L4::HashTablePerfData& perfData) { + printf("HashTableCounter:\n"); + printf("----------------------------------------------------\n"); + for (auto i = 0; + i < static_cast(L4::HashTablePerfCounter::Count); ++i) { + printf("%35s | %12llu |\n", L4::c_hashTablePerfCounterNames[i], + perfData.Get(static_cast(i))); + } + printf("----------------------------------------------------\n\n"); } - -L4::HashTableConfig CreateHashTableConfig(const CommandLineOptions& options) -{ - return L4::HashTableConfig( - "Table1", - L4::HashTableConfig::Setting{ options.m_numBuckets }, - options.IsCachingModule() - ? 
boost::optional{ - L4::HashTableConfig::Cache{ - options.m_cacheSizeInBytes, - std::chrono::seconds{ options.m_recordTimeToLiveInSeconds }, - options.m_forceTimeBasedEviction }} - : boost::none); +L4::HashTableConfig CreateHashTableConfig(const CommandLineOptions& options) { + return L4::HashTableConfig( + "Table1", L4::HashTableConfig::Setting{options.m_numBuckets}, + options.IsCachingModule() + ? boost::optional< + L4::HashTableConfig::Cache>{L4::HashTableConfig::Cache{ + options.m_cacheSizeInBytes, + std::chrono::seconds{options.m_recordTimeToLiveInSeconds}, + options.m_forceTimeBasedEviction}} + : boost::none); } - -L4::EpochManagerConfig CreateEpochManagerConfig(const CommandLineOptions& options) -{ - return L4::EpochManagerConfig( - 10000U, - std::chrono::milliseconds(options.m_epochProcessingIntervalInMilli), - options.m_numActionsQueue); +L4::EpochManagerConfig CreateEpochManagerConfig( + const CommandLineOptions& options) { + return L4::EpochManagerConfig( + 10000U, + std::chrono::milliseconds(options.m_epochProcessingIntervalInMilli), + options.m_numActionsQueue); } +void ReadPerfTest(const CommandLineOptions& options) { + printf("Performing read-perf which reads all the records inserted:\n"); -void ReadPerfTest(const CommandLineOptions& options) -{ - printf("Performing read-perf which reads all the records inserted:\n"); + PrintOptions(options); - PrintOptions(options); + auto dataGenerator = std::make_unique( + options.m_dataSetSize, options.m_keySize, options.m_valueSize, + options.m_randomizeValueSize); - auto dataGenerator = std::make_unique( - options.m_dataSetSize, - options.m_keySize, - options.m_valueSize, - options.m_randomizeValueSize); + L4::LocalMemory::HashTableService service(CreateEpochManagerConfig(options)); + const auto hashTableIndex = + service.AddHashTable(CreateHashTableConfig(options)); - L4::LocalMemory::HashTableService service(CreateEpochManagerConfig(options)); - const auto hashTableIndex = service.AddHashTable(CreateHashTableConfig(options)); + // Insert data set. + auto context = service.GetContext(); + auto& hashTable = context[hashTableIndex]; - // Insert data set. - auto context = service.GetContext(); - auto& hashTable = context[hashTableIndex]; + std::vector randomIndices(options.m_dataSetSize); + for (std::uint32_t i = 0U; i < options.m_dataSetSize; ++i) { + randomIndices[i] = i; + } + if (options.m_numThreads > 0) { + // Randomize index only if multiple threads are running + // not to skew the results. + std::random_shuffle(randomIndices.begin(), randomIndices.end()); + } - std::vector randomIndices(options.m_dataSetSize); - for (std::uint32_t i = 0U; i < options.m_dataSetSize; ++i) - { - randomIndices[i] = i; - } - if (options.m_numThreads > 0) - { - // Randomize index only if multiple threads are running - // not to skew the results. 
- std::random_shuffle(randomIndices.begin(), randomIndices.end()); - } + for (int i = 0; i < options.m_dataSetSize; ++i) { + auto key = dataGenerator->GetKey(randomIndices[i]); + auto val = dataGenerator->GetValue(randomIndices[i]); - for (int i = 0; i < options.m_dataSetSize; ++i) - { - auto key = dataGenerator->GetKey(randomIndices[i]); - auto val = dataGenerator->GetValue(randomIndices[i]); + hashTable.Add(key, val); + } - hashTable.Add(key, val); - } + std::vector allInfo; + allInfo.resize(options.m_numThreads); - std::vector allInfo; - allInfo.resize(options.m_numThreads); + SynchronizedTimer overallTimer; + std::mutex mutex; + std::condition_variable cv; + const auto isCachingModule = options.IsCachingModule(); + bool isReady = false; - SynchronizedTimer overallTimer; - std::mutex mutex; - std::condition_variable cv; - const auto isCachingModule = options.IsCachingModule(); - bool isReady = false; + const std::size_t dataSetSizePerThread = + options.m_dataSetSize / options.m_numThreads; + for (std::uint16_t i = 0; i < options.m_numThreads; ++i) { + auto& info = allInfo[i]; - const std::size_t dataSetSizePerThread = options.m_dataSetSize / options.m_numThreads; - for (std::uint16_t i = 0; i < options.m_numThreads; ++i) - { - auto& info = allInfo[i]; + std::size_t startIndex = i * dataSetSizePerThread; + info.m_dataSetSize = (i + 1 == options.m_numThreads) + ? options.m_dataSetSize - startIndex + : dataSetSizePerThread; - std::size_t startIndex = i * dataSetSizePerThread; - info.m_dataSetSize = (i + 1 == options.m_numThreads) - ? options.m_dataSetSize - startIndex - : dataSetSizePerThread; - - info.m_thread = std::thread([=, &service, &dataGenerator, &info, &mutex, &cv, &isReady, &overallTimer] - { - { - std::unique_lock lock(mutex); - cv.wait(lock, [&] { return isReady == true; }); - } - - overallTimer.Start(); - - Timer totalTimer; - Timer getTimer; - - std::size_t iteration = 0; - bool isDone = false; - - while (!isDone) - { - auto context = service.GetContext(); - auto& hashTable = context[hashTableIndex]; - - for (std::uint32_t j = 0; !isDone && j < options.m_numIterationsPerGetContext; ++j) - { - auto key = dataGenerator->GetKey(startIndex + iteration); - L4::IReadOnlyHashTable::Value val; - - if (!hashTable.Get(key, val) && !isCachingModule) - { - throw std::runtime_error("Look up failure is not allowed in this test."); - } - - isDone = (++iteration == info.m_dataSetSize); - } - } - - overallTimer.End(); - - info.m_totalTime = totalTimer.GetElapsedTime(); - }); - } - - { + info.m_thread = std::thread([=, &service, &dataGenerator, &info, &mutex, + &cv, &isReady, &overallTimer] { + { std::unique_lock lock(mutex); - isReady = true; - } + cv.wait(lock, [&] { return isReady == true; }); + } - // Now, start the benchmarking for all threads. 
- cv.notify_all(); + overallTimer.Start(); - for (auto& info : allInfo) - { - info.m_thread.join(); - } + Timer totalTimer; + Timer getTimer; - PrintHashTableCounters(service.GetContext()[hashTableIndex].GetPerfData()); - - printf("Result:\n"); - printf(" | Total | |\n"); - printf(" | micros/op | microseconds | DataSetSize |\n"); - printf(" -----------------------------------------------------------\n"); - - for (std::size_t i = 0; i < allInfo.size(); ++i) - { - const auto& info = allInfo[i]; - - printf(" Thread #%llu | %11.3f | %14llu | %13llu |\n", - (i + 1), - static_cast(info.m_totalTime.count()) / info.m_dataSetSize, - info.m_totalTime.count(), - info.m_dataSetSize); - } - printf(" -----------------------------------------------------------\n"); - - printf(" Overall | %11.3f | %14llu | %13llu |\n", - static_cast(overallTimer.GetElapsedTime().count()) / options.m_dataSetSize, - overallTimer.GetElapsedTime().count(), - options.m_dataSetSize); -} - - -void WritePerfTest(const CommandLineOptions& options) -{ - if (options.m_module == "overwrite-perf") - { - printf("Performing overwrite-perf (writing data with unique keys, then overwrite data with same keys):\n"); - } - else - { - printf("Performing write-perf (writing data with unique keys):\n"); - } - - PrintOptions(options); - - auto dataGenerator = std::make_unique( - options.m_dataSetSize, - options.m_keySize, - options.m_valueSize, - options.m_randomizeValueSize); - - L4::LocalMemory::HashTableService service(CreateEpochManagerConfig(options)); - const auto hashTableIndex = service.AddHashTable(CreateHashTableConfig(options)); - - if (options.m_module == "overwrite-perf") - { - std::vector randomIndices(options.m_dataSetSize); - for (std::uint32_t i = 0U; i < options.m_dataSetSize; ++i) - { - randomIndices[i] = i; - } - if (options.m_numThreads > 0) - { - // Randomize index only if multiple threads are running - // not to skew the results. - std::random_shuffle(randomIndices.begin(), randomIndices.end()); - } + std::size_t iteration = 0; + bool isDone = false; + while (!isDone) { auto context = service.GetContext(); auto& hashTable = context[hashTableIndex]; - for (int i = 0; i < options.m_dataSetSize; ++i) - { - const auto index = randomIndices[i]; - auto key = dataGenerator->GetKey(index); - auto val = dataGenerator->GetValue(index); + for (std::uint32_t j = 0; + !isDone && j < options.m_numIterationsPerGetContext; ++j) { + auto key = dataGenerator->GetKey(startIndex + iteration); + L4::IReadOnlyHashTable::Value val; - hashTable.Add(key, val); + if (!hashTable.Get(key, val) && !isCachingModule) { + throw std::runtime_error( + "Look up failure is not allowed in this test."); + } + + isDone = (++iteration == info.m_dataSetSize); } + } + + overallTimer.End(); + + info.m_totalTime = totalTimer.GetElapsedTime(); + }); + } + + { + std::unique_lock lock(mutex); + isReady = true; + } + + // Now, start the benchmarking for all threads. 
+ cv.notify_all(); + + for (auto& info : allInfo) { + info.m_thread.join(); + } + + PrintHashTableCounters(service.GetContext()[hashTableIndex].GetPerfData()); + + printf("Result:\n"); + printf(" | Total | |\n"); + printf(" | micros/op | microseconds | DataSetSize |\n"); + printf(" -----------------------------------------------------------\n"); + + for (std::size_t i = 0; i < allInfo.size(); ++i) { + const auto& info = allInfo[i]; + + printf(" Thread #%llu | %11.3f | %14llu | %13llu |\n", (i + 1), + static_cast(info.m_totalTime.count()) / info.m_dataSetSize, + info.m_totalTime.count(), info.m_dataSetSize); + } + printf(" -----------------------------------------------------------\n"); + + printf(" Overall | %11.3f | %14llu | %13llu |\n", + static_cast(overallTimer.GetElapsedTime().count()) / + options.m_dataSetSize, + overallTimer.GetElapsedTime().count(), options.m_dataSetSize); +} + +void WritePerfTest(const CommandLineOptions& options) { + if (options.m_module == "overwrite-perf") { + printf( + "Performing overwrite-perf (writing data with unique keys, then " + "overwrite data with same keys):\n"); + } else { + printf("Performing write-perf (writing data with unique keys):\n"); + } + + PrintOptions(options); + + auto dataGenerator = std::make_unique( + options.m_dataSetSize, options.m_keySize, options.m_valueSize, + options.m_randomizeValueSize); + + L4::LocalMemory::HashTableService service(CreateEpochManagerConfig(options)); + const auto hashTableIndex = + service.AddHashTable(CreateHashTableConfig(options)); + + if (options.m_module == "overwrite-perf") { + std::vector randomIndices(options.m_dataSetSize); + for (std::uint32_t i = 0U; i < options.m_dataSetSize; ++i) { + randomIndices[i] = i; + } + if (options.m_numThreads > 0) { + // Randomize index only if multiple threads are running + // not to skew the results. + std::random_shuffle(randomIndices.begin(), randomIndices.end()); } - std::vector allInfo; - allInfo.resize(options.m_numThreads); + auto context = service.GetContext(); + auto& hashTable = context[hashTableIndex]; - SynchronizedTimer overallTimer; - std::mutex mutex; - std::condition_variable cv; - bool isReady = false; + for (int i = 0; i < options.m_dataSetSize; ++i) { + const auto index = randomIndices[i]; + auto key = dataGenerator->GetKey(index); + auto val = dataGenerator->GetValue(index); - const std::size_t dataSetSizePerThread = options.m_dataSetSize / options.m_numThreads; - for (std::uint16_t i = 0; i < options.m_numThreads; ++i) - { - auto& info = allInfo[i]; - - std::size_t startIndex = i * dataSetSizePerThread; - info.m_dataSetSize = (i + 1 == options.m_numThreads) - ? 
options.m_dataSetSize - startIndex - : dataSetSizePerThread; - - info.m_thread = std::thread([=, &service, &dataGenerator, &info, &mutex, &cv, &isReady, &overallTimer] - { - { - std::unique_lock lock(mutex); - cv.wait(lock, [&] { return isReady == true; }); - } - - overallTimer.Start(); - - Timer totalTimer; - Timer addTimer; - - std::size_t iteration = 0; - bool isDone = false; - - while (!isDone) - { - auto context = service.GetContext(); - auto& hashTable = context[hashTableIndex]; - - for (std::uint32_t j = 0; !isDone && j < options.m_numIterationsPerGetContext; ++j) - { - const auto index = startIndex + iteration; - auto key = dataGenerator->GetKey(index); - auto val = dataGenerator->GetValue(index); - - hashTable.Add(key, val); - - isDone = (++iteration == info.m_dataSetSize); - } - } - - info.m_totalTime = totalTimer.GetElapsedTime(); - overallTimer.End(); - }); + hashTable.Add(key, val); } + } - { + std::vector allInfo; + allInfo.resize(options.m_numThreads); + + SynchronizedTimer overallTimer; + std::mutex mutex; + std::condition_variable cv; + bool isReady = false; + + const std::size_t dataSetSizePerThread = + options.m_dataSetSize / options.m_numThreads; + for (std::uint16_t i = 0; i < options.m_numThreads; ++i) { + auto& info = allInfo[i]; + + std::size_t startIndex = i * dataSetSizePerThread; + info.m_dataSetSize = (i + 1 == options.m_numThreads) + ? options.m_dataSetSize - startIndex + : dataSetSizePerThread; + + info.m_thread = std::thread([=, &service, &dataGenerator, &info, &mutex, + &cv, &isReady, &overallTimer] { + { std::unique_lock lock(mutex); - isReady = true; - } + cv.wait(lock, [&] { return isReady == true; }); + } - // Now, start the benchmarking for all threads. - cv.notify_all(); + overallTimer.Start(); - for (auto& info : allInfo) - { - info.m_thread.join(); - } + Timer totalTimer; + Timer addTimer; - PrintHashTableCounters(service.GetContext()[hashTableIndex].GetPerfData()); + std::size_t iteration = 0; + bool isDone = false; - printf("Result:\n"); - printf(" | Total | |\n"); - printf(" | micros/op | microseconds | DataSetSize |\n"); - printf(" -----------------------------------------------------------\n"); + while (!isDone) { + auto context = service.GetContext(); + auto& hashTable = context[hashTableIndex]; - for (std::size_t i = 0; i < allInfo.size(); ++i) - { - const auto& info = allInfo[i]; + for (std::uint32_t j = 0; + !isDone && j < options.m_numIterationsPerGetContext; ++j) { + const auto index = startIndex + iteration; + auto key = dataGenerator->GetKey(index); + auto val = dataGenerator->GetValue(index); - printf(" Thread #%llu | %11.3f | %14llu | %13llu |\n", - (i + 1), - static_cast(info.m_totalTime.count()) / info.m_dataSetSize, - info.m_totalTime.count(), - info.m_dataSetSize); - } - printf(" -----------------------------------------------------------\n"); + hashTable.Add(key, val); - printf(" Overall | %11.3f | %14llu | %13llu |\n", - static_cast(overallTimer.GetElapsedTime().count()) / options.m_dataSetSize, - overallTimer.GetElapsedTime().count(), - options.m_dataSetSize); + isDone = (++iteration == info.m_dataSetSize); + } + } - if (options.m_numThreads == 1) - { - auto& perfData = service.GetContext()[hashTableIndex].GetPerfData(); - std::uint64_t totalBytes = perfData.Get(L4::HashTablePerfCounter::TotalKeySize) - + perfData.Get(L4::HashTablePerfCounter::TotalValueSize); + info.m_totalTime = totalTimer.GetElapsedTime(); + overallTimer.End(); + }); + } - auto& info = allInfo[0]; + { + std::unique_lock lock(mutex); + isReady = true; + } - 
double opsPerSec = static_cast(info.m_dataSetSize) / info.m_totalTime.count() * 1000000.0; - double MBPerSec = static_cast(totalBytes) / info.m_totalTime.count(); - printf(" %10.3f ops/sec %10.3f MB/sec\n", opsPerSec, MBPerSec); - } + // Now, start the benchmarking for all threads. + cv.notify_all(); + + for (auto& info : allInfo) { + info.m_thread.join(); + } + + PrintHashTableCounters(service.GetContext()[hashTableIndex].GetPerfData()); + + printf("Result:\n"); + printf(" | Total | |\n"); + printf(" | micros/op | microseconds | DataSetSize |\n"); + printf(" -----------------------------------------------------------\n"); + + for (std::size_t i = 0; i < allInfo.size(); ++i) { + const auto& info = allInfo[i]; + + printf(" Thread #%llu | %11.3f | %14llu | %13llu |\n", (i + 1), + static_cast(info.m_totalTime.count()) / info.m_dataSetSize, + info.m_totalTime.count(), info.m_dataSetSize); + } + printf(" -----------------------------------------------------------\n"); + + printf(" Overall | %11.3f | %14llu | %13llu |\n", + static_cast(overallTimer.GetElapsedTime().count()) / + options.m_dataSetSize, + overallTimer.GetElapsedTime().count(), options.m_dataSetSize); + + if (options.m_numThreads == 1) { + auto& perfData = service.GetContext()[hashTableIndex].GetPerfData(); + std::uint64_t totalBytes = + perfData.Get(L4::HashTablePerfCounter::TotalKeySize) + + perfData.Get(L4::HashTablePerfCounter::TotalValueSize); + + auto& info = allInfo[0]; + + double opsPerSec = static_cast(info.m_dataSetSize) / + info.m_totalTime.count() * 1000000.0; + double MBPerSec = + static_cast(totalBytes) / info.m_totalTime.count(); + printf(" %10.3f ops/sec %10.3f MB/sec\n", opsPerSec, MBPerSec); + } } +CommandLineOptions Parse(int argc, char** argv) { + namespace po = boost::program_options; -CommandLineOptions Parse(int argc, char** argv) -{ - namespace po = boost::program_options; + po::options_description general("General options"); + general.add_options()("help", "produce a help message")( + "help-module", po::value(), + "produce a help for the following modules:\n" + " write-perf\n" + " overwrite-perf\n" + " read-perf\n" + " cache-read-perf\n" + " cache-write-perf\n")("module", po::value(), + "Runs the given module"); - po::options_description general("General options"); - general.add_options() - ("help", "produce a help message") - ("help-module", po::value(), - "produce a help for the following modules:\n" - " write-perf\n" - " overwrite-perf\n" - " read-perf\n" - " cache-read-perf\n" - " cache-write-perf\n") - ("module", po::value(), - "Runs the given module"); + po::options_description benchmarkOptions("Benchmark options."); + benchmarkOptions.add_options()("dataSetSize", + po::value()->default_value( + CommandLineOptions::c_defaultDataSetSize), + "data set size")( + "numBuckets", + po::value()->default_value( + CommandLineOptions::c_defaultNumBuckets), + "number of buckets")("keySize", + po::value()->default_value( + CommandLineOptions::c_defaultKeySize), + "key size in bytes")( + "valueSize", + po::value()->default_value( + CommandLineOptions::c_defaultValueSize), + "value size in bytes")("randomizeValueSize", "randomize value size")( + "numIterationsPerGetContext", + po::value()->default_value( + CommandLineOptions::c_defaultNumIterationsPerGetContext), + "number of iterations per GetContext()")( + "numThreads", + po::value()->default_value( + CommandLineOptions::c_defaultNumThreads), + "number of threads to create")( + "epochProcessingInterval", + po::value()->default_value( + 
CommandLineOptions::c_defaultEpochProcessingIntervalInMilli), + "epoch processing interval (ms)")( + "numActionsQueue", + po::value()->default_value( + CommandLineOptions::c_defaultNumActionsQueue), + "number of actions queue")( + "recordTimeToLive", + po::value()->default_value( + CommandLineOptions::c_defaultRecordTimeToLiveInSeconds), + "record time to live (s)")( + "cacheSize", + po::value()->default_value( + CommandLineOptions::c_defaultCacheSizeInBytes), + "cache size in bytes")( + "forceTimeBasedEviction", + po::value()->default_value( + CommandLineOptions::c_defaultForceTimeBasedEviction), + "force time based eviction"); - po::options_description benchmarkOptions("Benchmark options."); - benchmarkOptions.add_options() - ("dataSetSize", po::value()->default_value(CommandLineOptions::c_defaultDataSetSize), "data set size") - ("numBuckets", po::value()->default_value(CommandLineOptions::c_defaultNumBuckets), "number of buckets") - ("keySize", po::value()->default_value(CommandLineOptions::c_defaultKeySize), "key size in bytes") - ("valueSize", po::value()->default_value(CommandLineOptions::c_defaultValueSize), "value size in bytes") - ("randomizeValueSize", "randomize value size") - ("numIterationsPerGetContext", po::value()->default_value(CommandLineOptions::c_defaultNumIterationsPerGetContext), "number of iterations per GetContext()") - ("numThreads", po::value()->default_value(CommandLineOptions::c_defaultNumThreads), "number of threads to create") - ("epochProcessingInterval", po::value()->default_value(CommandLineOptions::c_defaultEpochProcessingIntervalInMilli), "epoch processing interval (ms)") - ("numActionsQueue", po::value()->default_value(CommandLineOptions::c_defaultNumActionsQueue), "number of actions queue") - ("recordTimeToLive", po::value()->default_value(CommandLineOptions::c_defaultRecordTimeToLiveInSeconds), "record time to live (s)") - ("cacheSize", po::value()->default_value(CommandLineOptions::c_defaultCacheSizeInBytes), "cache size in bytes") - ("forceTimeBasedEviction", po::value()->default_value(CommandLineOptions::c_defaultForceTimeBasedEviction), "force time based eviction"); + po::options_description all("Allowed options"); + all.add(general).add(benchmarkOptions); - po::options_description all("Allowed options"); - all.add(general).add(benchmarkOptions); + po::variables_map vm; + po::store(po::parse_command_line(argc, argv, all), vm); + po::notify(vm); - po::variables_map vm; - po::store(po::parse_command_line(argc, argv, all), vm); - po::notify(vm); + CommandLineOptions options; - CommandLineOptions options; + if (vm.count("help")) { + std::cout << all; + } else if (vm.count("module")) { + options.m_module = vm["module"].as(); - if (vm.count("help")) - { - std::cout << all; + if (vm.count("dataSetSize")) { + options.m_dataSetSize = vm["dataSetSize"].as(); } - else if (vm.count("module")) - { - options.m_module = vm["module"].as(); - - if (vm.count("dataSetSize")) - { - options.m_dataSetSize = vm["dataSetSize"].as(); - } - if (vm.count("numBuckets")) - { - options.m_numBuckets = vm["numBuckets"].as(); - } - if (vm.count("keySize")) - { - options.m_keySize = vm["keySize"].as(); - } - if (vm.count("valueSize")) - { - options.m_valueSize = vm["valueSize"].as(); - } - if (vm.count("randomizeValueSize")) - { - options.m_randomizeValueSize = true; - } - if (vm.count("numIterationsPerGetContext")) - { - options.m_numIterationsPerGetContext = vm["numIterationsPerGetContext"].as(); - } - if (vm.count("numThreads")) - { - options.m_numThreads = 
vm["numThreads"].as(); - } - if (vm.count("epochProcessingInterval")) - { - options.m_epochProcessingIntervalInMilli = vm["epochProcessingInterval"].as(); - } - if (vm.count("numActionsQueue")) - { - options.m_numActionsQueue = vm["numActionsQueue"].as(); - } - if (vm.count("recordTimeToLive")) - { - options.m_recordTimeToLiveInSeconds = vm["recordTimeToLive"].as(); - } - if (vm.count("cacheSize")) - { - options.m_cacheSizeInBytes = vm["cacheSize"].as(); - } - if (vm.count("forceTimeBasedEviction")) - { - options.m_forceTimeBasedEviction = vm["forceTimeBasedEviction"].as(); - } + if (vm.count("numBuckets")) { + options.m_numBuckets = vm["numBuckets"].as(); } - else - { - std::cout << all; + if (vm.count("keySize")) { + options.m_keySize = vm["keySize"].as(); } + if (vm.count("valueSize")) { + options.m_valueSize = vm["valueSize"].as(); + } + if (vm.count("randomizeValueSize")) { + options.m_randomizeValueSize = true; + } + if (vm.count("numIterationsPerGetContext")) { + options.m_numIterationsPerGetContext = + vm["numIterationsPerGetContext"].as(); + } + if (vm.count("numThreads")) { + options.m_numThreads = vm["numThreads"].as(); + } + if (vm.count("epochProcessingInterval")) { + options.m_epochProcessingIntervalInMilli = + vm["epochProcessingInterval"].as(); + } + if (vm.count("numActionsQueue")) { + options.m_numActionsQueue = vm["numActionsQueue"].as(); + } + if (vm.count("recordTimeToLive")) { + options.m_recordTimeToLiveInSeconds = + vm["recordTimeToLive"].as(); + } + if (vm.count("cacheSize")) { + options.m_cacheSizeInBytes = vm["cacheSize"].as(); + } + if (vm.count("forceTimeBasedEviction")) { + options.m_forceTimeBasedEviction = + vm["forceTimeBasedEviction"].as(); + } + } else { + std::cout << all; + } - return options; + return options; } +int main(int argc, char** argv) { + auto options = Parse(argc, argv); -int main(int argc, char** argv) -{ - auto options = Parse(argc, argv); - - if (options.m_module.empty()) - { - return 0; - } - - std::srand(static_cast(time(NULL))); - - PrintHardwareInfo(); - - if (options.m_module == "write-perf" - || options.m_module == "overwrite-perf" - || options.m_module == "cache-write-perf") - { - WritePerfTest(options); - } - else if (options.m_module == "read-perf" - || options.m_module == "cache-read-perf") - { - ReadPerfTest(options); - } - else - { - std::cout << "Unknown module: " << options.m_module << std::endl; - } - + if (options.m_module.empty()) { return 0; -} + } + std::srand(static_cast(time(NULL))); + + PrintHardwareInfo(); + + if (options.m_module == "write-perf" || + options.m_module == "overwrite-perf" || + options.m_module == "cache-write-perf") { + WritePerfTest(options); + } else if (options.m_module == "read-perf" || + options.m_module == "cache-read-perf") { + ReadPerfTest(options); + } else { + std::cout << "Unknown module: " << options.m_module << std::endl; + } + + return 0; +} diff --git a/Examples/main.cpp b/Examples/main.cpp index f87f3cd..3c219f9 100644 --- a/Examples/main.cpp +++ b/Examples/main.cpp @@ -6,95 +6,84 @@ using namespace L4; -void SimpleExample() -{ - EpochManagerConfig epochConfig{ 1000, std::chrono::milliseconds(100), 1 }; - LocalMemory::HashTableService service{ epochConfig }; +void SimpleExample() { + EpochManagerConfig epochConfig{1000, std::chrono::milliseconds(100), 1}; + LocalMemory::HashTableService service{epochConfig}; - auto hashTableIndex = service.AddHashTable( - HashTableConfig("Table1", HashTableConfig::Setting{ 1000000 })); + auto hashTableIndex = service.AddHashTable( + 
HashTableConfig("Table1", HashTableConfig::Setting{1000000})); - std::vector> keyValuePairs = - { - { "key1", "value1" }, - { "key2", "value2" }, - { "key3", "value3" }, - { "key4", "value4" }, - { "key5", "value5" }, - }; + std::vector> keyValuePairs = { + {"key1", "value1"}, {"key2", "value2"}, {"key3", "value3"}, + {"key4", "value4"}, {"key5", "value5"}, + }; - // Write data. - { - auto context = service.GetContext(); - auto& hashTable = context[hashTableIndex]; + // Write data. + { + auto context = service.GetContext(); + auto& hashTable = context[hashTableIndex]; - for (const auto& keyValuePair : keyValuePairs) - { - const auto& keyStr = keyValuePair.first; - const auto& valStr = keyValuePair.second; + for (const auto& keyValuePair : keyValuePairs) { + const auto& keyStr = keyValuePair.first; + const auto& valStr = keyValuePair.second; - IWritableHashTable::Key key; - key.m_data = reinterpret_cast(keyStr.c_str()); - key.m_size = keyStr.size(); + IWritableHashTable::Key key; + key.m_data = reinterpret_cast(keyStr.c_str()); + key.m_size = keyStr.size(); - IWritableHashTable::Value val; - val.m_data = reinterpret_cast(valStr.c_str()); - val.m_size = valStr.size(); + IWritableHashTable::Value val; + val.m_data = reinterpret_cast(valStr.c_str()); + val.m_size = valStr.size(); - hashTable.Add(key, val); - } + hashTable.Add(key, val); } + } - // Read data. - { - auto context = service.GetContext(); + // Read data. + { + auto context = service.GetContext(); - // Once a context is retrieved, the operations such as - // operator[] on the context and Get() are lock-free. - auto& hashTable = context[hashTableIndex]; + // Once a context is retrieved, the operations such as + // operator[] on the context and Get() are lock-free. + auto& hashTable = context[hashTableIndex]; - for (const auto& keyValuePair : keyValuePairs) - { - const auto& keyStr = keyValuePair.first; + for (const auto& keyValuePair : keyValuePairs) { + const auto& keyStr = keyValuePair.first; - IWritableHashTable::Key key; - key.m_data = reinterpret_cast(keyStr.c_str()); - key.m_size = keyStr.size(); + IWritableHashTable::Key key; + key.m_data = reinterpret_cast(keyStr.c_str()); + key.m_size = keyStr.size(); - IWritableHashTable::Value val; - hashTable.Get(key, val); + IWritableHashTable::Value val; + hashTable.Get(key, val); - std::cout << std::string(reinterpret_cast(val.m_data), val.m_size) << std::endl; - } + std::cout << std::string(reinterpret_cast(val.m_data), + val.m_size) + << std::endl; } + } } -void CacheHashTableExample() -{ - LocalMemory::HashTableService service; +void CacheHashTableExample() { + LocalMemory::HashTableService service; - HashTableConfig::Cache cacheConfig{ - 1024 * 1024, // 1MB cache - std::chrono::seconds(60), // Record will exipre in 60 seconds - true // Remove any expired records during eviction. - }; + HashTableConfig::Cache cacheConfig{ + 1024 * 1024, // 1MB cache + std::chrono::seconds(60), // Record will exipre in 60 seconds + true // Remove any expired records during eviction. + }; - auto hashTableIndex = service.AddHashTable( - HashTableConfig( - "Table1", - HashTableConfig::Setting{ 1000000 }, - cacheConfig)); + auto hashTableIndex = service.AddHashTable(HashTableConfig( + "Table1", HashTableConfig::Setting{1000000}, cacheConfig)); - (void)hashTableIndex; - // Use hash table similar to SimpleExample(). + (void)hashTableIndex; + // Use hash table similar to SimpleExample(). 
} -int main() -{ - SimpleExample(); +int main() { + SimpleExample(); - CacheHashTableExample(); + CacheHashTableExample(); - return 0; + return 0; } - diff --git a/Unittests/CacheHashTableTest.cpp b/Unittests/CacheHashTableTest.cpp index cd1105e..4b8f347 100644 --- a/Unittests/CacheHashTableTest.cpp +++ b/Unittests/CacheHashTableTest.cpp @@ -1,614 +1,524 @@ -#include #include +#include #include -#include "Utils.h" -#include "Mocks.h" #include "CheckedAllocator.h" -#include "L4/HashTable/Common/Record.h" -#include "L4/HashTable/Cache/Metadata.h" #include "L4/HashTable/Cache/HashTable.h" +#include "L4/HashTable/Cache/Metadata.h" +#include "L4/HashTable/Common/Record.h" +#include "Mocks.h" +#include "Utils.h" -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { using namespace HashTable::Cache; using namespace std::chrono; -class MockClock -{ -public: - MockClock() = default; +class MockClock { + public: + MockClock() = default; - seconds GetCurrentEpochTime() const - { - return s_currentEpochTime; - } + seconds GetCurrentEpochTime() const { return s_currentEpochTime; } - static void SetEpochTime(seconds time) - { - s_currentEpochTime = time; - } + static void SetEpochTime(seconds time) { s_currentEpochTime = time; } - static void IncrementEpochTime(seconds increment) - { - s_currentEpochTime += increment; - } + static void IncrementEpochTime(seconds increment) { + s_currentEpochTime += increment; + } -private: - static seconds s_currentEpochTime; + private: + static seconds s_currentEpochTime; }; -seconds MockClock::s_currentEpochTime{ 0U }; +seconds MockClock::s_currentEpochTime{0U}; +class CacheHashTableTestFixture { + public: + using Allocator = CheckedAllocator<>; + using CacheHashTable = WritableHashTable; + using ReadOnlyCacheHashTable = ReadOnlyHashTable; + using HashTable = CacheHashTable::HashTable; -class CacheHashTableTestFixture -{ -public: - using Allocator = CheckedAllocator<>; - using CacheHashTable = WritableHashTable; - using ReadOnlyCacheHashTable = ReadOnlyHashTable; - using HashTable = CacheHashTable::HashTable; + CacheHashTableTestFixture() + : m_allocator{}, + m_hashTable{HashTable::Setting{100U}, m_allocator}, + m_epochManager{} { + MockClock::SetEpochTime(seconds{0U}); + } - CacheHashTableTestFixture() - : m_allocator{} - , m_hashTable { HashTable::Setting{ 100U }, m_allocator } - , m_epochManager{} - { - MockClock::SetEpochTime(seconds{ 0U }); - } + CacheHashTableTestFixture(const CacheHashTableTestFixture&) = delete; + CacheHashTableTestFixture& operator=(const CacheHashTableTestFixture&) = + delete; - CacheHashTableTestFixture(const CacheHashTableTestFixture&) = delete; - CacheHashTableTestFixture& operator=(const CacheHashTableTestFixture&) = delete; + protected: + template + bool Get(TCacheHashTable& hashTable, + const std::string& key, + IReadOnlyHashTable::Value& value) { + return hashTable.Get( + Utils::ConvertFromString(key.c_str()), value); + } -protected: - template - bool Get(TCacheHashTable& hashTable, const std::string& key, IReadOnlyHashTable::Value& value) - { - return hashTable.Get( - Utils::ConvertFromString(key.c_str()), - value); - } + void Add(CacheHashTable& hashTable, + const std::string& key, + const std::string& value) { + hashTable.Add( + Utils::ConvertFromString(key.c_str()), + Utils::ConvertFromString(value.c_str())); + } - void Add(CacheHashTable& hashTable, const std::string& key, const std::string& value) - { - hashTable.Add( - Utils::ConvertFromString(key.c_str()), - Utils::ConvertFromString(value.c_str())); - 
} + void Remove(CacheHashTable& hashTable, const std::string& key) { + hashTable.Remove( + Utils::ConvertFromString(key.c_str())); + } - void Remove(CacheHashTable& hashTable, const std::string& key) - { - hashTable.Remove(Utils::ConvertFromString(key.c_str())); - } + template + bool CheckRecord(TCacheHashTable& hashTable, + const std::string& key, + const std::string& expectedValue) { + IReadOnlyHashTable::Value value; + return Get(hashTable, key, value) && AreTheSame(value, expectedValue); + } - template - bool CheckRecord(TCacheHashTable& hashTable, const std::string& key, const std::string& expectedValue) - { - IReadOnlyHashTable::Value value; - return Get(hashTable, key, value) && AreTheSame(value, expectedValue); - } + bool AreTheSame(const IReadOnlyHashTable::Value& actual, + const std::string& expected) { + return (actual.m_size == expected.size()) && + !memcmp(actual.m_data, expected.c_str(), actual.m_size); + } - bool AreTheSame(const IReadOnlyHashTable::Value& actual, const std::string& expected) - { - return (actual.m_size == expected.size()) - && !memcmp(actual.m_data, expected.c_str(), actual.m_size); - } + template + bool Exist(const Blob& actual, const std::vector& expectedSet) { + const std::string actualStr(reinterpret_cast(actual.m_data), + actual.m_size); - template - bool Exist(const Blob& actual, const std::vector& expectedSet) - { - const std::string actualStr( - reinterpret_cast(actual.m_data), - actual.m_size); + return std::find(expectedSet.cbegin(), expectedSet.cend(), actualStr) != + expectedSet.cend(); + } - return std::find(expectedSet.cbegin(), expectedSet.cend(), actualStr) != expectedSet.cend(); - } - - Allocator m_allocator; - HashTable m_hashTable; - MockEpochManager m_epochManager; - MockClock m_clock; + Allocator m_allocator; + HashTable m_hashTable; + MockEpochManager m_epochManager; + MockClock m_clock; }; - BOOST_AUTO_TEST_SUITE(CacheHashTableTests) +BOOST_AUTO_TEST_CASE(MetadataTest) { + std::vector buffer(20); -BOOST_AUTO_TEST_CASE(MetadataTest) -{ - std::vector buffer(20); + // The following will test with 1..8 byte alignments. + for (std::uint16_t i = 0U; i < 8U; ++i) { + std::uint32_t* metadataBuffer = + reinterpret_cast(buffer.data() + i); + seconds currentEpochTime{0x7FABCDEF}; - // The following will test with 1..8 byte alignments. - for (std::uint16_t i = 0U; i < 8U; ++i) - { - std::uint32_t* metadataBuffer = reinterpret_cast(buffer.data() + i); - seconds currentEpochTime{ 0x7FABCDEF }; + Metadata metadata{metadataBuffer, currentEpochTime}; - Metadata metadata{ metadataBuffer, currentEpochTime }; + BOOST_CHECK(currentEpochTime == metadata.GetEpochTime()); - BOOST_CHECK(currentEpochTime == metadata.GetEpochTime()); + // 10 seconds have elapsed. + currentEpochTime += seconds{10U}; - // 10 seconds have elapsed. - currentEpochTime += seconds{ 10U }; + // Check the expiration based on the time to live value. + BOOST_CHECK(!metadata.IsExpired(currentEpochTime, seconds{15})); + BOOST_CHECK(!metadata.IsExpired(currentEpochTime, seconds{10})); + BOOST_CHECK(metadata.IsExpired(currentEpochTime, seconds{5U})); - // Check the expiration based on the time to live value. - BOOST_CHECK(!metadata.IsExpired(currentEpochTime, seconds{ 15 })); - BOOST_CHECK(!metadata.IsExpired(currentEpochTime, seconds{ 10 })); - BOOST_CHECK(metadata.IsExpired(currentEpochTime, seconds{ 5U })); + // Test access state. + BOOST_CHECK(!metadata.IsAccessed()); - // Test access state. 
- BOOST_CHECK(!metadata.IsAccessed()); + metadata.UpdateAccessStatus(true); + BOOST_CHECK(metadata.IsAccessed()); - metadata.UpdateAccessStatus(true); - BOOST_CHECK(metadata.IsAccessed()); - - metadata.UpdateAccessStatus(false); - BOOST_CHECK(!metadata.IsAccessed()); - } + metadata.UpdateAccessStatus(false); + BOOST_CHECK(!metadata.IsAccessed()); + } } +BOOST_FIXTURE_TEST_CASE(ExpirationTest, CacheHashTableTestFixture) { + // Don't care about evict in this test case, so make the cache size big. + constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF; + constexpr seconds c_recordTimeToLive{20U}; -BOOST_FIXTURE_TEST_CASE(ExpirationTest, CacheHashTableTestFixture) -{ + CacheHashTable hashTable(m_hashTable, m_epochManager, c_maxCacheSizeInBytes, + c_recordTimeToLive, false); + + const std::vector> c_keyValuePairs = { + {"key1", "value1"}, + {"key2", "value2"}, + {"key3", "value3"}, + {"key4", "value4"}, + {"key5", "value5"}}; + + // Add 5 records at a different epoch time (10 seconds increment). + for (const auto& pair : c_keyValuePairs) { + MockClock::IncrementEpochTime(seconds{10}); + Add(hashTable, pair.first, pair.second); + + // Make sure the records can be retrieved right away. The record has not + // been expired since the clock hasn't moved yet. + BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second)); + } + + const auto& perfData = hashTable.GetPerfData(); + Utils::ValidateCounters(perfData, {{HashTablePerfCounter::CacheHitCount, 5}}); + + // Now we have the following data sets: + // | Key | Value | Creation time | + // | key1 | value1 | 10 | + // | key2 | value2 | 20 | + // | key3 | value3 | 30 | + // | key4 | value4 | 40 | + // | key5 | value5 | 50 | + // And the current clock is at 50. + + // Do look ups and check expired records. + for (const auto& pair : c_keyValuePairs) { + IReadOnlyHashTable::Value value; + // Our time to live value is 20, so key0 and key0 records should be expired. + if (pair.first == "key1" || pair.first == "key2") { + BOOST_CHECK(!Get(hashTable, pair.first, value)); + } else { + BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second)); + } + } + + Utils::ValidateCounters(perfData, + {{HashTablePerfCounter::CacheHitCount, 8}, + {HashTablePerfCounter::CacheMissCount, 2}}); + + MockClock::IncrementEpochTime(seconds{100}); + + // All the records should be expired now. + for (const auto& pair : c_keyValuePairs) { + IReadOnlyHashTable::Value value; + BOOST_CHECK(!Get(hashTable, pair.first, value)); + } + + Utils::ValidateCounters(perfData, + {{HashTablePerfCounter::CacheHitCount, 8}, + {HashTablePerfCounter::CacheMissCount, 7}}); +} + +BOOST_FIXTURE_TEST_CASE(CacheHashTableIteratorTest, CacheHashTableTestFixture) { + // Don't care about evict in this test case, so make the cache size big. + constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF; + constexpr seconds c_recordTimeToLive{20U}; + + CacheHashTable hashTable(m_hashTable, m_epochManager, c_maxCacheSizeInBytes, + c_recordTimeToLive, false); + + const std::vector c_keys = {"key1", "key2", "key3", "key4", + "key5"}; + const std::vector c_vals = {"val1", "val2", "val3", "val4", + "val5"}; + + // Add 5 records at a different epoch time (3 seconds increment). 
+ for (std::size_t i = 0; i < c_keys.size(); ++i) { + MockClock::IncrementEpochTime(seconds{3}); + Add(hashTable, c_keys[i], c_vals[i]); + } + + // Now we have the following data sets: + // | Key | Value | Creation time | + // | key1 | value1 | 3 | + // | key2 | value2 | 6 | + // | key3 | value3 | 9 | + // | key4 | value4 | 12 | + // | key5 | value5 | 15 | + // And the current clock is at 15. + + auto iterator = hashTable.GetIterator(); + std::uint16_t numRecords = 0; + while (iterator->MoveNext()) { + ++numRecords; + BOOST_CHECK(Exist(iterator->GetKey(), c_keys)); + BOOST_CHECK(Exist(iterator->GetValue(), c_vals)); + } + + BOOST_CHECK_EQUAL(numRecords, 5); + + // The clock becomes 30 and key1, key2 and key3 should expire. + MockClock::IncrementEpochTime(seconds{15}); + + iterator = hashTable.GetIterator(); + numRecords = 0; + while (iterator->MoveNext()) { + ++numRecords; + BOOST_CHECK( + Exist(iterator->GetKey(), + std::vector{c_keys.cbegin() + 2, c_keys.cend()})); + BOOST_CHECK( + Exist(iterator->GetValue(), + std::vector{c_vals.cbegin() + 2, c_vals.cend()})); + } + + BOOST_CHECK_EQUAL(numRecords, 2); + + // The clock becomes 40 and all records should be expired now. + MockClock::IncrementEpochTime(seconds{10}); + + iterator = hashTable.GetIterator(); + while (iterator->MoveNext()) { + BOOST_CHECK(false); + } +} + +BOOST_FIXTURE_TEST_CASE(TimeBasedEvictionTest, CacheHashTableTestFixture) { + // We only care about time-based eviction in this test, so make the cache size + // big. + constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF; + constexpr seconds c_recordTimeToLive{10U}; + + // Hash table with one bucket makes testing the time-based eviction easy. + HashTable internalHashTable{HashTable::Setting{1}, m_allocator}; + CacheHashTable hashTable(internalHashTable, m_epochManager, + c_maxCacheSizeInBytes, c_recordTimeToLive, true); + + const std::vector> c_keyValuePairs = { + {"key1", "value1"}, + {"key2", "value2"}, + {"key3", "value3"}, + {"key4", "value4"}, + {"key5", "value5"}}; + + for (const auto& pair : c_keyValuePairs) { + Add(hashTable, pair.first, pair.second); + BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second)); + } + + const auto& perfData = hashTable.GetPerfData(); + Utils::ValidateCounters(perfData, + { + {HashTablePerfCounter::CacheHitCount, 5}, + {HashTablePerfCounter::RecordsCount, 5}, + {HashTablePerfCounter::EvictedRecordsCount, 0}, + }); + + MockClock::IncrementEpochTime(seconds{20}); + + // All the records should be expired now. + for (const auto& pair : c_keyValuePairs) { + IReadOnlyHashTable::Value value; + BOOST_CHECK(!Get(hashTable, pair.first, value)); + } + + Utils::ValidateCounters(perfData, + { + {HashTablePerfCounter::CacheHitCount, 5}, + {HashTablePerfCounter::CacheMissCount, 5}, + {HashTablePerfCounter::RecordsCount, 5}, + {HashTablePerfCounter::EvictedRecordsCount, 0}, + }); + + // Now try to add one record and all the expired records should be evicted. 
+ const auto& keyValuePair = c_keyValuePairs[0]; + Add(hashTable, keyValuePair.first, keyValuePair.second); + + Utils::ValidateCounters(perfData, + { + {HashTablePerfCounter::RecordsCount, 1}, + {HashTablePerfCounter::EvictedRecordsCount, 5}, + }); +} + +BOOST_FIXTURE_TEST_CASE(EvcitAllRecordsTest, CacheHashTableTestFixture) { + const auto& perfData = m_hashTable.m_perfData; + const auto initialTotalIndexSize = + perfData.Get(HashTablePerfCounter::TotalIndexSize); + const std::uint64_t c_maxCacheSizeInBytes = 500 + initialTotalIndexSize; + constexpr seconds c_recordTimeToLive{5}; + + CacheHashTable hashTable{m_hashTable, m_epochManager, c_maxCacheSizeInBytes, + c_recordTimeToLive, false}; + + Utils::ValidateCounters(perfData, + { + {HashTablePerfCounter::EvictedRecordsCount, 0}, + }); + + const std::vector> c_keyValuePairs = { + {"key1", "value1"}, + {"key2", "value2"}, + {"key3", "value3"}, + {"key4", "value4"}, + {"key5", "value5"}}; + + for (const auto& pair : c_keyValuePairs) { + Add(hashTable, pair.first, pair.second); + } + + using L4::HashTable::RecordSerializer; + + // Variable key/value sizes. + const auto recordOverhead = + RecordSerializer{0U, 0U}.CalculateRecordOverhead(); + + Utils::ValidateCounters( + perfData, + { + {HashTablePerfCounter::RecordsCount, c_keyValuePairs.size()}, + {HashTablePerfCounter::TotalIndexSize, + initialTotalIndexSize + (c_keyValuePairs.size() * recordOverhead)}, + {HashTablePerfCounter::EvictedRecordsCount, 0}, + }); + + // Make sure all data records added are present and update the access status + // for each record in order to test that accessed records are deleted when + // it's under memory constraint. + for (const auto& pair : c_keyValuePairs) { + BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second)); + } + + // Now insert a record that will force all the records to be evicted due to + // size. + std::string bigRecordKeyStr(10, 'k'); + std::string bigRecordValStr(500, 'v'); + + Add(hashTable, bigRecordKeyStr, bigRecordValStr); + + // Make sure all the previously inserted records are evicted. + for (const auto& pair : c_keyValuePairs) { + IReadOnlyHashTable::Value value; + BOOST_CHECK(!Get(hashTable, pair.first, value)); + } + + // Make sure the big record is inserted. + BOOST_CHECK(CheckRecord(hashTable, bigRecordKeyStr, bigRecordValStr)); + + Utils::ValidateCounters( + perfData, + { + {HashTablePerfCounter::RecordsCount, 1}, + {HashTablePerfCounter::TotalIndexSize, + initialTotalIndexSize + (1 * recordOverhead)}, + {HashTablePerfCounter::EvictedRecordsCount, c_keyValuePairs.size()}, + }); +} + +BOOST_FIXTURE_TEST_CASE(EvcitRecordsBasedOnAccessStatusTest, + CacheHashTableTestFixture) { + const std::uint64_t c_maxCacheSizeInBytes = + 2000 + m_hashTable.m_perfData.Get(HashTablePerfCounter::TotalIndexSize); + const seconds c_recordTimeToLive{5}; + + CacheHashTable hashTable(m_hashTable, m_epochManager, c_maxCacheSizeInBytes, + c_recordTimeToLive, false); + + constexpr std::uint32_t c_valueSize = 100; + const std::string c_valStr(c_valueSize, 'v'); + const auto& perfData = hashTable.GetPerfData(); + std::uint16_t key = 1; + + while ((static_cast( + perfData.Get(HashTablePerfCounter::TotalIndexSize)) + + perfData.Get(HashTablePerfCounter::TotalKeySize) + + perfData.Get(HashTablePerfCounter::TotalValueSize) + c_valueSize) < + c_maxCacheSizeInBytes) { + std::stringstream ss; + ss << "key" << key; + Add(hashTable, ss.str(), c_valStr); + ++key; + } + + // Make sure no eviction happened. 
+ BOOST_CHECK_EQUAL(m_epochManager.m_numRegisterActionsCalled, 0U); + + // Look up with the "key1" key to update the access state. + BOOST_CHECK(CheckRecord(hashTable, "key1", c_valStr)); + + // Now add a new key, which triggers an eviction, but deletes other records + // than the "key1" record. + Add(hashTable, "newkey", c_valStr); + + // Now, eviction should have happened. + BOOST_CHECK_GE(m_epochManager.m_numRegisterActionsCalled, 1U); + + // The "key1" record should not have been evicted. + BOOST_CHECK(CheckRecord(hashTable, "key1", c_valStr)); + + // Make sure the new key is actually added. + BOOST_CHECK(CheckRecord(hashTable, "newkey", c_valStr)); +} + +// This is similar to the one in ReadWriteHashTableTest, but necessary since +// cache store adds the meta values. +BOOST_FIXTURE_TEST_CASE(FixedKeyValueHashTableTest, CacheHashTableTestFixture) { + // Fixed 4 byte keys and 6 byte values. + std::vector settings = { + HashTable::Setting{100, 200, 4, 0}, HashTable::Setting{100, 200, 0, 6}, + HashTable::Setting{100, 200, 4, 6}}; + + for (const auto& setting : settings) { // Don't care about evict in this test case, so make the cache size big. constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF; - constexpr seconds c_recordTimeToLive{ 20U }; + constexpr seconds c_recordTimeToLive{20U}; - CacheHashTable hashTable( - m_hashTable, - m_epochManager, - c_maxCacheSizeInBytes, - c_recordTimeToLive, - false); + HashTable hashTable{setting, m_allocator}; + CacheHashTable writableHashTable{hashTable, m_epochManager, + c_maxCacheSizeInBytes, c_recordTimeToLive, + false}; - const std::vector> c_keyValuePairs = - { - { "key1", "value1" }, - { "key2", "value2" }, - { "key3", "value3" }, - { "key4", "value4" }, - { "key5", "value5" } - }; + ReadOnlyCacheHashTable readOnlyHashTable{hashTable, c_recordTimeToLive}; - // Add 5 records at a different epoch time (10 seconds increment). - for (const auto& pair : c_keyValuePairs) - { - MockClock::IncrementEpochTime(seconds{ 10 }); - Add(hashTable, pair.first, pair.second); + constexpr std::uint8_t c_numRecords = 10; - // Make sure the records can be retrieved right away. The record has not been - // expired since the clock hasn't moved yet. - BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second)); + // Add records. + for (std::uint8_t i = 0; i < c_numRecords; ++i) { + Add(writableHashTable, "key" + std::to_string(i), + "value" + std::to_string(i)); } - const auto& perfData = hashTable.GetPerfData(); - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::CacheHitCount, 5 } - }); + Utils::ValidateCounters(writableHashTable.GetPerfData(), + {{HashTablePerfCounter::RecordsCount, 10}, + {HashTablePerfCounter::BucketsCount, 100}, + {HashTablePerfCounter::TotalKeySize, 40}, + {HashTablePerfCounter::TotalValueSize, 100}, + {HashTablePerfCounter::MinKeySize, 4}, + {HashTablePerfCounter::MaxKeySize, 4}, + {HashTablePerfCounter::MinValueSize, 10}, + {HashTablePerfCounter::MaxValueSize, 10}}); - // Now we have the following data sets: - // | Key | Value | Creation time | - // | key1 | value1 | 10 | - // | key2 | value2 | 20 | - // | key3 | value3 | 30 | - // | key4 | value4 | 40 | - // | key5 | value5 | 50 | - // And the current clock is at 50. + // Validate all the records added. + for (std::uint8_t i = 0; i < c_numRecords; ++i) { + CheckRecord(readOnlyHashTable, "key" + std::to_string(i), + "value" + std::to_string(i)); + } - // Do look ups and check expired records. 
- for (const auto& pair : c_keyValuePairs) - { + // Remove first half of the records. + for (std::uint8_t i = 0; i < c_numRecords / 2; ++i) { + Remove(writableHashTable, "key" + std::to_string(i)); + } + + Utils::ValidateCounters(writableHashTable.GetPerfData(), + {{HashTablePerfCounter::RecordsCount, 5}, + {HashTablePerfCounter::BucketsCount, 100}, + {HashTablePerfCounter::TotalKeySize, 20}, + {HashTablePerfCounter::TotalValueSize, 50}}); + + // Verify the records. + for (std::uint8_t i = 0; i < c_numRecords; ++i) { + if (i < (c_numRecords / 2)) { IReadOnlyHashTable::Value value; - // Our time to live value is 20, so key0 and key0 records should be expired. - if (pair.first == "key1" || pair.first == "key2") - { - BOOST_CHECK(!Get(hashTable, pair.first, value)); - } - else - { - BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second)); - } + BOOST_CHECK(!Get(readOnlyHashTable, "key" + std::to_string(i), value)); + } else { + CheckRecord(readOnlyHashTable, "key" + std::to_string(i), + "value" + std::to_string(i)); + } } - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::CacheHitCount, 8 }, - { HashTablePerfCounter::CacheMissCount, 2 } - }); + // Expire all the records. + MockClock::IncrementEpochTime(seconds{100}); - MockClock::IncrementEpochTime(seconds{ 100 }); - - // All the records should be expired now. - for (const auto& pair : c_keyValuePairs) - { - IReadOnlyHashTable::Value value; - BOOST_CHECK(!Get(hashTable, pair.first, value)); - } - - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::CacheHitCount, 8 }, - { HashTablePerfCounter::CacheMissCount, 7 } - }); -} - - -BOOST_FIXTURE_TEST_CASE(CacheHashTableIteratorTest, CacheHashTableTestFixture) -{ - // Don't care about evict in this test case, so make the cache size big. - constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF; - constexpr seconds c_recordTimeToLive{ 20U }; - - CacheHashTable hashTable( - m_hashTable, - m_epochManager, - c_maxCacheSizeInBytes, - c_recordTimeToLive, - false); - - const std::vector c_keys = { "key1", "key2", "key3", "key4", "key5" }; - const std::vector c_vals = { "val1", "val2", "val3", "val4", "val5" }; - - // Add 5 records at a different epoch time (3 seconds increment). - for (std::size_t i = 0; i < c_keys.size(); ++i) - { - MockClock::IncrementEpochTime(seconds{ 3 }); - Add(hashTable, c_keys[i], c_vals[i]); - } - - // Now we have the following data sets: - // | Key | Value | Creation time | - // | key1 | value1 | 3 | - // | key2 | value2 | 6 | - // | key3 | value3 | 9 | - // | key4 | value4 | 12 | - // | key5 | value5 | 15 | - // And the current clock is at 15. - - auto iterator = hashTable.GetIterator(); - std::uint16_t numRecords = 0; - while (iterator->MoveNext()) - { - ++numRecords; - BOOST_CHECK(Exist(iterator->GetKey(), c_keys)); - BOOST_CHECK(Exist(iterator->GetValue(), c_vals)); - } - - BOOST_CHECK_EQUAL(numRecords, 5); - - // The clock becomes 30 and key1, key2 and key3 should expire. - MockClock::IncrementEpochTime(seconds{ 15 }); - - iterator = hashTable.GetIterator(); - numRecords = 0; - while (iterator->MoveNext()) - { - ++numRecords; - BOOST_CHECK( - Exist( - iterator->GetKey(), - std::vector{ c_keys.cbegin() + 2, c_keys.cend() })); - BOOST_CHECK( - Exist( - iterator->GetValue(), - std::vector{ c_vals.cbegin() + 2, c_vals.cend() })); - } - - BOOST_CHECK_EQUAL(numRecords, 2); - - // The clock becomes 40 and all records should be expired now. 
- MockClock::IncrementEpochTime(seconds{ 10 }); - - iterator = hashTable.GetIterator(); - while (iterator->MoveNext()) - { - BOOST_CHECK(false); - } -} - - -BOOST_FIXTURE_TEST_CASE(TimeBasedEvictionTest, CacheHashTableTestFixture) -{ - // We only care about time-based eviction in this test, so make the cache size big. - constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF; - constexpr seconds c_recordTimeToLive{ 10U }; - - // Hash table with one bucket makes testing the time-based eviction easy. - HashTable internalHashTable{ HashTable::Setting{ 1 }, m_allocator }; - CacheHashTable hashTable( - internalHashTable, - m_epochManager, - c_maxCacheSizeInBytes, - c_recordTimeToLive, - true); - - const std::vector> c_keyValuePairs = - { - { "key1", "value1" }, - { "key2", "value2" }, - { "key3", "value3" }, - { "key4", "value4" }, - { "key5", "value5" } - }; - - for (const auto& pair : c_keyValuePairs) - { - Add(hashTable, pair.first, pair.second); - BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second)); - } - - const auto& perfData = hashTable.GetPerfData(); - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::CacheHitCount, 5 }, - { HashTablePerfCounter::RecordsCount, 5 }, - { HashTablePerfCounter::EvictedRecordsCount, 0 }, - }); - - MockClock::IncrementEpochTime(seconds{ 20 }); - - // All the records should be expired now. - for (const auto& pair : c_keyValuePairs) - { - IReadOnlyHashTable::Value value; - BOOST_CHECK(!Get(hashTable, pair.first, value)); - } - - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::CacheHitCount, 5 }, - { HashTablePerfCounter::CacheMissCount, 5 }, - { HashTablePerfCounter::RecordsCount, 5 }, - { HashTablePerfCounter::EvictedRecordsCount, 0 }, - }); - - // Now try to add one record and all the expired records should be evicted. - const auto& keyValuePair = c_keyValuePairs[0]; - Add(hashTable, keyValuePair.first, keyValuePair.second); - - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, 1 }, - { HashTablePerfCounter::EvictedRecordsCount, 5 }, - }); -} - - -BOOST_FIXTURE_TEST_CASE(EvcitAllRecordsTest, CacheHashTableTestFixture) -{ - const auto& perfData = m_hashTable.m_perfData; - const auto initialTotalIndexSize = perfData.Get(HashTablePerfCounter::TotalIndexSize); - const std::uint64_t c_maxCacheSizeInBytes = 500 + initialTotalIndexSize; - constexpr seconds c_recordTimeToLive{ 5 }; - - CacheHashTable hashTable{ - m_hashTable, - m_epochManager, - c_maxCacheSizeInBytes, - c_recordTimeToLive, - false }; - - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::EvictedRecordsCount, 0 }, - }); - - const std::vector> c_keyValuePairs = - { - { "key1", "value1" }, - { "key2", "value2" }, - { "key3", "value3" }, - { "key4", "value4" }, - { "key5", "value5" } - }; - - for (const auto& pair : c_keyValuePairs) - { - Add(hashTable, pair.first, pair.second); - } - - using L4::HashTable::RecordSerializer; - - // Variable key/value sizes. - const auto recordOverhead = RecordSerializer{ 0U, 0U }.CalculateRecordOverhead(); - - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, c_keyValuePairs.size() }, - { HashTablePerfCounter::TotalIndexSize, initialTotalIndexSize + (c_keyValuePairs.size() * recordOverhead) }, - { HashTablePerfCounter::EvictedRecordsCount, 0 }, - }); - - // Make sure all data records added are present and update the access status for each - // record in order to test that accessed records are deleted when it's under memory constraint. 
- for (const auto& pair : c_keyValuePairs) - { - BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second)); - } - - // Now insert a record that will force all the records to be evicted due to size. - std::string bigRecordKeyStr(10, 'k'); - std::string bigRecordValStr(500, 'v'); - - Add(hashTable, bigRecordKeyStr, bigRecordValStr); - - // Make sure all the previously inserted records are evicted. - for (const auto& pair : c_keyValuePairs) - { - IReadOnlyHashTable::Value value; - BOOST_CHECK(!Get(hashTable, pair.first, value)); - } - - // Make sure the big record is inserted. - BOOST_CHECK(CheckRecord(hashTable, bigRecordKeyStr, bigRecordValStr)); - - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, 1 }, - { HashTablePerfCounter::TotalIndexSize, initialTotalIndexSize + (1 * recordOverhead) }, - { HashTablePerfCounter::EvictedRecordsCount, c_keyValuePairs.size() }, - }); -} - - -BOOST_FIXTURE_TEST_CASE(EvcitRecordsBasedOnAccessStatusTest, CacheHashTableTestFixture) -{ - const std::uint64_t c_maxCacheSizeInBytes - = 2000 + m_hashTable.m_perfData.Get(HashTablePerfCounter::TotalIndexSize); - const seconds c_recordTimeToLive{ 5 }; - - CacheHashTable hashTable( - m_hashTable, - m_epochManager, - c_maxCacheSizeInBytes, - c_recordTimeToLive, - false); - - constexpr std::uint32_t c_valueSize = 100; - const std::string c_valStr(c_valueSize, 'v'); - const auto& perfData = hashTable.GetPerfData(); - std::uint16_t key = 1; - - while ((static_cast(perfData.Get(HashTablePerfCounter::TotalIndexSize)) - + perfData.Get(HashTablePerfCounter::TotalKeySize) - + perfData.Get(HashTablePerfCounter::TotalValueSize) - + c_valueSize) - < c_maxCacheSizeInBytes) - { - std::stringstream ss; - ss << "key" << key; - Add(hashTable, ss.str(), c_valStr); - ++key; - } - - // Make sure no eviction happened. - BOOST_CHECK_EQUAL(m_epochManager.m_numRegisterActionsCalled, 0U); - - // Look up with the "key1" key to update the access state. - BOOST_CHECK(CheckRecord(hashTable, "key1", c_valStr)); - - // Now add a new key, which triggers an eviction, but deletes other records than the "key1" record. - Add(hashTable, "newkey", c_valStr); - - // Now, eviction should have happened. - BOOST_CHECK_GE(m_epochManager.m_numRegisterActionsCalled, 1U); - - // The "key1" record should not have been evicted. - BOOST_CHECK(CheckRecord(hashTable, "key1", c_valStr)); - - // Make sure the new key is actually added. - BOOST_CHECK(CheckRecord(hashTable, "newkey", c_valStr)); -} - - -// This is similar to the one in ReadWriteHashTableTest, but necessary since cache store adds the meta values. -BOOST_FIXTURE_TEST_CASE(FixedKeyValueHashTableTest, CacheHashTableTestFixture) -{ - // Fixed 4 byte keys and 6 byte values. - std::vector settings = - { - HashTable::Setting{ 100, 200, 4, 0 }, - HashTable::Setting{ 100, 200, 0, 6 }, - HashTable::Setting{ 100, 200, 4, 6 } - }; - - for (const auto& setting : settings) - { - // Don't care about evict in this test case, so make the cache size big. - constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF; - constexpr seconds c_recordTimeToLive{ 20U }; - - HashTable hashTable{ setting, m_allocator }; - CacheHashTable writableHashTable{ - hashTable, - m_epochManager, - c_maxCacheSizeInBytes, - c_recordTimeToLive, - false }; - - ReadOnlyCacheHashTable readOnlyHashTable{ hashTable, c_recordTimeToLive }; - - constexpr std::uint8_t c_numRecords = 10; - - // Add records. 
- for (std::uint8_t i = 0; i < c_numRecords; ++i) - { - Add(writableHashTable, "key" + std::to_string(i), "value" + std::to_string(i)); - } - - Utils::ValidateCounters( - writableHashTable.GetPerfData(), - { - { HashTablePerfCounter::RecordsCount, 10 }, - { HashTablePerfCounter::BucketsCount, 100 }, - { HashTablePerfCounter::TotalKeySize, 40 }, - { HashTablePerfCounter::TotalValueSize, 100 }, - { HashTablePerfCounter::MinKeySize, 4 }, - { HashTablePerfCounter::MaxKeySize, 4 }, - { HashTablePerfCounter::MinValueSize, 10 }, - { HashTablePerfCounter::MaxValueSize, 10 } - }); - - // Validate all the records added. - for (std::uint8_t i = 0; i < c_numRecords; ++i) - { - CheckRecord(readOnlyHashTable, "key" + std::to_string(i), "value" + std::to_string(i)); - } - - // Remove first half of the records. - for (std::uint8_t i = 0; i < c_numRecords / 2; ++i) - { - Remove(writableHashTable, "key" + std::to_string(i)); - } - - Utils::ValidateCounters( - writableHashTable.GetPerfData(), - { - { HashTablePerfCounter::RecordsCount, 5 }, - { HashTablePerfCounter::BucketsCount, 100 }, - { HashTablePerfCounter::TotalKeySize, 20 }, - { HashTablePerfCounter::TotalValueSize, 50 } - }); - - // Verify the records. - for (std::uint8_t i = 0; i < c_numRecords; ++i) - { - if (i < (c_numRecords / 2)) - { - IReadOnlyHashTable::Value value; - BOOST_CHECK(!Get(readOnlyHashTable, "key" + std::to_string(i), value)); - } - else - { - CheckRecord(readOnlyHashTable, "key" + std::to_string(i), "value" + std::to_string(i)); - } - } - - // Expire all the records. - MockClock::IncrementEpochTime(seconds{ 100 }); - - // Verify the records. - for (std::uint8_t i = 0; i < c_numRecords; ++i) - { - IReadOnlyHashTable::Value value; - BOOST_CHECK(!Get(readOnlyHashTable, "key" + std::to_string(i), value)); - } + // Verify the records. 
+ for (std::uint8_t i = 0; i < c_numRecords; ++i) { + IReadOnlyHashTable::Value value; + BOOST_CHECK(!Get(readOnlyHashTable, "key" + std::to_string(i), value)); } + } } BOOST_AUTO_TEST_SUITE_END() -} // namespace UnitTests -} // namespace L4 +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/CheckedAllocator.h b/Unittests/CheckedAllocator.h index 3bc68bd..d3d2fd3 100644 --- a/Unittests/CheckedAllocator.h +++ b/Unittests/CheckedAllocator.h @@ -1,68 +1,56 @@ #pragma once +#include #include #include -#include -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { -struct AllocationAddressHolder : public std::set -{ - ~AllocationAddressHolder() - { - BOOST_REQUIRE(empty()); - } +struct AllocationAddressHolder : public std::set { + ~AllocationAddressHolder() { BOOST_REQUIRE(empty()); } }; template -class CheckedAllocator : public std::allocator -{ -public: - using Base = std::allocator; - using pointer = typename Base::pointer; +class CheckedAllocator : public std::allocator { + public: + using Base = std::allocator; + using pointer = typename Base::pointer; - template - struct rebind - { - typedef CheckedAllocator other; - }; + template + struct rebind { + typedef CheckedAllocator other; + }; - CheckedAllocator() - : m_allocationAddresses{ std::make_shared() } - {} + CheckedAllocator() + : m_allocationAddresses{std::make_shared()} {} - CheckedAllocator(const CheckedAllocator&) = default; + CheckedAllocator(const CheckedAllocator&) = default; - template - CheckedAllocator(const CheckedAllocator& other) - : m_allocationAddresses{ other.m_allocationAddresses } - {} + template + CheckedAllocator(const CheckedAllocator& other) + : m_allocationAddresses{other.m_allocationAddresses} {} - template - CheckedAllocator& operator=(const CheckedAllocator& other) - { - m_allocationAddresses = other.m_allocationAddresses; - return (*this); - } + template + CheckedAllocator& operator=(const CheckedAllocator& other) { + m_allocationAddresses = other.m_allocationAddresses; + return (*this); + } - pointer allocate(std::size_t count, std::allocator::const_pointer hint = 0) - { - auto address = Base::allocate(count, hint); - BOOST_REQUIRE(m_allocationAddresses->insert(address).second); - return address; - } + pointer allocate(std::size_t count, + std::allocator::const_pointer hint = 0) { + auto address = Base::allocate(count, hint); + BOOST_REQUIRE(m_allocationAddresses->insert(address).second); + return address; + } - void deallocate(pointer ptr, std::size_t count) - { - BOOST_REQUIRE(m_allocationAddresses->erase(ptr) == 1); - Base::deallocate(ptr, count); - } + void deallocate(pointer ptr, std::size_t count) { + BOOST_REQUIRE(m_allocationAddresses->erase(ptr) == 1); + Base::deallocate(ptr, count); + } - std::shared_ptr m_allocationAddresses; + std::shared_ptr m_allocationAddresses; }; -} // namespace UnitTests -} // namespace L4 +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/ConnectionMonitorTest.cpp b/Unittests/ConnectionMonitorTest.cpp index 31b8de1..e0498e7 100644 --- a/Unittests/ConnectionMonitorTest.cpp +++ b/Unittests/ConnectionMonitorTest.cpp @@ -1,80 +1,82 @@ #include -#include #include -#include "Utils.h" +#include #include "L4/Interprocess/Connection/ConnectionMonitor.h" #include "L4/Interprocess/Connection/EndPointInfoUtils.h" +#include "Utils.h" -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { BOOST_AUTO_TEST_SUITE(ConnectionMonitorTests) -BOOST_AUTO_TEST_CASE(ConnectionMonitorTest) -{ - std::vector 
endPointsDisconnected; - std::mutex lock; - std::condition_variable cv; +BOOST_AUTO_TEST_CASE(ConnectionMonitorTest) { + std::vector endPointsDisconnected; + std::mutex lock; + std::condition_variable cv; - auto server = std::make_shared(); + auto server = std::make_shared(); - auto noOpCallback = [](const auto&) { throw std::runtime_error("This will not be called."); }; - auto callback = [&](const auto& endPoint) - { - std::unique_lock guard{ lock }; - endPointsDisconnected.emplace_back(endPoint); - cv.notify_one(); - }; + auto noOpCallback = [](const auto&) { + throw std::runtime_error("This will not be called."); + }; + auto callback = [&](const auto& endPoint) { + std::unique_lock guard{lock}; + endPointsDisconnected.emplace_back(endPoint); + cv.notify_one(); + }; - auto client1 = std::make_shared(); - client1->Register(server->GetLocalEndPointInfo(), noOpCallback); - server->Register(client1->GetLocalEndPointInfo(), callback); + auto client1 = + std::make_shared(); + client1->Register(server->GetLocalEndPointInfo(), noOpCallback); + server->Register(client1->GetLocalEndPointInfo(), callback); - // Registering the same end point is not allowed. - CHECK_EXCEPTION_THROWN_WITH_MESSAGE( - server->Register(client1->GetLocalEndPointInfo(), noOpCallback); , - "Duplicate end point found."); + // Registering the same end point is not allowed. + CHECK_EXCEPTION_THROWN_WITH_MESSAGE( + server->Register(client1->GetLocalEndPointInfo(), noOpCallback); + , "Duplicate end point found."); - auto client2 = std::make_shared(); - client2->Register(server->GetLocalEndPointInfo(), callback); - server->Register(client2->GetLocalEndPointInfo(), noOpCallback); + auto client2 = + std::make_shared(); + client2->Register(server->GetLocalEndPointInfo(), callback); + server->Register(client2->GetLocalEndPointInfo(), noOpCallback); - auto client3 = std::make_shared(); - client3->Register(server->GetLocalEndPointInfo(), callback); - server->Register(client3->GetLocalEndPointInfo(), noOpCallback); + auto client3 = + std::make_shared(); + client3->Register(server->GetLocalEndPointInfo(), callback); + server->Register(client3->GetLocalEndPointInfo(), noOpCallback); - BOOST_CHECK_EQUAL(server->GetRemoteConnectionsCount(), 3U); + BOOST_CHECK_EQUAL(server->GetRemoteConnectionsCount(), 3U); - // Kill client1 and check if the callback is called on the server side. - auto client1EndPointInfo = client1->GetLocalEndPointInfo(); - client1.reset(); - { - std::unique_lock guard{ lock }; - cv.wait(guard, [&] { return endPointsDisconnected.size() >= 1U; }); - BOOST_REQUIRE_EQUAL(endPointsDisconnected.size(), 1U); - BOOST_CHECK(endPointsDisconnected[0] == client1EndPointInfo); - endPointsDisconnected.clear(); - BOOST_CHECK_EQUAL(server->GetRemoteConnectionsCount(), 2U); - } + // Kill client1 and check if the callback is called on the server side. + auto client1EndPointInfo = client1->GetLocalEndPointInfo(); + client1.reset(); + { + std::unique_lock guard{lock}; + cv.wait(guard, [&] { return endPointsDisconnected.size() >= 1U; }); + BOOST_REQUIRE_EQUAL(endPointsDisconnected.size(), 1U); + BOOST_CHECK(endPointsDisconnected[0] == client1EndPointInfo); + endPointsDisconnected.clear(); + BOOST_CHECK_EQUAL(server->GetRemoteConnectionsCount(), 2U); + } - // Now kill server and check if both callbacks in client2 and client3 are called. 
- auto serverEndPointInfo = server->GetLocalEndPointInfo(); - server.reset(); - { - std::unique_lock guard{ lock }; - cv.wait(guard, [&] { return endPointsDisconnected.size() >= 2U; }); - BOOST_REQUIRE_EQUAL(endPointsDisconnected.size(), 2U); - BOOST_CHECK(endPointsDisconnected[0] == serverEndPointInfo); - BOOST_CHECK(endPointsDisconnected[1] == serverEndPointInfo); - endPointsDisconnected.clear(); - BOOST_CHECK_EQUAL(client2->GetRemoteConnectionsCount(), 0U); - BOOST_CHECK_EQUAL(client3->GetRemoteConnectionsCount(), 0U); - } + // Now kill server and check if both callbacks in client2 and client3 are + // called. + auto serverEndPointInfo = server->GetLocalEndPointInfo(); + server.reset(); + { + std::unique_lock guard{lock}; + cv.wait(guard, [&] { return endPointsDisconnected.size() >= 2U; }); + BOOST_REQUIRE_EQUAL(endPointsDisconnected.size(), 2U); + BOOST_CHECK(endPointsDisconnected[0] == serverEndPointInfo); + BOOST_CHECK(endPointsDisconnected[1] == serverEndPointInfo); + endPointsDisconnected.clear(); + BOOST_CHECK_EQUAL(client2->GetRemoteConnectionsCount(), 0U); + BOOST_CHECK_EQUAL(client3->GetRemoteConnectionsCount(), 0U); + } } BOOST_AUTO_TEST_SUITE_END() -} // namespace UnitTests -} // namespace L4 \ No newline at end of file +} // namespace UnitTests +} // namespace L4 \ No newline at end of file diff --git a/Unittests/EpochManagerTest.cpp b/Unittests/EpochManagerTest.cpp index 01b6cc6..cfe7b87 100644 --- a/Unittests/EpochManagerTest.cpp +++ b/Unittests/EpochManagerTest.cpp @@ -1,187 +1,190 @@ #include #include -#include "Utils.h" -#include "L4/Epoch/EpochQueue.h" #include "L4/Epoch/EpochActionManager.h" +#include "L4/Epoch/EpochQueue.h" #include "L4/LocalMemory/EpochManager.h" #include "L4/Log/PerfCounter.h" #include "L4/Utils/Lock.h" +#include "Utils.h" -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { BOOST_AUTO_TEST_SUITE(EpochManagerTests) -BOOST_AUTO_TEST_CASE(EpochRefManagerTest) -{ - std::uint64_t currentEpochCounter = 5U; - const std::uint32_t c_epochQueueSize = 100U; +BOOST_AUTO_TEST_CASE(EpochRefManagerTest) { + std::uint64_t currentEpochCounter = 5U; + const std::uint32_t c_epochQueueSize = 100U; - using EpochQueue = EpochQueue< - boost::shared_lock_guard, - std::lock_guard>; + using EpochQueue = + EpochQueue, + std::lock_guard>; - EpochQueue epochQueue(currentEpochCounter, c_epochQueueSize); + EpochQueue epochQueue(currentEpochCounter, c_epochQueueSize); - // Initially the ref count at the current epoch counter should be 0. - BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 0U); + // Initially the ref count at the current epoch counter should be 0. + BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 0U); - EpochRefManager epochManager(epochQueue); + EpochRefManager epochManager(epochQueue); - BOOST_CHECK_EQUAL(epochManager.AddRef(), currentEpochCounter); + BOOST_CHECK_EQUAL(epochManager.AddRef(), currentEpochCounter); - // Validate that a reference count is incremented at the current epoch counter. - BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 1U); + // Validate that a reference count is incremented at the current epoch + // counter. + BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 1U); - epochManager.RemoveRef(currentEpochCounter); + epochManager.RemoveRef(currentEpochCounter); - // Validate that a reference count is back to 0. - BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 0U); + // Validate that a reference count is back to 0. 
+ BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 0U); - // Decrementing a reference counter when it is already 0 will result in an exception. - CHECK_EXCEPTION_THROWN_WITH_MESSAGE( - epochManager.RemoveRef(currentEpochCounter);, - "Reference counter is invalid."); + // Decrementing a reference counter when it is already 0 will result in an + // exception. + CHECK_EXCEPTION_THROWN_WITH_MESSAGE( + epochManager.RemoveRef(currentEpochCounter); + , "Reference counter is invalid."); } +BOOST_AUTO_TEST_CASE(EpochCounterManagerTest) { + std::uint64_t currentEpochCounter = 0U; + const std::uint32_t c_epochQueueSize = 100U; -BOOST_AUTO_TEST_CASE(EpochCounterManagerTest) -{ - std::uint64_t currentEpochCounter = 0U; - const std::uint32_t c_epochQueueSize = 100U; + using EpochQueue = + EpochQueue, + std::lock_guard>; - using EpochQueue = EpochQueue< - boost::shared_lock_guard, - std::lock_guard>; + EpochQueue epochQueue(currentEpochCounter, c_epochQueueSize); - EpochQueue epochQueue(currentEpochCounter, c_epochQueueSize); + EpochCounterManager epochCounterManager(epochQueue); - EpochCounterManager epochCounterManager(epochQueue); + // If RemoveUnreferenceEpochCounters() is called when m_fonrtIndex and + // m_backIndex are the same, it will just return either value. + BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), + currentEpochCounter); - // If RemoveUnreferenceEpochCounters() is called when m_fonrtIndex and m_backIndex are - // the same, it will just return either value. - BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter); + // Add two epoch counts. + ++currentEpochCounter; + ++currentEpochCounter; + epochCounterManager.AddNewEpoch(); + epochCounterManager.AddNewEpoch(); - // Add two epoch counts. - ++currentEpochCounter; - ++currentEpochCounter; - epochCounterManager.AddNewEpoch(); - epochCounterManager.AddNewEpoch(); + BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, 0U); + BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter); + BOOST_CHECK_EQUAL(epochQueue.m_refCounts[epochQueue.m_frontIndex], 0U); - BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, 0U); - BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter); - BOOST_CHECK_EQUAL(epochQueue.m_refCounts[epochQueue.m_frontIndex], 0U); + // Since the m_frontIndex's reference count was zero, it will be incremented + // all the way to currentEpochCounter. + BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), + currentEpochCounter); + BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter); + BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter); - // Since the m_frontIndex's reference count was zero, it will be incremented - // all the way to currentEpochCounter. 
- BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter); - BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter); - BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter); + EpochRefManager epochRefManager(epochQueue); - EpochRefManager epochRefManager(epochQueue); + // Now add a reference at the currentEpochCounter; + const auto epochCounterReferenced = epochRefManager.AddRef(); + BOOST_CHECK_EQUAL(epochCounterReferenced, currentEpochCounter); - // Now add a reference at the currentEpochCounter; - const auto epochCounterReferenced = epochRefManager.AddRef(); - BOOST_CHECK_EQUAL(epochCounterReferenced, currentEpochCounter); + // Calling RemoveUnreferenceEpochCounters() should just return + // currentEpochCounter since m_frontIndex and m_backIndex is the same. (Not + // affected by adding a reference yet). + BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), + currentEpochCounter); + BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter); + BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter); - // Calling RemoveUnreferenceEpochCounters() should just return currentEpochCounter - // since m_frontIndex and m_backIndex is the same. (Not affected by adding a reference yet). - BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter); - BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter); - BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter); + // Add one epoch count. + ++currentEpochCounter; + epochCounterManager.AddNewEpoch(); - // Add one epoch count. - ++currentEpochCounter; - epochCounterManager.AddNewEpoch(); + // Now RemoveUnreferenceEpochCounters() should return epochCounterReferenced + // because of the reference count. + BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), + epochCounterReferenced); + BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, epochCounterReferenced); + BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter); - // Now RemoveUnreferenceEpochCounters() should return epochCounterReferenced because - // of the reference count. - BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), epochCounterReferenced); - BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, epochCounterReferenced); - BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter); + // Remove the reference. + epochRefManager.RemoveRef(epochCounterReferenced); - // Remove the reference. - epochRefManager.RemoveRef(epochCounterReferenced); - - // Now RemoveUnreferenceEpochCounters() should return currentEpochCounter and m_frontIndex - // should be in sync with m_backIndex. - BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter); - BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter); - BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter); + // Now RemoveUnreferenceEpochCounters() should return currentEpochCounter and + // m_frontIndex should be in sync with m_backIndex. 
+ BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), + currentEpochCounter); + BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter); + BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter); } +BOOST_AUTO_TEST_CASE(EpochActionManagerTest) { + EpochActionManager actionManager(2U); -BOOST_AUTO_TEST_CASE(EpochActionManagerTest) -{ - EpochActionManager actionManager(2U); + bool isAction1Called = false; + bool isAction2Called = false; - bool isAction1Called = false; - bool isAction2Called = false; + auto action1 = [&]() { isAction1Called = true; }; + auto action2 = [&]() { isAction2Called = true; }; - auto action1 = [&]() { isAction1Called = true; }; - auto action2 = [&]() { isAction2Called = true; }; + // Register action1 and action2 at epoch count 5 and 6 respectively. + actionManager.RegisterAction(5U, action1); + actionManager.RegisterAction(6U, action2); - // Register action1 and action2 at epoch count 5 and 6 respectively. - actionManager.RegisterAction(5U, action1); - actionManager.RegisterAction(6U, action2); + BOOST_CHECK(!isAction1Called && !isAction2Called); - BOOST_CHECK(!isAction1Called && !isAction2Called); + actionManager.PerformActions(4); + BOOST_CHECK(!isAction1Called && !isAction2Called); - actionManager.PerformActions(4); - BOOST_CHECK(!isAction1Called && !isAction2Called); + actionManager.PerformActions(5); + BOOST_CHECK(!isAction1Called && !isAction2Called); - actionManager.PerformActions(5); - BOOST_CHECK(!isAction1Called && !isAction2Called); + actionManager.PerformActions(6); + BOOST_CHECK(isAction1Called && !isAction2Called); - actionManager.PerformActions(6); - BOOST_CHECK(isAction1Called && !isAction2Called); - - actionManager.PerformActions(7); - BOOST_CHECK(isAction1Called && isAction2Called); + actionManager.PerformActions(7); + BOOST_CHECK(isAction1Called && isAction2Called); } +BOOST_AUTO_TEST_CASE(EpochManagerTest) { + ServerPerfData perfData; + LocalMemory::EpochManager epochManager( + EpochManagerConfig(100000U, std::chrono::milliseconds(5U), 1U), perfData); -BOOST_AUTO_TEST_CASE(EpochManagerTest) -{ - ServerPerfData perfData; - LocalMemory::EpochManager epochManager( - EpochManagerConfig(100000U, std::chrono::milliseconds(5U), 1U), - perfData); + std::atomic isActionCalled{false}; + auto action = [&]() { isActionCalled = true; }; - std::atomic isActionCalled{ false }; - auto action = [&]() { isActionCalled = true; }; + auto epochCounterReferenced = epochManager.GetEpochRefManager().AddRef(); - auto epochCounterReferenced = epochManager.GetEpochRefManager().AddRef(); + epochManager.RegisterAction(action); - epochManager.RegisterAction(action); + // Justification for using sleep_for in unit tests: + // - EpochManager already uses an internal thread which wakes up and perform a + // task in a given interval and when the class is destroyed, there is a + // mechanism for waiting for the thread anyway. It's more crucial to test the + // end to end scenario this way. + // - The overall execution time for this test is less than 50 milliseconds. 
+ auto initialEpochCounter = + perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue); + while (perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue) - + initialEpochCounter < + 2) { + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + } - // Justification for using sleep_for in unit tests: - // - EpochManager already uses an internal thread which wakes up and perform a task - // in a given interval and when the class is destroyed, there is a mechanism for - // waiting for the thread anyway. It's more crucial to test the end to end scenario this way. - // - The overall execution time for this test is less than 50 milliseconds. - auto initialEpochCounter = perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue); - while (perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue) - initialEpochCounter < 2) - { - std::this_thread::sleep_for(std::chrono::milliseconds(5)); - } + BOOST_CHECK(!isActionCalled); - BOOST_CHECK(!isActionCalled); + epochManager.GetEpochRefManager().RemoveRef(epochCounterReferenced); - epochManager.GetEpochRefManager().RemoveRef(epochCounterReferenced); + initialEpochCounter = + perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue); + while (perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue) - + initialEpochCounter < + 2) { + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + } - initialEpochCounter = perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue); - while (perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue) - initialEpochCounter < 2) - { - std::this_thread::sleep_for(std::chrono::milliseconds(5)); - } - - BOOST_CHECK(isActionCalled); + BOOST_CHECK(isActionCalled); } BOOST_AUTO_TEST_SUITE_END() -} // namespace UnitTests -} // namespace L4 +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/HashTableManagerTest.cpp b/Unittests/HashTableManagerTest.cpp index c549f79..7c0d4fb 100644 --- a/Unittests/HashTableManagerTest.cpp +++ b/Unittests/HashTableManagerTest.cpp @@ -1,128 +1,111 @@ #include -#include "Utils.h" -#include "Mocks.h" #include "L4/HashTable/Config.h" #include "L4/HashTable/IHashTable.h" #include "L4/LocalMemory/HashTableManager.h" +#include "Mocks.h" +#include "Utils.h" -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { -class HashTableManagerTestsFixture -{ -protected: - template - void ValidateRecord( - const Store& store, - const char* expectedKeyStr, - const char* expectedValueStr) - { - IReadOnlyHashTable::Value actualValue; - auto expectedValue = Utils::ConvertFromString(expectedValueStr); - BOOST_CHECK(store.Get(Utils::ConvertFromString(expectedKeyStr), actualValue)); - BOOST_CHECK(actualValue.m_size == expectedValue.m_size); - BOOST_CHECK(!memcmp(actualValue.m_data, expectedValue.m_data, expectedValue.m_size)); - } +class HashTableManagerTestsFixture { + protected: + template + void ValidateRecord(const Store& store, + const char* expectedKeyStr, + const char* expectedValueStr) { + IReadOnlyHashTable::Value actualValue; + auto expectedValue = + Utils::ConvertFromString(expectedValueStr); + BOOST_CHECK(store.Get( + Utils::ConvertFromString(expectedKeyStr), + actualValue)); + BOOST_CHECK(actualValue.m_size == expectedValue.m_size); + BOOST_CHECK(!memcmp(actualValue.m_data, expectedValue.m_data, + expectedValue.m_size)); + } - MockEpochManager m_epochManager; - std::allocator m_allocator; + MockEpochManager m_epochManager; + std::allocator m_allocator; }; BOOST_FIXTURE_TEST_SUITE(HashTableManagerTests, HashTableManagerTestsFixture) 
-BOOST_AUTO_TEST_CASE(HashTableManagerTest) -{ - LocalMemory::HashTableManager htManager; - const auto ht1Index = htManager.Add( - HashTableConfig("HashTable1", HashTableConfig::Setting(100U)), - m_epochManager, - m_allocator); - const auto ht2Index = htManager.Add( - HashTableConfig("HashTable2", HashTableConfig::Setting(200U)), - m_epochManager, - m_allocator); +BOOST_AUTO_TEST_CASE(HashTableManagerTest) { + LocalMemory::HashTableManager htManager; + const auto ht1Index = htManager.Add( + HashTableConfig("HashTable1", HashTableConfig::Setting(100U)), + m_epochManager, m_allocator); + const auto ht2Index = htManager.Add( + HashTableConfig("HashTable2", HashTableConfig::Setting(200U)), + m_epochManager, m_allocator); - { - auto& hashTable1 = htManager.GetHashTable("HashTable1"); - hashTable1.Add( - Utils::ConvertFromString("HashTable1Key"), - Utils::ConvertFromString("HashTable1Value")); + { + auto& hashTable1 = htManager.GetHashTable("HashTable1"); + hashTable1.Add( + Utils::ConvertFromString("HashTable1Key"), + Utils::ConvertFromString("HashTable1Value")); - auto& hashTable2 = htManager.GetHashTable("HashTable2"); - hashTable2.Add( - Utils::ConvertFromString("HashTable2Key"), - Utils::ConvertFromString("HashTable2Value")); - } + auto& hashTable2 = htManager.GetHashTable("HashTable2"); + hashTable2.Add( + Utils::ConvertFromString("HashTable2Key"), + Utils::ConvertFromString("HashTable2Value")); + } - ValidateRecord( - htManager.GetHashTable(ht1Index), - "HashTable1Key", - "HashTable1Value"); + ValidateRecord(htManager.GetHashTable(ht1Index), "HashTable1Key", + "HashTable1Value"); - ValidateRecord( - htManager.GetHashTable(ht2Index), - "HashTable2Key", - "HashTable2Value"); + ValidateRecord(htManager.GetHashTable(ht2Index), "HashTable2Key", + "HashTable2Value"); } +BOOST_AUTO_TEST_CASE(HashTableManagerTestForSerialzation) { + HashTableConfig htConfig{"HashTable1", HashTableConfig::Setting(100U)}; + std::ostringstream outStream; -BOOST_AUTO_TEST_CASE(HashTableManagerTestForSerialzation) -{ - HashTableConfig htConfig{ "HashTable1", HashTableConfig::Setting(100U) }; - std::ostringstream outStream; + std::vector> testData; + for (std::int32_t i = 0; i < 10; ++i) { + testData.emplace_back("key" + std::to_string(i), "val" + std::to_string(i)); + } - std::vector> testData; - for (std::int32_t i = 0; i < 10; ++i) - { - testData.emplace_back( - "key" + std::to_string(i), - "val" + std::to_string(i)); + // Serialize a hash table. + { + LocalMemory::HashTableManager htManager; + const auto ht1Index = htManager.Add(htConfig, m_epochManager, m_allocator); + + auto& hashTable1 = htManager.GetHashTable("HashTable1"); + + for (const auto& kvPair : testData) { + hashTable1.Add(Utils::ConvertFromString( + kvPair.first.c_str()), + Utils::ConvertFromString( + kvPair.second.c_str())); } - // Serialize a hash table. - { - LocalMemory::HashTableManager htManager; - const auto ht1Index = htManager.Add(htConfig, m_epochManager, m_allocator); + auto serializer = hashTable1.GetSerializer(); + serializer->Serialize(outStream, {}); + } - auto& hashTable1 = htManager.GetHashTable("HashTable1"); + // Deserialize the hash table. 
+ { + htConfig.m_serializer.emplace( + std::make_shared(outStream.str())); - for (const auto& kvPair : testData) - { - hashTable1.Add( - Utils::ConvertFromString(kvPair.first.c_str()), - Utils::ConvertFromString(kvPair.second.c_str())); - } + LocalMemory::HashTableManager htManager; + const auto ht1Index = htManager.Add(htConfig, m_epochManager, m_allocator); - auto serializer = hashTable1.GetSerializer(); - serializer->Serialize(outStream, {}); - } - - // Deserialize the hash table. - { - htConfig.m_serializer.emplace( - std::make_shared(outStream.str())); - - LocalMemory::HashTableManager htManager; - const auto ht1Index = htManager.Add(htConfig, m_epochManager, m_allocator); - - auto& hashTable1 = htManager.GetHashTable("HashTable1"); - BOOST_CHECK_EQUAL( - hashTable1.GetPerfData().Get(HashTablePerfCounter::RecordsCount), - testData.size()); - - for (const auto& kvPair : testData) - { - ValidateRecord( - hashTable1, - kvPair.first.c_str(), - kvPair.second.c_str()); - } + auto& hashTable1 = htManager.GetHashTable("HashTable1"); + BOOST_CHECK_EQUAL( + hashTable1.GetPerfData().Get(HashTablePerfCounter::RecordsCount), + testData.size()); + + for (const auto& kvPair : testData) { + ValidateRecord(hashTable1, kvPair.first.c_str(), kvPair.second.c_str()); } + } } BOOST_AUTO_TEST_SUITE_END() -} // namespace UnitTests -} // namespace L4 +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/HashTableRecordTest.cpp b/Unittests/HashTableRecordTest.cpp index 934e835..45725a6 100644 --- a/Unittests/HashTableRecordTest.cpp +++ b/Unittests/HashTableRecordTest.cpp @@ -1,163 +1,161 @@ -#include #include +#include #include #include #include "L4/HashTable/Common/Record.h" #include "Utils.h" -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { using namespace HashTable; -class HashTableRecordTestFixture -{ -protected: - void Run(bool isFixedKey, bool isFixedValue, bool useMetaValue) - { - BOOST_TEST_MESSAGE( - "Running with isFixedKey=" << isFixedKey - << ", isFixedValue=" << isFixedValue - << ", useMetatValue=" << useMetaValue); +class HashTableRecordTestFixture { + protected: + void Run(bool isFixedKey, bool isFixedValue, bool useMetaValue) { + BOOST_TEST_MESSAGE("Running with isFixedKey=" + << isFixedKey << ", isFixedValue=" << isFixedValue + << ", useMetatValue=" << useMetaValue); - const std::string key = "TestKey"; - const std::string value = "TestValue"; - const std::string metaValue = "TestMetavalue"; + const std::string key = "TestKey"; + const std::string value = "TestValue"; + const std::string metaValue = "TestMetavalue"; - const auto recordOverhead = (isFixedKey ? 0U : c_keyTypeSize) + (isFixedValue ? 0U : c_valueTypeSize); + const auto recordOverhead = (isFixedKey ? 0U : c_keyTypeSize) + + (isFixedValue ? 0U : c_valueTypeSize); - Validate( - RecordSerializer{ - isFixedKey ? static_cast(key.size()) : std::uint16_t(0), - isFixedValue ? static_cast(value.size()) : 0U, - useMetaValue ? static_cast(metaValue.size()) : 0U }, - key, - value, - recordOverhead + key.size() + value.size() + (useMetaValue ? metaValue.size() : 0U), - recordOverhead, - useMetaValue ? 
boost::optional{ metaValue } : boost::none); - } - -private: - void Validate( - const RecordSerializer& serializer, - const std::string& keyStr, - const std::string& valueStr, - std::size_t expectedBufferSize, - std::size_t expectedRecordOverheadSize, - boost::optional metadataStr = boost::none) - { - BOOST_CHECK_EQUAL(serializer.CalculateRecordOverhead(), expectedRecordOverheadSize); + Validate( + RecordSerializer{ + isFixedKey ? static_cast(key.size()) + : std::uint16_t(0), + isFixedValue + ? static_cast(value.size()) + : 0U, + useMetaValue + ? static_cast(metaValue.size()) + : 0U}, + key, value, + recordOverhead + key.size() + value.size() + + (useMetaValue ? metaValue.size() : 0U), + recordOverhead, + useMetaValue ? boost::optional{metaValue} + : boost::none); + } - const auto key = Utils::ConvertFromString(keyStr.c_str()); - const auto value = Utils::ConvertFromString(valueStr.c_str()); + private: + void Validate(const RecordSerializer& serializer, + const std::string& keyStr, + const std::string& valueStr, + std::size_t expectedBufferSize, + std::size_t expectedRecordOverheadSize, + boost::optional metadataStr = boost::none) { + BOOST_CHECK_EQUAL(serializer.CalculateRecordOverhead(), + expectedRecordOverheadSize); - const auto bufferSize = serializer.CalculateBufferSize(key, value); + const auto key = Utils::ConvertFromString(keyStr.c_str()); + const auto value = + Utils::ConvertFromString(valueStr.c_str()); - BOOST_REQUIRE_EQUAL(bufferSize, expectedBufferSize); - std::vector buffer(bufferSize); + const auto bufferSize = serializer.CalculateBufferSize(key, value); - RecordBuffer* recordBuffer = nullptr; + BOOST_REQUIRE_EQUAL(bufferSize, expectedBufferSize); + std::vector buffer(bufferSize); - if (metadataStr) - { - auto metaValue = Utils::ConvertFromString(metadataStr->c_str()); - recordBuffer = serializer.Serialize(key, value, metaValue, buffer.data(), bufferSize); - } - else - { - recordBuffer = serializer.Serialize(key, value, buffer.data(), bufferSize); - } + RecordBuffer* recordBuffer = nullptr; - const auto record = serializer.Deserialize(*recordBuffer); - - // Make sure the data serialized is in different memory location. - BOOST_CHECK(record.m_key.m_data != key.m_data); - BOOST_CHECK(record.m_value.m_data != value.m_data); - - BOOST_CHECK(record.m_key == key); - if (metadataStr) - { - const std::string newValueStr = *metadataStr + valueStr; - const auto newValue = Utils::ConvertFromString(newValueStr.c_str()); - BOOST_CHECK(record.m_value == newValue); - } - else - { - BOOST_CHECK(record.m_value == value); - } + if (metadataStr) { + auto metaValue = + Utils::ConvertFromString(metadataStr->c_str()); + recordBuffer = serializer.Serialize(key, value, metaValue, buffer.data(), + bufferSize); + } else { + recordBuffer = + serializer.Serialize(key, value, buffer.data(), bufferSize); } - static constexpr std::size_t c_keyTypeSize = sizeof(Record::Key::size_type); - static constexpr std::size_t c_valueTypeSize = sizeof(Record::Value::size_type); + const auto record = serializer.Deserialize(*recordBuffer); + + // Make sure the data serialized is in different memory location. 
+ BOOST_CHECK(record.m_key.m_data != key.m_data); + BOOST_CHECK(record.m_value.m_data != value.m_data); + + BOOST_CHECK(record.m_key == key); + if (metadataStr) { + const std::string newValueStr = *metadataStr + valueStr; + const auto newValue = + Utils::ConvertFromString(newValueStr.c_str()); + BOOST_CHECK(record.m_value == newValue); + } else { + BOOST_CHECK(record.m_value == value); + } + } + + static constexpr std::size_t c_keyTypeSize = sizeof(Record::Key::size_type); + static constexpr std::size_t c_valueTypeSize = + sizeof(Record::Value::size_type); }; BOOST_FIXTURE_TEST_SUITE(HashTableRecordTests, HashTableRecordTestFixture) -BOOST_AUTO_TEST_CASE(RunAll) -{ - // Run all permutations for Run(), which takes three booleans. - for (int i = 0; i < 8; ++i) - { - Run( - !!((i >> 2) & 1), - !!((i >> 1) & 1), - !!((i) & 1)); - } +BOOST_AUTO_TEST_CASE(RunAll) { + // Run all permutations for Run(), which takes three booleans. + for (int i = 0; i < 8; ++i) { + Run(!!((i >> 2) & 1), !!((i >> 1) & 1), !!((i)&1)); + } } +BOOST_AUTO_TEST_CASE(InvalidSizeTest) { + std::vector buffer(100U); -BOOST_AUTO_TEST_CASE(InvalidSizeTest) -{ - std::vector buffer(100U); + RecordSerializer serializer{4, 5}; - RecordSerializer serializer{ 4, 5 }; + const std::string keyStr = "1234"; + const std::string invalidStr = "999999"; + const std::string valueStr = "12345"; - const std::string keyStr = "1234"; - const std::string invalidStr = "999999"; - const std::string valueStr = "12345"; + const auto key = Utils::ConvertFromString(keyStr.c_str()); + const auto value = Utils::ConvertFromString(valueStr.c_str()); - const auto key = Utils::ConvertFromString(keyStr.c_str()); - const auto value = Utils::ConvertFromString(valueStr.c_str()); + const auto invalidKey = + Utils::ConvertFromString(invalidStr.c_str()); + const auto invalidValue = + Utils::ConvertFromString(invalidStr.c_str()); - const auto invalidKey = Utils::ConvertFromString(invalidStr.c_str()); - const auto invalidValue = Utils::ConvertFromString(invalidStr.c_str()); + CHECK_EXCEPTION_THROWN_WITH_MESSAGE( + serializer.Serialize(invalidKey, value, buffer.data(), buffer.size()), + "Invalid key or value sizes are given."); - CHECK_EXCEPTION_THROWN_WITH_MESSAGE( - serializer.Serialize(invalidKey, value, buffer.data(), buffer.size()), - "Invalid key or value sizes are given."); + CHECK_EXCEPTION_THROWN_WITH_MESSAGE( + serializer.Serialize(key, invalidValue, buffer.data(), buffer.size()), + "Invalid key or value sizes are given."); - CHECK_EXCEPTION_THROWN_WITH_MESSAGE( - serializer.Serialize(key, invalidValue, buffer.data(), buffer.size()), - "Invalid key or value sizes are given."); + CHECK_EXCEPTION_THROWN_WITH_MESSAGE( + serializer.Serialize(invalidKey, invalidValue, buffer.data(), + buffer.size()), + "Invalid key or value sizes are given."); - CHECK_EXCEPTION_THROWN_WITH_MESSAGE( - serializer.Serialize(invalidKey, invalidValue, buffer.data(), buffer.size()), - "Invalid key or value sizes are given."); + // Normal case shouldn't thrown an exception. + serializer.Serialize(key, value, buffer.data(), buffer.size()); - // Normal case shouldn't thrown an exception. 
- serializer.Serialize(key, value, buffer.data(), buffer.size()); + RecordSerializer serializerWithMetaValue{4, 5, 2}; + std::uint16_t metadata = 0; - RecordSerializer serializerWithMetaValue{ 4, 5, 2 }; - std::uint16_t metadata = 0; + Record::Value metaValue{reinterpret_cast(&metadata), + sizeof(metadata)}; - Record::Value metaValue{ - reinterpret_cast(&metadata), - sizeof(metadata) }; + // Normal case shouldn't thrown an exception. + serializerWithMetaValue.Serialize(key, value, metaValue, buffer.data(), + buffer.size()); - // Normal case shouldn't thrown an exception. - serializerWithMetaValue.Serialize(key, value, metaValue, buffer.data(), buffer.size()); - - // Mismatching size is given. - metaValue.m_size = 1; - CHECK_EXCEPTION_THROWN_WITH_MESSAGE( - serializerWithMetaValue.Serialize(key, value, metaValue, buffer.data(), buffer.size()), - "Invalid meta value size is given."); + // Mismatching size is given. + metaValue.m_size = 1; + CHECK_EXCEPTION_THROWN_WITH_MESSAGE( + serializerWithMetaValue.Serialize(key, value, metaValue, buffer.data(), + buffer.size()), + "Invalid meta value size is given."); } BOOST_AUTO_TEST_SUITE_END() -} // namespace UnitTests -} // namespace L4 +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/HashTableServiceTest.cpp b/Unittests/HashTableServiceTest.cpp index f8f9523..533a8df 100644 --- a/Unittests/HashTableServiceTest.cpp +++ b/Unittests/HashTableServiceTest.cpp @@ -1,52 +1,46 @@ #include #include #include +#include "L4/LocalMemory/HashTableService.h" #include "Mocks.h" #include "Utils.h" -#include "L4/LocalMemory/HashTableService.h" -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { -BOOST_AUTO_TEST_CASE(HashTableServiceTest) -{ - std::vector> dataSet; - for (std::uint16_t i = 0U; i < 100; ++i) - { - dataSet.emplace_back("key" + std::to_string(i), "value" + std::to_string(i)); - } - - LocalMemory::HashTableService htService; - htService.AddHashTable( - HashTableConfig("Table1", HashTableConfig::Setting{ 100U })); - htService.AddHashTable( - HashTableConfig( - "Table2", - HashTableConfig::Setting{ 1000U }, - HashTableConfig::Cache{ 1024, std::chrono::seconds{ 1U }, false })); - - for (const auto& data : dataSet) - { - htService.GetContext()["Table1"].Add( - Utils::ConvertFromString(data.first.c_str()), - Utils::ConvertFromString(data.second.c_str())); - } - - // Smoke tests for looking up the data . - { - auto context = htService.GetContext(); - for (const auto& data : dataSet) - { - IReadOnlyHashTable::Value val; - BOOST_CHECK(context["Table1"].Get( - Utils::ConvertFromString(data.first.c_str()), - val)); - BOOST_CHECK(Utils::ConvertToString(val) == data.second); - } +BOOST_AUTO_TEST_CASE(HashTableServiceTest) { + std::vector> dataSet; + for (std::uint16_t i = 0U; i < 100; ++i) { + dataSet.emplace_back("key" + std::to_string(i), + "value" + std::to_string(i)); + } + + LocalMemory::HashTableService htService; + htService.AddHashTable( + HashTableConfig("Table1", HashTableConfig::Setting{100U})); + htService.AddHashTable(HashTableConfig( + "Table2", HashTableConfig::Setting{1000U}, + HashTableConfig::Cache{1024, std::chrono::seconds{1U}, false})); + + for (const auto& data : dataSet) { + htService.GetContext()["Table1"].Add( + Utils::ConvertFromString(data.first.c_str()), + Utils::ConvertFromString( + data.second.c_str())); + } + + // Smoke tests for looking up the data . 
+ { + auto context = htService.GetContext(); + for (const auto& data : dataSet) { + IReadOnlyHashTable::Value val; + BOOST_CHECK(context["Table1"].Get( + Utils::ConvertFromString(data.first.c_str()), + val)); + BOOST_CHECK(Utils::ConvertToString(val) == data.second); } + } } -} // namespace UnitTests -} // namespace L4 +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/Mocks.h b/Unittests/Mocks.h index 6068dc7..69df590 100644 --- a/Unittests/Mocks.h +++ b/Unittests/Mocks.h @@ -3,34 +3,23 @@ #include "L4/Epoch/IEpochActionManager.h" #include "L4/Log/PerfLogger.h" -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { -class MockPerfLogger : public IPerfLogger -{ - virtual void Log(const IData& data) override - { - (void)data; - } +class MockPerfLogger : public IPerfLogger { + virtual void Log(const IData& data) override { (void)data; } }; -struct MockEpochManager : public IEpochActionManager -{ - MockEpochManager() - : m_numRegisterActionsCalled(0) - { - } +struct MockEpochManager : public IEpochActionManager { + MockEpochManager() : m_numRegisterActionsCalled(0) {} - virtual void RegisterAction(Action&& action) override - { - ++m_numRegisterActionsCalled; - action(); - }; + virtual void RegisterAction(Action&& action) override { + ++m_numRegisterActionsCalled; + action(); + }; - std::uint16_t m_numRegisterActionsCalled; + std::uint16_t m_numRegisterActionsCalled; }; -} // namespace UnitTests -} // namespace L4 \ No newline at end of file +} // namespace UnitTests +} // namespace L4 \ No newline at end of file diff --git a/Unittests/PerfInfoTest.cpp b/Unittests/PerfInfoTest.cpp index 66c0ed9..2a7039b 100644 --- a/Unittests/PerfInfoTest.cpp +++ b/Unittests/PerfInfoTest.cpp @@ -2,103 +2,97 @@ #include #include "L4/Log/PerfLogger.h" -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { -void CheckMinCounters(const HashTablePerfData& htPerfData) -{ - const auto maxValue = (std::numeric_limits::max)(); - /// Check if the min counter values are correctly initialized to max value. - BOOST_CHECK_EQUAL(htPerfData.Get(HashTablePerfCounter::MinValueSize), maxValue); - BOOST_CHECK_EQUAL(htPerfData.Get(HashTablePerfCounter::MinKeySize), maxValue); +void CheckMinCounters(const HashTablePerfData& htPerfData) { + const auto maxValue = (std::numeric_limits::max)(); + /// Check if the min counter values are correctly initialized to max value. 
+ BOOST_CHECK_EQUAL(htPerfData.Get(HashTablePerfCounter::MinValueSize), + maxValue); + BOOST_CHECK_EQUAL(htPerfData.Get(HashTablePerfCounter::MinKeySize), maxValue); } -BOOST_AUTO_TEST_CASE(PerfCountersTest) -{ - enum class TestCounter - { - Counter = 0, - Count - }; +BOOST_AUTO_TEST_CASE(PerfCountersTest) { + enum class TestCounter { Counter = 0, Count }; - PerfCounters perfCounters; + PerfCounters perfCounters; - BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 0); - - perfCounters.Set(TestCounter::Counter, 10); - BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10); - - perfCounters.Increment(TestCounter::Counter); - BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 11); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 0); - perfCounters.Decrement(TestCounter::Counter); - BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10); + perfCounters.Set(TestCounter::Counter, 10); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10); - perfCounters.Add(TestCounter::Counter, 5); - BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 15); + perfCounters.Increment(TestCounter::Counter); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 11); - perfCounters.Subtract(TestCounter::Counter, 10); - BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 5); + perfCounters.Decrement(TestCounter::Counter); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10); - perfCounters.Max(TestCounter::Counter, 10); - BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10); + perfCounters.Add(TestCounter::Counter, 5); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 15); - perfCounters.Max(TestCounter::Counter, 9); - BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10); + perfCounters.Subtract(TestCounter::Counter, 10); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 5); - perfCounters.Min(TestCounter::Counter, 1); - BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 1); + perfCounters.Max(TestCounter::Counter, 10); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10); - perfCounters.Min(TestCounter::Counter, 10); - BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 1); + perfCounters.Max(TestCounter::Counter, 9); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10); + + perfCounters.Min(TestCounter::Counter, 1); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 1); + + perfCounters.Min(TestCounter::Counter, 10); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 1); } +BOOST_AUTO_TEST_CASE(PerfDataTest) { + PerfData testPerfData; -BOOST_AUTO_TEST_CASE(PerfDataTest) -{ - PerfData testPerfData; + BOOST_CHECK(testPerfData.GetHashTablesPerfData().empty()); - BOOST_CHECK(testPerfData.GetHashTablesPerfData().empty()); + HashTablePerfData htPerfData1; + HashTablePerfData htPerfData2; + HashTablePerfData htPerfData3; - HashTablePerfData htPerfData1; - HashTablePerfData htPerfData2; - HashTablePerfData htPerfData3; + CheckMinCounters(htPerfData1); + CheckMinCounters(htPerfData2); + CheckMinCounters(htPerfData3); - CheckMinCounters(htPerfData1); - CheckMinCounters(htPerfData2); - CheckMinCounters(htPerfData3); + testPerfData.AddHashTablePerfData("HT1", htPerfData1); + testPerfData.AddHashTablePerfData("HT2", htPerfData2); + testPerfData.AddHashTablePerfData("HT3", htPerfData3); - testPerfData.AddHashTablePerfData("HT1", htPerfData1); - testPerfData.AddHashTablePerfData("HT2", htPerfData2); - testPerfData.AddHashTablePerfData("HT3", htPerfData3); + /// 
Update counters and check if they are correctly updated. + htPerfData1.Set(HashTablePerfCounter::TotalKeySize, 10); + htPerfData2.Set(HashTablePerfCounter::TotalKeySize, 20); + htPerfData3.Set(HashTablePerfCounter::TotalKeySize, 30); - /// Update counters and check if they are correctly updated. - htPerfData1.Set(HashTablePerfCounter::TotalKeySize, 10); - htPerfData2.Set(HashTablePerfCounter::TotalKeySize, 20); - htPerfData3.Set(HashTablePerfCounter::TotalKeySize, 30); + // Check if the hash table perf data is correctly registered. + const auto& hashTablesPerfData = testPerfData.GetHashTablesPerfData(); + BOOST_CHECK_EQUAL(hashTablesPerfData.size(), 3U); - // Check if the hash table perf data is correctly registered. - const auto& hashTablesPerfData = testPerfData.GetHashTablesPerfData(); - BOOST_CHECK_EQUAL(hashTablesPerfData.size(), 3U); - - { - auto htPerfDataIt = hashTablesPerfData.find("HT1"); - BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end()); - BOOST_CHECK_EQUAL(htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 10); - } - { - auto htPerfDataIt = hashTablesPerfData.find("HT2"); - BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end()); - BOOST_CHECK_EQUAL(htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 20); - } - { - auto htPerfDataIt = hashTablesPerfData.find("HT3"); - BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end()); - BOOST_CHECK_EQUAL(htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 30); - } + { + auto htPerfDataIt = hashTablesPerfData.find("HT1"); + BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end()); + BOOST_CHECK_EQUAL( + htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 10); + } + { + auto htPerfDataIt = hashTablesPerfData.find("HT2"); + BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end()); + BOOST_CHECK_EQUAL( + htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 20); + } + { + auto htPerfDataIt = hashTablesPerfData.find("HT3"); + BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end()); + BOOST_CHECK_EQUAL( + htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 30); + } } -} // namespace UnitTests -} // namespace L4 +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/ReadWriteHashTableSerializerTest.cpp b/Unittests/ReadWriteHashTableSerializerTest.cpp index 6706185..00cc183 100644 --- a/Unittests/ReadWriteHashTableSerializerTest.cpp +++ b/Unittests/ReadWriteHashTableSerializerTest.cpp @@ -1,18 +1,16 @@ #include -#include #include +#include #include -#include "Utils.h" -#include "Mocks.h" #include "L4/HashTable/ReadWrite/HashTable.h" #include "L4/HashTable/ReadWrite/Serializer.h" -#include "L4/Log/PerfCounter.h" #include "L4/LocalMemory/Memory.h" +#include "L4/Log/PerfCounter.h" +#include "Mocks.h" +#include "Utils.h" -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { using namespace HashTable::ReadWrite; @@ -32,148 +30,127 @@ void ValidateSerializer( const KeyValuePairs& keyValuePairs, const Utils::ExpectedCounterValues& expectedCounterValuesAfterLoad, const Utils::ExpectedCounterValues& expectedCounterValuesAfterSerialization, - const Utils::ExpectedCounterValues& expectedCounterValuesAfterDeserialization) -{ - Memory memory; - MockEpochManager epochManager; + const Utils::ExpectedCounterValues& + expectedCounterValuesAfterDeserialization) { + Memory memory; + MockEpochManager epochManager; - auto hashTableHolder{ - memory.MakeUnique( - HashTable::Setting{ 5 }, memory.GetAllocator()) }; - 
BOOST_CHECK(hashTableHolder != nullptr); + auto hashTableHolder{memory.MakeUnique(HashTable::Setting{5}, + memory.GetAllocator())}; + BOOST_CHECK(hashTableHolder != nullptr); - WritableHashTable writableHashTable(*hashTableHolder, epochManager); + WritableHashTable writableHashTable(*hashTableHolder, + epochManager); - // Insert the given key/value pairs to the hash table. - for (const auto& pair : keyValuePairs) - { - auto key = Utils::ConvertFromString(pair.first.c_str()); - auto val = Utils::ConvertFromString(pair.second.c_str()); + // Insert the given key/value pairs to the hash table. + for (const auto& pair : keyValuePairs) { + auto key = + Utils::ConvertFromString(pair.first.c_str()); + auto val = Utils::ConvertFromString( + pair.second.c_str()); - writableHashTable.Add(key, val); - } + writableHashTable.Add(key, val); + } - const auto& perfData = writableHashTable.GetPerfData(); + const auto& perfData = writableHashTable.GetPerfData(); - Utils::ValidateCounters(perfData, expectedCounterValuesAfterLoad); + Utils::ValidateCounters(perfData, expectedCounterValuesAfterLoad); - // Now write the hash table to the stream. - std::ostringstream outStream; - serializer.Serialize(*hashTableHolder, outStream); - Utils::ValidateCounters(perfData, expectedCounterValuesAfterSerialization); + // Now write the hash table to the stream. + std::ostringstream outStream; + serializer.Serialize(*hashTableHolder, outStream); + Utils::ValidateCounters(perfData, expectedCounterValuesAfterSerialization); - // Read in the hash table from the stream and validate it. - std::istringstream inStream(outStream.str()); + // Read in the hash table from the stream and validate it. + std::istringstream inStream(outStream.str()); - // version == 0 means that it's run through the HashTableSerializer, thus the following can be skipped. - if (serializerVersion != 0) - { - std::uint8_t actualSerializerVersion = 0; - DeserializerHelper(inStream).Deserialize(actualSerializerVersion); - BOOST_CHECK(actualSerializerVersion == serializerVersion); - } - else - { - BOOST_REQUIRE(typeid(L4::HashTable::ReadWrite::Serializer) == typeid(Serializer)); - } + // version == 0 means that it's run through the HashTableSerializer, thus the + // following can be skipped. + if (serializerVersion != 0) { + std::uint8_t actualSerializerVersion = 0; + DeserializerHelper(inStream).Deserialize(actualSerializerVersion); + BOOST_CHECK(actualSerializerVersion == serializerVersion); + } else { + BOOST_REQUIRE(typeid(L4::HashTable::ReadWrite::Serializer< + HashTable, ReadOnlyHashTable>) == typeid(Serializer)); + } - auto newHashTableHolder = deserializer.Deserialize(memory, inStream); - BOOST_CHECK(newHashTableHolder != nullptr); + auto newHashTableHolder = deserializer.Deserialize(memory, inStream); + BOOST_CHECK(newHashTableHolder != nullptr); - WritableHashTable newWritableHashTable(*newHashTableHolder, epochManager); + WritableHashTable newWritableHashTable(*newHashTableHolder, + epochManager); - const auto& newPerfData = newWritableHashTable.GetPerfData(); + const auto& newPerfData = newWritableHashTable.GetPerfData(); - Utils::ValidateCounters(newPerfData, expectedCounterValuesAfterDeserialization); + Utils::ValidateCounters(newPerfData, + expectedCounterValuesAfterDeserialization); - // Make sure all the key/value pairs exist after deserialization. 
- for (const auto& pair : keyValuePairs) - { - auto key = Utils::ConvertFromString(pair.first.c_str()); - IReadOnlyHashTable::Value val; - BOOST_CHECK(newWritableHashTable.Get(key, val)); - BOOST_CHECK(Utils::ConvertToString(val) == pair.second); - } + // Make sure all the key/value pairs exist after deserialization. + for (const auto& pair : keyValuePairs) { + auto key = + Utils::ConvertFromString(pair.first.c_str()); + IReadOnlyHashTable::Value val; + BOOST_CHECK(newWritableHashTable.Get(key, val)); + BOOST_CHECK(Utils::ConvertToString(val) == pair.second); + } } - -BOOST_AUTO_TEST_CASE(CurrentSerializerTest) -{ - ValidateSerializer( - Current::Serializer{}, - Current::Deserializer{ L4::Utils::Properties{} }, - Current::c_version, - { - { "hello1", " world1" }, - { "hello2", " world2" }, - { "hello3", " world3" } - }, - { - { HashTablePerfCounter::RecordsCount, 3 }, - { HashTablePerfCounter::BucketsCount, 5 }, - { HashTablePerfCounter::TotalKeySize, 18 }, - { HashTablePerfCounter::TotalValueSize, 21 }, - { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 }, - { HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 } - }, - { - { HashTablePerfCounter::RecordsCount, 3 }, - { HashTablePerfCounter::BucketsCount, 5 }, - { HashTablePerfCounter::TotalKeySize, 18 }, - { HashTablePerfCounter::TotalValueSize, 21 }, - { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 }, - { HashTablePerfCounter::RecordsCountSavedFromSerializer, 3 } - }, - { - { HashTablePerfCounter::RecordsCount, 3 }, - { HashTablePerfCounter::BucketsCount, 5 }, - { HashTablePerfCounter::TotalKeySize, 18 }, - { HashTablePerfCounter::TotalValueSize, 21 }, - { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 3 }, - { HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 } - }); +BOOST_AUTO_TEST_CASE(CurrentSerializerTest) { + ValidateSerializer( + Current::Serializer{}, + Current::Deserializer{ + L4::Utils::Properties{}}, + Current::c_version, + {{"hello1", " world1"}, {"hello2", " world2"}, {"hello3", " world3"}}, + {{HashTablePerfCounter::RecordsCount, 3}, + {HashTablePerfCounter::BucketsCount, 5}, + {HashTablePerfCounter::TotalKeySize, 18}, + {HashTablePerfCounter::TotalValueSize, 21}, + {HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0}, + {HashTablePerfCounter::RecordsCountSavedFromSerializer, 0}}, + {{HashTablePerfCounter::RecordsCount, 3}, + {HashTablePerfCounter::BucketsCount, 5}, + {HashTablePerfCounter::TotalKeySize, 18}, + {HashTablePerfCounter::TotalValueSize, 21}, + {HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0}, + {HashTablePerfCounter::RecordsCountSavedFromSerializer, 3}}, + {{HashTablePerfCounter::RecordsCount, 3}, + {HashTablePerfCounter::BucketsCount, 5}, + {HashTablePerfCounter::TotalKeySize, 18}, + {HashTablePerfCounter::TotalValueSize, 21}, + {HashTablePerfCounter::RecordsCountLoadedFromSerializer, 3}, + {HashTablePerfCounter::RecordsCountSavedFromSerializer, 0}}); } - -BOOST_AUTO_TEST_CASE(HashTableSerializeTest) -{ - // This test case tests end to end scenario using the HashTableSerializer. 
- ValidateSerializer( - Serializer{}, - Deserializer{ L4::Utils::Properties{} }, - 0U, - { - { "hello1", " world1" }, - { "hello2", " world2" }, - { "hello3", " world3" } - }, - { - { HashTablePerfCounter::RecordsCount, 3 }, - { HashTablePerfCounter::BucketsCount, 5 }, - { HashTablePerfCounter::TotalKeySize, 18 }, - { HashTablePerfCounter::TotalValueSize, 21 }, - { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 }, - { HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 } - }, - { - { HashTablePerfCounter::RecordsCount, 3 }, - { HashTablePerfCounter::BucketsCount, 5 }, - { HashTablePerfCounter::TotalKeySize, 18 }, - { HashTablePerfCounter::TotalValueSize, 21 }, - { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 }, - { HashTablePerfCounter::RecordsCountSavedFromSerializer, 3 } - }, - { - { HashTablePerfCounter::RecordsCount, 3 }, - { HashTablePerfCounter::BucketsCount, 5 }, - { HashTablePerfCounter::TotalKeySize, 18 }, - { HashTablePerfCounter::TotalValueSize, 21 }, - { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 3 }, - { HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 } - }); +BOOST_AUTO_TEST_CASE(HashTableSerializeTest) { + // This test case tests end to end scenario using the HashTableSerializer. + ValidateSerializer( + Serializer{}, + Deserializer{ + L4::Utils::Properties{}}, + 0U, {{"hello1", " world1"}, {"hello2", " world2"}, {"hello3", " world3"}}, + {{HashTablePerfCounter::RecordsCount, 3}, + {HashTablePerfCounter::BucketsCount, 5}, + {HashTablePerfCounter::TotalKeySize, 18}, + {HashTablePerfCounter::TotalValueSize, 21}, + {HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0}, + {HashTablePerfCounter::RecordsCountSavedFromSerializer, 0}}, + {{HashTablePerfCounter::RecordsCount, 3}, + {HashTablePerfCounter::BucketsCount, 5}, + {HashTablePerfCounter::TotalKeySize, 18}, + {HashTablePerfCounter::TotalValueSize, 21}, + {HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0}, + {HashTablePerfCounter::RecordsCountSavedFromSerializer, 3}}, + {{HashTablePerfCounter::RecordsCount, 3}, + {HashTablePerfCounter::BucketsCount, 5}, + {HashTablePerfCounter::TotalKeySize, 18}, + {HashTablePerfCounter::TotalValueSize, 21}, + {HashTablePerfCounter::RecordsCountLoadedFromSerializer, 3}, + {HashTablePerfCounter::RecordsCountSavedFromSerializer, 0}}); } BOOST_AUTO_TEST_SUITE_END() -} // namespace UnitTests -} // namespace L4 +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/ReadWriteHashTableTest.cpp b/Unittests/ReadWriteHashTableTest.cpp index 8732c3c..82cc2cd 100644 --- a/Unittests/ReadWriteHashTableTest.cpp +++ b/Unittests/ReadWriteHashTableTest.cpp @@ -1,676 +1,626 @@ #include -#include "Utils.h" -#include "Mocks.h" #include "CheckedAllocator.h" -#include "L4/Log/PerfCounter.h" #include "L4/HashTable/ReadWrite/HashTable.h" +#include "L4/Log/PerfCounter.h" +#include "Mocks.h" +#include "Utils.h" -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { using namespace HashTable::ReadWrite; -class ReadWriteHashTableTestFixture -{ -protected: - using Allocator = CheckedAllocator<>; - using HashTable = WritableHashTable::HashTable; +class ReadWriteHashTableTestFixture { + protected: + using Allocator = CheckedAllocator<>; + using HashTable = WritableHashTable::HashTable; - ReadWriteHashTableTestFixture() - : m_allocator{} - , m_epochManager{} - {} + ReadWriteHashTableTestFixture() : m_allocator{}, m_epochManager{} {} - Allocator m_allocator; - MockEpochManager m_epochManager; + Allocator m_allocator; + 
MockEpochManager m_epochManager; }; - BOOST_FIXTURE_TEST_SUITE(ReadWriteHashTableTests, ReadWriteHashTableTestFixture) +BOOST_AUTO_TEST_CASE(HashTableTest) { + HashTable hashTable{HashTable::Setting{100, 5}, m_allocator}; + WritableHashTable writableHashTable(hashTable, m_epochManager); + ReadOnlyHashTable readOnlyHashTable(hashTable); -BOOST_AUTO_TEST_CASE(HashTableTest) -{ - HashTable hashTable{ HashTable::Setting{ 100, 5 }, m_allocator }; + const auto& perfData = writableHashTable.GetPerfData(); + + { + // Check empty data. + + std::string keyStr = "hello"; + auto key = + Utils::ConvertFromString(keyStr.c_str()); + + IReadOnlyHashTable::Value data; + BOOST_CHECK(!readOnlyHashTable.Get(key, data)); + + const auto c_counterMaxValue = + (std::numeric_limits::max)(); + Utils::ValidateCounters( + perfData, {{HashTablePerfCounter::RecordsCount, 0}, + {HashTablePerfCounter::BucketsCount, 100}, + {HashTablePerfCounter::ChainingEntriesCount, 0}, + {HashTablePerfCounter::TotalKeySize, 0}, + {HashTablePerfCounter::TotalValueSize, 0}, + {HashTablePerfCounter::MinKeySize, c_counterMaxValue}, + {HashTablePerfCounter::MaxKeySize, 0}, + {HashTablePerfCounter::MinValueSize, c_counterMaxValue}, + {HashTablePerfCounter::MaxValueSize, 0}}); + } + + { + // First record added. + std::string keyStr = "hello"; + std::string valStr = "world"; + + auto key = + Utils::ConvertFromString(keyStr.c_str()); + auto val = + Utils::ConvertFromString(valStr.c_str()); + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + + Utils::ValidateCounters(perfData, + {{HashTablePerfCounter::RecordsCount, 1}, + {HashTablePerfCounter::BucketsCount, 100}, + {HashTablePerfCounter::ChainingEntriesCount, 0}, + {HashTablePerfCounter::TotalKeySize, 5}, + {HashTablePerfCounter::TotalValueSize, 5}, + {HashTablePerfCounter::MinKeySize, 5}, + {HashTablePerfCounter::MaxKeySize, 5}, + {HashTablePerfCounter::MinValueSize, 5}, + {HashTablePerfCounter::MaxValueSize, 5}}); + } + + { + // Second record added. + std::string keyStr = "hello2"; + std::string valStr = "world2"; + + auto key = + Utils::ConvertFromString(keyStr.c_str()); + auto val = + Utils::ConvertFromString(valStr.c_str()); + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + + Utils::ValidateCounters(perfData, + {{HashTablePerfCounter::RecordsCount, 2}, + {HashTablePerfCounter::BucketsCount, 100}, + {HashTablePerfCounter::ChainingEntriesCount, 0}, + {HashTablePerfCounter::TotalKeySize, 11}, + {HashTablePerfCounter::TotalValueSize, 11}, + {HashTablePerfCounter::MinKeySize, 5}, + {HashTablePerfCounter::MaxKeySize, 6}, + {HashTablePerfCounter::MinValueSize, 5}, + {HashTablePerfCounter::MaxValueSize, 6}}); + } + + { + // Update the key with value bigger than the existing values. 
+ std::string keyStr = "hello"; + std::string valStr = "world long string"; + + auto key = + Utils::ConvertFromString(keyStr.c_str()); + auto val = + Utils::ConvertFromString(valStr.c_str()); + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 1); + + Utils::ValidateCounters(perfData, + {{HashTablePerfCounter::RecordsCount, 2}, + {HashTablePerfCounter::BucketsCount, 100}, + {HashTablePerfCounter::ChainingEntriesCount, 0}, + {HashTablePerfCounter::TotalKeySize, 11}, + {HashTablePerfCounter::TotalValueSize, 23}, + {HashTablePerfCounter::MinKeySize, 5}, + {HashTablePerfCounter::MaxKeySize, 6}, + {HashTablePerfCounter::MinValueSize, 5}, + {HashTablePerfCounter::MaxValueSize, 17}}); + } + + { + // Update the key with value smaller than the existing values. + std::string keyStr = "hello2"; + std::string valStr = "wo"; + + auto key = + Utils::ConvertFromString(keyStr.c_str()); + auto val = + Utils::ConvertFromString(valStr.c_str()); + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 2); + + Utils::ValidateCounters(perfData, + {{HashTablePerfCounter::RecordsCount, 2}, + {HashTablePerfCounter::BucketsCount, 100}, + {HashTablePerfCounter::ChainingEntriesCount, 0}, + {HashTablePerfCounter::TotalKeySize, 11}, + {HashTablePerfCounter::TotalValueSize, 19}, + {HashTablePerfCounter::MinKeySize, 5}, + {HashTablePerfCounter::MaxKeySize, 6}, + {HashTablePerfCounter::MinValueSize, 2}, + {HashTablePerfCounter::MaxValueSize, 17}}); + } + + { + // Remove the first key. + std::string keyStr = "hello"; + std::string valStr = ""; + + auto key = + Utils::ConvertFromString(keyStr.c_str()); + auto val = + Utils::ConvertFromString(valStr.c_str()); + + BOOST_CHECK(writableHashTable.Remove(key)); + BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 3); + + // Note that the Remove() doesn't change Min/Max counters by design. + Utils::ValidateCounters(perfData, + {{HashTablePerfCounter::RecordsCount, 1}, + {HashTablePerfCounter::BucketsCount, 100}, + {HashTablePerfCounter::ChainingEntriesCount, 0}, + {HashTablePerfCounter::TotalKeySize, 6}, + {HashTablePerfCounter::TotalValueSize, 2}, + {HashTablePerfCounter::MinKeySize, 5}, + {HashTablePerfCounter::MaxKeySize, 6}, + {HashTablePerfCounter::MinValueSize, 2}, + {HashTablePerfCounter::MaxValueSize, 17}}); + + // Remove the second key. + keyStr = "hello2"; + key = Utils::ConvertFromString(keyStr.c_str()); + + BOOST_CHECK(writableHashTable.Remove(key)); + BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 4); + + Utils::ValidateCounters(perfData, + {{HashTablePerfCounter::RecordsCount, 0}, + {HashTablePerfCounter::BucketsCount, 100}, + {HashTablePerfCounter::ChainingEntriesCount, 0}, + {HashTablePerfCounter::TotalKeySize, 0}, + {HashTablePerfCounter::TotalValueSize, 0}, + {HashTablePerfCounter::MinKeySize, 5}, + {HashTablePerfCounter::MaxKeySize, 6}, + {HashTablePerfCounter::MinValueSize, 2}, + {HashTablePerfCounter::MaxValueSize, 17}}); + + // Removing the key that doesn't exist. 
+ BOOST_CHECK(!writableHashTable.Remove(key)); + Utils::ValidateCounters(perfData, + {{HashTablePerfCounter::RecordsCount, 0}, + {HashTablePerfCounter::BucketsCount, 100}, + {HashTablePerfCounter::ChainingEntriesCount, 0}, + {HashTablePerfCounter::TotalKeySize, 0}, + {HashTablePerfCounter::TotalValueSize, 0}, + {HashTablePerfCounter::MinKeySize, 5}, + {HashTablePerfCounter::MaxKeySize, 6}, + {HashTablePerfCounter::MinValueSize, 2}, + {HashTablePerfCounter::MaxValueSize, 17}}); + } +} + +BOOST_AUTO_TEST_CASE(HashTableWithOneBucketTest) { + Allocator allocator; + HashTable hashTable{HashTable::Setting{1}, allocator}; + WritableHashTable writableHashTable(hashTable, m_epochManager); + ReadOnlyHashTable readOnlyHashTable(hashTable); + + const auto& perfData = writableHashTable.GetPerfData(); + + Utils::ValidateCounters(perfData, + {{HashTablePerfCounter::ChainingEntriesCount, 0}}); + + const auto initialTotalIndexSize = + perfData.Get(HashTablePerfCounter::TotalIndexSize); + const std::size_t c_dataSetSize = HashTable::Entry::c_numDataPerEntry + 5U; + + std::size_t expectedTotalKeySize = 0U; + std::size_t expectedTotalValueSize = 0U; + + for (auto i = 0U; i < c_dataSetSize; ++i) { + std::stringstream keyStream; + keyStream << "key" << i; + + std::stringstream valStream; + valStream << "value" << i; + + std::string keyStr = keyStream.str(); + std::string valStr = valStream.str(); + + auto key = + Utils::ConvertFromString(keyStr.c_str()); + auto val = + Utils::ConvertFromString(valStr.c_str()); + + expectedTotalKeySize += key.m_size; + expectedTotalValueSize += val.m_size; + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + } + + using L4::HashTable::RecordSerializer; + + // Variable key/value sizes. + const auto recordOverhead = + RecordSerializer{0U, 0U}.CalculateRecordOverhead(); + + Utils::ValidateCounters( + perfData, {{HashTablePerfCounter::RecordsCount, c_dataSetSize}, + {HashTablePerfCounter::BucketsCount, 1}, + {HashTablePerfCounter::MaxBucketChainLength, 2}, + {HashTablePerfCounter::ChainingEntriesCount, 1}, + {HashTablePerfCounter::TotalKeySize, expectedTotalKeySize}, + {HashTablePerfCounter::TotalValueSize, expectedTotalValueSize}, + {HashTablePerfCounter::TotalIndexSize, + initialTotalIndexSize + sizeof(HashTable::Entry) + + (c_dataSetSize * recordOverhead)}}); + + // Now replace with new values. 
+ expectedTotalValueSize = 0U; + + for (auto i = 0U; i < c_dataSetSize; ++i) { + std::stringstream keyStream; + keyStream << "key" << i; + + std::stringstream valStream; + valStream << "val" << i; + + std::string keyStr = keyStream.str(); + std::string valStr = valStream.str(); + + auto key = + Utils::ConvertFromString(keyStr.c_str()); + auto val = + Utils::ConvertFromString(valStr.c_str()); + + expectedTotalValueSize += val.m_size; + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + } + + Utils::ValidateCounters( + perfData, {{HashTablePerfCounter::RecordsCount, c_dataSetSize}, + {HashTablePerfCounter::BucketsCount, 1}, + {HashTablePerfCounter::MaxBucketChainLength, 2}, + {HashTablePerfCounter::ChainingEntriesCount, 1}, + {HashTablePerfCounter::TotalKeySize, expectedTotalKeySize}, + {HashTablePerfCounter::TotalValueSize, expectedTotalValueSize}, + {HashTablePerfCounter::TotalIndexSize, + initialTotalIndexSize + sizeof(HashTable::Entry) + + (c_dataSetSize * recordOverhead)}}); + + // Now remove all key-value. + for (auto i = 0U; i < c_dataSetSize; ++i) { + std::stringstream keyStream; + keyStream << "key" << i; + + std::string keyStr = keyStream.str(); + auto key = + Utils::ConvertFromString(keyStr.c_str()); + + BOOST_CHECK(writableHashTable.Remove(key)); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(!readOnlyHashTable.Get(key, value)); + } + + Utils::ValidateCounters(perfData, + {{HashTablePerfCounter::RecordsCount, 0}, + {HashTablePerfCounter::BucketsCount, 1}, + {HashTablePerfCounter::MaxBucketChainLength, 2}, + {HashTablePerfCounter::ChainingEntriesCount, 1}, + {HashTablePerfCounter::TotalKeySize, 0}, + {HashTablePerfCounter::TotalValueSize, 0}, + {HashTablePerfCounter::TotalIndexSize, + initialTotalIndexSize + sizeof(HashTable::Entry)}}); + + // Try to add back to the same bucket (reusing existing entries) + expectedTotalKeySize = 0U; + expectedTotalValueSize = 0U; + + for (auto i = 0U; i < c_dataSetSize; ++i) { + std::stringstream keyStream; + keyStream << "key" << i; + + std::stringstream valStream; + valStream << "value" << i; + + std::string keyStr = keyStream.str(); + std::string valStr = valStream.str(); + + auto key = + Utils::ConvertFromString(keyStr.c_str()); + auto val = + Utils::ConvertFromString(valStr.c_str()); + + expectedTotalKeySize += key.m_size; + expectedTotalValueSize += val.m_size; + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + } + + Utils::ValidateCounters( + perfData, {{HashTablePerfCounter::RecordsCount, c_dataSetSize}, + {HashTablePerfCounter::BucketsCount, 1}, + {HashTablePerfCounter::MaxBucketChainLength, 2}, + {HashTablePerfCounter::ChainingEntriesCount, 1}, + {HashTablePerfCounter::TotalKeySize, expectedTotalKeySize}, + {HashTablePerfCounter::TotalValueSize, expectedTotalValueSize}, + {HashTablePerfCounter::TotalIndexSize, + initialTotalIndexSize + sizeof(HashTable::Entry) + + (c_dataSetSize * recordOverhead)}}); +} + +BOOST_AUTO_TEST_CASE(AddRemoveSameKeyTest) { + HashTable hashTable{HashTable::Setting{100, 5}, m_allocator}; + WritableHashTable writableHashTable(hashTable, m_epochManager); + ReadOnlyHashTable readOnlyHashTable(hashTable); + + // Add two key/value pairs. 
+ auto key1 = Utils::ConvertFromString("key1"); + auto val1 = Utils::ConvertFromString("val1"); + writableHashTable.Add(key1, val1); + + IReadOnlyHashTable::Value valueRetrieved; + BOOST_CHECK(readOnlyHashTable.Get(key1, valueRetrieved)); + BOOST_CHECK(valueRetrieved.m_size == val1.m_size); + BOOST_CHECK(!memcmp(valueRetrieved.m_data, val1.m_data, val1.m_size)); + + auto key2 = Utils::ConvertFromString("key2"); + auto val2 = Utils::ConvertFromString("val2"); + writableHashTable.Add(key2, val2); + + BOOST_CHECK(readOnlyHashTable.Get(key2, valueRetrieved)); + BOOST_CHECK(valueRetrieved.m_size == val2.m_size); + BOOST_CHECK(!memcmp(valueRetrieved.m_data, val2.m_data, val2.m_size)); + + const auto& perfData = writableHashTable.GetPerfData(); + + // Now remove the first record with key = "key1", which is at the head of the + // chain. + BOOST_CHECK(writableHashTable.Remove(key1)); + BOOST_CHECK(!readOnlyHashTable.Get(key1, valueRetrieved)); + Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 1); + + // Now try update the record with key = "key2". This should correctly update + // the existing record instead of using the empty slot created by removing the + // record with key = "key1". + auto newVal2 = Utils::ConvertFromString("newVal2"); + writableHashTable.Add(key2, newVal2); + + BOOST_CHECK(readOnlyHashTable.Get(key2, valueRetrieved)); + BOOST_CHECK(valueRetrieved.m_size == newVal2.m_size); + BOOST_CHECK(!memcmp(valueRetrieved.m_data, newVal2.m_data, newVal2.m_size)); + Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 1); + + // Remove the record with key = "key2". + BOOST_CHECK(writableHashTable.Remove(key2)); + BOOST_CHECK(!writableHashTable.Remove(key2)); + Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 0); +} + +BOOST_AUTO_TEST_CASE(FixedKeyValueHashTableTest) { + // Fixed 4 byte keys and 6 byte values. + std::vector settings = { + HashTable::Setting{100, 200, 4, 0}, HashTable::Setting{100, 200, 0, 6}, + HashTable::Setting{100, 200, 4, 6}}; + + for (const auto& setting : settings) { + HashTable hashTable{setting, m_allocator}; WritableHashTable writableHashTable(hashTable, m_epochManager); ReadOnlyHashTable readOnlyHashTable(hashTable); - const auto& perfData = writableHashTable.GetPerfData(); + constexpr std::uint8_t c_numRecords = 10; - { - // Check empty data. 
+ for (std::uint8_t i = 0; i < c_numRecords; ++i) { + const std::string keyStr = "key" + std::to_string(i); + const std::string valueStr = "value" + std::to_string(i); - std::string keyStr = "hello"; - auto key = Utils::ConvertFromString(keyStr.c_str()); - - IReadOnlyHashTable::Value data; - BOOST_CHECK(!readOnlyHashTable.Get(key, data)); - - const auto c_counterMaxValue = (std::numeric_limits::max)(); - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, 0 }, - { HashTablePerfCounter::BucketsCount, 100 }, - { HashTablePerfCounter::ChainingEntriesCount, 0 }, - { HashTablePerfCounter::TotalKeySize, 0 }, - { HashTablePerfCounter::TotalValueSize, 0 }, - { HashTablePerfCounter::MinKeySize, c_counterMaxValue }, - { HashTablePerfCounter::MaxKeySize, 0 }, - { HashTablePerfCounter::MinValueSize, c_counterMaxValue }, - { HashTablePerfCounter::MaxValueSize, 0 } - }); + writableHashTable.Add( + Utils::ConvertFromString(keyStr.c_str()), + Utils::ConvertFromString( + valueStr.c_str())); } + Utils::ValidateCounters(writableHashTable.GetPerfData(), + {{HashTablePerfCounter::RecordsCount, 10}, + {HashTablePerfCounter::BucketsCount, 100}, + {HashTablePerfCounter::TotalKeySize, 40}, + {HashTablePerfCounter::TotalValueSize, 60}, + {HashTablePerfCounter::MinKeySize, 4}, + {HashTablePerfCounter::MaxKeySize, 4}, + {HashTablePerfCounter::MinValueSize, 6}, + {HashTablePerfCounter::MaxValueSize, 6}}); - { - // First record added. - std::string keyStr = "hello"; - std::string valStr = "world"; + for (std::uint8_t i = 0; i < c_numRecords; ++i) { + const std::string keyStr = "key" + std::to_string(i); + const std::string valueStr = "value" + std::to_string(i); + const auto expectedValue = + Utils::ConvertFromString(valueStr.c_str()); - auto key = Utils::ConvertFromString(keyStr.c_str()); - auto val = Utils::ConvertFromString(valStr.c_str()); - - writableHashTable.Add(key, val); - - IReadOnlyHashTable::Value value; - BOOST_CHECK(readOnlyHashTable.Get(key, value)); - BOOST_CHECK(value.m_size == valStr.size()); - BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); - - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, 1 }, - { HashTablePerfCounter::BucketsCount, 100 }, - { HashTablePerfCounter::ChainingEntriesCount, 0 }, - { HashTablePerfCounter::TotalKeySize, 5 }, - { HashTablePerfCounter::TotalValueSize, 5 }, - { HashTablePerfCounter::MinKeySize, 5 }, - { HashTablePerfCounter::MaxKeySize, 5 }, - { HashTablePerfCounter::MinValueSize, 5 }, - { HashTablePerfCounter::MaxValueSize, 5 } - }); + IReadOnlyHashTable::Value actualValue; + BOOST_CHECK(readOnlyHashTable.Get( + Utils::ConvertFromString(keyStr.c_str()), + actualValue)); + BOOST_CHECK(expectedValue == actualValue); } - { - // Second record added. 
- std::string keyStr = "hello2"; - std::string valStr = "world2"; - - auto key = Utils::ConvertFromString(keyStr.c_str()); - auto val = Utils::ConvertFromString(valStr.c_str()); - - writableHashTable.Add(key, val); - - IReadOnlyHashTable::Value value; - BOOST_CHECK(readOnlyHashTable.Get(key, value)); - BOOST_CHECK(value.m_size == valStr.size()); - BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); - - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, 2 }, - { HashTablePerfCounter::BucketsCount, 100 }, - { HashTablePerfCounter::ChainingEntriesCount, 0 }, - { HashTablePerfCounter::TotalKeySize, 11 }, - { HashTablePerfCounter::TotalValueSize, 11 }, - { HashTablePerfCounter::MinKeySize, 5 }, - { HashTablePerfCounter::MaxKeySize, 6 }, - { HashTablePerfCounter::MinValueSize, 5 }, - { HashTablePerfCounter::MaxValueSize, 6 } - }); + for (std::uint8_t i = 0; i < c_numRecords; ++i) { + const std::string keyStr = "key" + std::to_string(i); + writableHashTable.Remove( + Utils::ConvertFromString(keyStr.c_str())); } - { - // Update the key with value bigger than the existing values. - std::string keyStr = "hello"; - std::string valStr = "world long string"; - - auto key = Utils::ConvertFromString(keyStr.c_str()); - auto val = Utils::ConvertFromString(valStr.c_str()); - - writableHashTable.Add(key, val); - - IReadOnlyHashTable::Value value; - BOOST_CHECK(readOnlyHashTable.Get(key, value)); - BOOST_CHECK(value.m_size == valStr.size()); - BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); - BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 1); - - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, 2 }, - { HashTablePerfCounter::BucketsCount, 100 }, - { HashTablePerfCounter::ChainingEntriesCount, 0 }, - { HashTablePerfCounter::TotalKeySize, 11 }, - { HashTablePerfCounter::TotalValueSize, 23 }, - { HashTablePerfCounter::MinKeySize, 5 }, - { HashTablePerfCounter::MaxKeySize, 6 }, - { HashTablePerfCounter::MinValueSize, 5 }, - { HashTablePerfCounter::MaxValueSize, 17 } - }); - } - - { - // Update the key with value smaller than the existing values. - std::string keyStr = "hello2"; - std::string valStr = "wo"; - - auto key = Utils::ConvertFromString(keyStr.c_str()); - auto val = Utils::ConvertFromString(valStr.c_str()); - - writableHashTable.Add(key, val); - - IReadOnlyHashTable::Value value; - BOOST_CHECK(readOnlyHashTable.Get(key, value)); - BOOST_CHECK(value.m_size == valStr.size()); - BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); - BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 2); - - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, 2 }, - { HashTablePerfCounter::BucketsCount, 100 }, - { HashTablePerfCounter::ChainingEntriesCount, 0 }, - { HashTablePerfCounter::TotalKeySize, 11 }, - { HashTablePerfCounter::TotalValueSize, 19 }, - { HashTablePerfCounter::MinKeySize, 5 }, - { HashTablePerfCounter::MaxKeySize, 6 }, - { HashTablePerfCounter::MinValueSize, 2 }, - { HashTablePerfCounter::MaxValueSize, 17 } - }); - } - - { - // Remove the first key. - std::string keyStr = "hello"; - std::string valStr = ""; - - auto key = Utils::ConvertFromString(keyStr.c_str()); - auto val = Utils::ConvertFromString(valStr.c_str()); - - BOOST_CHECK(writableHashTable.Remove(key)); - BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 3); - - // Note that the Remove() doesn't change Min/Max counters by design. 
- Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, 1 }, - { HashTablePerfCounter::BucketsCount, 100 }, - { HashTablePerfCounter::ChainingEntriesCount, 0 }, - { HashTablePerfCounter::TotalKeySize, 6 }, - { HashTablePerfCounter::TotalValueSize, 2 }, - { HashTablePerfCounter::MinKeySize, 5 }, - { HashTablePerfCounter::MaxKeySize, 6 }, - { HashTablePerfCounter::MinValueSize, 2 }, - { HashTablePerfCounter::MaxValueSize, 17 } - }); - - // Remove the second key. - keyStr = "hello2"; - key = Utils::ConvertFromString(keyStr.c_str()); - - BOOST_CHECK(writableHashTable.Remove(key)); - BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 4); - - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, 0 }, - { HashTablePerfCounter::BucketsCount, 100 }, - { HashTablePerfCounter::ChainingEntriesCount, 0 }, - { HashTablePerfCounter::TotalKeySize, 0 }, - { HashTablePerfCounter::TotalValueSize, 0 }, - { HashTablePerfCounter::MinKeySize, 5 }, - { HashTablePerfCounter::MaxKeySize, 6 }, - { HashTablePerfCounter::MinValueSize, 2 }, - { HashTablePerfCounter::MaxValueSize, 17 } - }); - - // Removing the key that doesn't exist. - BOOST_CHECK(!writableHashTable.Remove(key)); - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, 0 }, - { HashTablePerfCounter::BucketsCount, 100 }, - { HashTablePerfCounter::ChainingEntriesCount, 0 }, - { HashTablePerfCounter::TotalKeySize, 0 }, - { HashTablePerfCounter::TotalValueSize, 0 }, - { HashTablePerfCounter::MinKeySize, 5 }, - { HashTablePerfCounter::MaxKeySize, 6 }, - { HashTablePerfCounter::MinValueSize, 2 }, - { HashTablePerfCounter::MaxValueSize, 17 } - }); - } + Utils::ValidateCounters(writableHashTable.GetPerfData(), + {{HashTablePerfCounter::RecordsCount, 0}, + {HashTablePerfCounter::BucketsCount, 100}, + {HashTablePerfCounter::TotalKeySize, 0}, + {HashTablePerfCounter::TotalValueSize, 0}}); + } } +BOOST_AUTO_TEST_CASE(HashTableIteratorTest) { + Allocator allocator; + constexpr std::uint32_t c_numBuckets = 10; + HashTable hashTable{HashTable::Setting{c_numBuckets}, allocator}; + WritableHashTable writableHashTable(hashTable, m_epochManager); -BOOST_AUTO_TEST_CASE(HashTableWithOneBucketTest) -{ - Allocator allocator; - HashTable hashTable{ HashTable::Setting{ 1 }, allocator }; - WritableHashTable writableHashTable(hashTable, m_epochManager); - ReadOnlyHashTable readOnlyHashTable(hashTable); + { + // Empty data set, thus iterator cannot move. + auto iter = writableHashTable.GetIterator(); + BOOST_CHECK(!iter->MoveNext()); - const auto& perfData = writableHashTable.GetPerfData(); + CHECK_EXCEPTION_THROWN_WITH_MESSAGE( + iter->GetKey(), "HashTableIterator is not correctly used."); - Utils::ValidateCounters(perfData, { { HashTablePerfCounter::ChainingEntriesCount, 0 } }); + CHECK_EXCEPTION_THROWN_WITH_MESSAGE( + iter->GetValue(), "HashTableIterator is not correctly used."); + } - const auto initialTotalIndexSize = perfData.Get(HashTablePerfCounter::TotalIndexSize); - const std::size_t c_dataSetSize = HashTable::Entry::c_numDataPerEntry + 5U; + using Buffer = std::vector; + using BufferMap = std::map; - std::size_t expectedTotalKeySize = 0U; - std::size_t expectedTotalValueSize = 0U; + BufferMap keyValueMap; - for (auto i = 0U; i < c_dataSetSize; ++i) - { - std::stringstream keyStream; - keyStream << "key" << i; + // The number of records should be such that it will create chained entries + // for at least one bucket. 
So it should be greater than + // HashTable::Entry::c_numDataPerEntry * number of buckets. + constexpr std::uint32_t c_numRecords = + (HashTable::Entry::c_numDataPerEntry * c_numBuckets) + 1; - std::stringstream valStream; - valStream << "value" << i; + for (auto i = 0U; i < c_numRecords; ++i) { + std::stringstream keyStream; + keyStream << "key" << i; - std::string keyStr = keyStream.str(); - std::string valStr = valStream.str(); + std::stringstream valStream; + valStream << "value" << i; - auto key = Utils::ConvertFromString(keyStr.c_str()); - auto val = Utils::ConvertFromString(valStr.c_str()); + std::string keyStr = keyStream.str(); + std::string valStr = valStream.str(); - expectedTotalKeySize += key.m_size; - expectedTotalValueSize += val.m_size; + auto key = + Utils::ConvertFromString(keyStr.c_str()); + auto val = + Utils::ConvertFromString(valStr.c_str()); - writableHashTable.Add(key, val); + writableHashTable.Add(key, val); - IReadOnlyHashTable::Value value; - BOOST_CHECK(readOnlyHashTable.Get(key, value)); - BOOST_CHECK(value.m_size == valStr.size()); - BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + keyValueMap[Buffer(key.m_data, key.m_data + key.m_size)] = + Buffer(val.m_data, val.m_data + val.m_size); + } + + BOOST_REQUIRE(writableHashTable.GetPerfData().Get( + HashTablePerfCounter::MaxBucketChainLength) >= 2); + BOOST_CHECK_EQUAL(keyValueMap.size(), c_numRecords); + + { + BufferMap keyValueMapFromIterator; + + // Validate the data using the iterator. + auto iter = writableHashTable.GetIterator(); + for (auto i = 0U; i < c_numRecords; ++i) { + BOOST_CHECK(iter->MoveNext()); + + const auto& key = iter->GetKey(); + const auto& val = iter->GetValue(); + + keyValueMapFromIterator[Buffer(key.m_data, key.m_data + key.m_size)] = + Buffer(val.m_data, val.m_data + val.m_size); } + BOOST_CHECK(!iter->MoveNext()); + BOOST_CHECK(keyValueMap == keyValueMapFromIterator); - using L4::HashTable::RecordSerializer; - - // Variable key/value sizes. - const auto recordOverhead = RecordSerializer{ 0U, 0U }.CalculateRecordOverhead(); - - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, c_dataSetSize }, - { HashTablePerfCounter::BucketsCount, 1 }, - { HashTablePerfCounter::MaxBucketChainLength, 2 }, - { HashTablePerfCounter::ChainingEntriesCount, 1 }, - { HashTablePerfCounter::TotalKeySize, expectedTotalKeySize }, - { HashTablePerfCounter::TotalValueSize, expectedTotalValueSize }, - { - HashTablePerfCounter::TotalIndexSize, - initialTotalIndexSize + sizeof(HashTable::Entry) + (c_dataSetSize * recordOverhead) - } - }); - - // Now replace with new values. - expectedTotalValueSize = 0U; - - for (auto i = 0U; i < c_dataSetSize; ++i) - { - std::stringstream keyStream; - keyStream << "key" << i; - - std::stringstream valStream; - valStream << "val" << i; - - std::string keyStr = keyStream.str(); - std::string valStr = valStream.str(); - - auto key = Utils::ConvertFromString(keyStr.c_str()); - auto val = Utils::ConvertFromString(valStr.c_str()); - - expectedTotalValueSize += val.m_size; - - writableHashTable.Add(key, val); - - IReadOnlyHashTable::Value value; - BOOST_CHECK(readOnlyHashTable.Get(key, value)); - BOOST_CHECK(value.m_size == valStr.size()); - BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + // Reset should move the iterator to the beginning. 
+ iter->Reset(); + for (auto i = 0U; i < c_numRecords; ++i) { + BOOST_CHECK(iter->MoveNext()); } + BOOST_CHECK(!iter->MoveNext()); + } - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, c_dataSetSize }, - { HashTablePerfCounter::BucketsCount, 1 }, - { HashTablePerfCounter::MaxBucketChainLength, 2 }, - { HashTablePerfCounter::ChainingEntriesCount, 1 }, - { HashTablePerfCounter::TotalKeySize, expectedTotalKeySize }, - { HashTablePerfCounter::TotalValueSize, expectedTotalValueSize }, - { - HashTablePerfCounter::TotalIndexSize, - initialTotalIndexSize + sizeof(HashTable::Entry) + (c_dataSetSize * recordOverhead) - } - }); + // Remove half of the key. + for (auto i = 0U; i < c_numRecords; ++i) { + if (i % 2 == 0U) { + std::stringstream keyStream; + keyStream << "key" << i; - // Now remove all key-value. - for (auto i = 0U; i < c_dataSetSize; ++i) - { - std::stringstream keyStream; - keyStream << "key" << i; + std::string keyStr = keyStream.str(); + auto key = + Utils::ConvertFromString(keyStr.c_str()); - std::string keyStr = keyStream.str(); - auto key = Utils::ConvertFromString(keyStr.c_str()); + BOOST_CHECK(writableHashTable.Remove(key)); - BOOST_CHECK(writableHashTable.Remove(key)); - - IReadOnlyHashTable::Value value; - BOOST_CHECK(!readOnlyHashTable.Get(key, value)); + keyValueMap.erase(Buffer(key.m_data, key.m_data + key.m_size)); } + } - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, 0 }, - { HashTablePerfCounter::BucketsCount, 1 }, - { HashTablePerfCounter::MaxBucketChainLength, 2 }, - { HashTablePerfCounter::ChainingEntriesCount, 1 }, - { HashTablePerfCounter::TotalKeySize, 0 }, - { HashTablePerfCounter::TotalValueSize, 0 }, - { - HashTablePerfCounter::TotalIndexSize, - initialTotalIndexSize + sizeof(HashTable::Entry) - } - }); + BOOST_CHECK_EQUAL(keyValueMap.size(), c_numRecords / 2U); - // Try to add back to the same bucket (reusing existing entries) - expectedTotalKeySize = 0U; - expectedTotalValueSize = 0U; + // Validate only the existing keys are iterated. 
+ { + BufferMap keyValueMapFromIterator; + auto iter = writableHashTable.GetIterator(); + for (auto i = 0U; i < c_numRecords / 2U; ++i) { + BOOST_CHECK(iter->MoveNext()); - for (auto i = 0U; i < c_dataSetSize; ++i) - { - std::stringstream keyStream; - keyStream << "key" << i; + const auto& key = iter->GetKey(); + const auto& val = iter->GetValue(); - std::stringstream valStream; - valStream << "value" << i; - - std::string keyStr = keyStream.str(); - std::string valStr = valStream.str(); - - auto key = Utils::ConvertFromString(keyStr.c_str()); - auto val = Utils::ConvertFromString(valStr.c_str()); - - expectedTotalKeySize += key.m_size; - expectedTotalValueSize += val.m_size; - - writableHashTable.Add(key, val); - - IReadOnlyHashTable::Value value; - BOOST_CHECK(readOnlyHashTable.Get(key, value)); - BOOST_CHECK(value.m_size == valStr.size()); - BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); - } - - Utils::ValidateCounters( - perfData, - { - { HashTablePerfCounter::RecordsCount, c_dataSetSize }, - { HashTablePerfCounter::BucketsCount, 1 }, - { HashTablePerfCounter::MaxBucketChainLength, 2 }, - { HashTablePerfCounter::ChainingEntriesCount, 1 }, - { HashTablePerfCounter::TotalKeySize, expectedTotalKeySize }, - { HashTablePerfCounter::TotalValueSize, expectedTotalValueSize }, - { - HashTablePerfCounter::TotalIndexSize, - initialTotalIndexSize + sizeof(HashTable::Entry) + (c_dataSetSize * recordOverhead) - } - }); -} - - -BOOST_AUTO_TEST_CASE(AddRemoveSameKeyTest) -{ - HashTable hashTable{ HashTable::Setting{ 100, 5 }, m_allocator }; - WritableHashTable writableHashTable(hashTable, m_epochManager); - ReadOnlyHashTable readOnlyHashTable(hashTable); - - // Add two key/value pairs. - auto key1 = Utils::ConvertFromString("key1"); - auto val1 = Utils::ConvertFromString("val1"); - writableHashTable.Add(key1, val1); - - IReadOnlyHashTable::Value valueRetrieved; - BOOST_CHECK(readOnlyHashTable.Get(key1, valueRetrieved)); - BOOST_CHECK(valueRetrieved.m_size == val1.m_size); - BOOST_CHECK(!memcmp(valueRetrieved.m_data, val1.m_data, val1.m_size)); - - auto key2 = Utils::ConvertFromString("key2"); - auto val2 = Utils::ConvertFromString("val2"); - writableHashTable.Add(key2, val2); - - BOOST_CHECK(readOnlyHashTable.Get(key2, valueRetrieved)); - BOOST_CHECK(valueRetrieved.m_size == val2.m_size); - BOOST_CHECK(!memcmp(valueRetrieved.m_data, val2.m_data, val2.m_size)); - - const auto& perfData = writableHashTable.GetPerfData(); - - // Now remove the first record with key = "key1", which is at the head of the chain. - BOOST_CHECK(writableHashTable.Remove(key1)); - BOOST_CHECK(!readOnlyHashTable.Get(key1, valueRetrieved)); - Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 1); - - // Now try update the record with key = "key2". This should correctly update the existing record - // instead of using the empty slot created by removing the record with key = "key1". - auto newVal2 = Utils::ConvertFromString("newVal2"); - writableHashTable.Add(key2, newVal2); - - BOOST_CHECK(readOnlyHashTable.Get(key2, valueRetrieved)); - BOOST_CHECK(valueRetrieved.m_size == newVal2.m_size); - BOOST_CHECK(!memcmp(valueRetrieved.m_data, newVal2.m_data, newVal2.m_size)); - Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 1); - - // Remove the record with key = "key2". 
- BOOST_CHECK(writableHashTable.Remove(key2)); - BOOST_CHECK(!writableHashTable.Remove(key2)); - Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 0); -} - - -BOOST_AUTO_TEST_CASE(FixedKeyValueHashTableTest) -{ - // Fixed 4 byte keys and 6 byte values. - std::vector settings = - { - HashTable::Setting{ 100, 200, 4, 0 }, - HashTable::Setting{ 100, 200, 0, 6 }, - HashTable::Setting{ 100, 200, 4, 6 } - }; - - for (const auto& setting : settings) - { - HashTable hashTable{ setting, m_allocator }; - WritableHashTable writableHashTable(hashTable, m_epochManager); - ReadOnlyHashTable readOnlyHashTable(hashTable); - - constexpr std::uint8_t c_numRecords = 10; - - for (std::uint8_t i = 0; i < c_numRecords; ++i) - { - const std::string keyStr = "key" + std::to_string(i); - const std::string valueStr = "value" + std::to_string(i); - - writableHashTable.Add( - Utils::ConvertFromString(keyStr.c_str()), - Utils::ConvertFromString(valueStr.c_str())); - } - - Utils::ValidateCounters( - writableHashTable.GetPerfData(), - { - { HashTablePerfCounter::RecordsCount, 10 }, - { HashTablePerfCounter::BucketsCount, 100 }, - { HashTablePerfCounter::TotalKeySize, 40 }, - { HashTablePerfCounter::TotalValueSize, 60 }, - { HashTablePerfCounter::MinKeySize, 4 }, - { HashTablePerfCounter::MaxKeySize, 4 }, - { HashTablePerfCounter::MinValueSize, 6 }, - { HashTablePerfCounter::MaxValueSize, 6 } - }); - - for (std::uint8_t i = 0; i < c_numRecords; ++i) - { - const std::string keyStr = "key" + std::to_string(i); - const std::string valueStr = "value" + std::to_string(i); - const auto expectedValue = Utils::ConvertFromString(valueStr.c_str()); - - IReadOnlyHashTable::Value actualValue; - BOOST_CHECK(readOnlyHashTable.Get( - Utils::ConvertFromString(keyStr.c_str()), - actualValue)); - BOOST_CHECK(expectedValue == actualValue); - } - - for (std::uint8_t i = 0; i < c_numRecords; ++i) - { - const std::string keyStr = "key" + std::to_string(i); - writableHashTable.Remove( - Utils::ConvertFromString(keyStr.c_str())); - } - - Utils::ValidateCounters( - writableHashTable.GetPerfData(), - { - { HashTablePerfCounter::RecordsCount, 0 }, - { HashTablePerfCounter::BucketsCount, 100 }, - { HashTablePerfCounter::TotalKeySize, 0 }, - { HashTablePerfCounter::TotalValueSize, 0 } - }); - } -} - - -BOOST_AUTO_TEST_CASE(HashTableIteratorTest) -{ - Allocator allocator; - constexpr std::uint32_t c_numBuckets = 10; - HashTable hashTable{ HashTable::Setting{ c_numBuckets }, allocator }; - WritableHashTable writableHashTable(hashTable, m_epochManager); - - { - // Empty data set, thus iterator cannot move. - auto iter = writableHashTable.GetIterator(); - BOOST_CHECK(!iter->MoveNext()); - - CHECK_EXCEPTION_THROWN_WITH_MESSAGE( - iter->GetKey(), - "HashTableIterator is not correctly used."); - - CHECK_EXCEPTION_THROWN_WITH_MESSAGE( - iter->GetValue(), - "HashTableIterator is not correctly used."); - } - - using Buffer = std::vector; - using BufferMap = std::map; - - BufferMap keyValueMap; - - // The number of records should be such that it will create chained entries - // for at least one bucket. So it should be greater than HashTable::Entry::c_numDataPerEntry * number of buckets. 
- constexpr std::uint32_t c_numRecords = (HashTable::Entry::c_numDataPerEntry * c_numBuckets) + 1; - - for (auto i = 0U; i < c_numRecords; ++i) - { - std::stringstream keyStream; - keyStream << "key" << i; - - std::stringstream valStream; - valStream << "value" << i; - - std::string keyStr = keyStream.str(); - std::string valStr = valStream.str(); - - auto key = Utils::ConvertFromString(keyStr.c_str()); - auto val = Utils::ConvertFromString(valStr.c_str()); - - writableHashTable.Add(key, val); - - keyValueMap[Buffer(key.m_data, key.m_data + key.m_size)] = Buffer(val.m_data, val.m_data + val.m_size); - } - - BOOST_REQUIRE(writableHashTable.GetPerfData().Get(HashTablePerfCounter::MaxBucketChainLength) >= 2); - BOOST_CHECK_EQUAL(keyValueMap.size(), c_numRecords); - - { - BufferMap keyValueMapFromIterator; - - // Validate the data using the iterator. - auto iter = writableHashTable.GetIterator(); - for (auto i = 0U; i < c_numRecords; ++i) - { - BOOST_CHECK(iter->MoveNext()); - - const auto& key = iter->GetKey(); - const auto& val = iter->GetValue(); - - keyValueMapFromIterator[Buffer(key.m_data, key.m_data + key.m_size)] = Buffer(val.m_data, val.m_data + val.m_size); - } - BOOST_CHECK(!iter->MoveNext()); - BOOST_CHECK(keyValueMap == keyValueMapFromIterator); - - // Reset should move the iterator to the beginning. - iter->Reset(); - for (auto i = 0U; i < c_numRecords; ++i) - { - BOOST_CHECK(iter->MoveNext()); - } - BOOST_CHECK(!iter->MoveNext()); - } - - // Remove half of the key. - for (auto i = 0U; i < c_numRecords; ++i) - { - if (i % 2 == 0U) - { - std::stringstream keyStream; - keyStream << "key" << i; - - std::string keyStr = keyStream.str(); - auto key = Utils::ConvertFromString(keyStr.c_str()); - - BOOST_CHECK(writableHashTable.Remove(key)); - - keyValueMap.erase(Buffer(key.m_data, key.m_data + key.m_size)); - } - } - - BOOST_CHECK_EQUAL(keyValueMap.size(), c_numRecords / 2U); - - // Validate only the existing keys are iterated. 
- { - BufferMap keyValueMapFromIterator; - auto iter = writableHashTable.GetIterator(); - for (auto i = 0U; i < c_numRecords / 2U; ++i) - { - BOOST_CHECK(iter->MoveNext()); - - const auto& key = iter->GetKey(); - const auto& val = iter->GetValue(); - - keyValueMapFromIterator[Buffer(key.m_data, key.m_data + key.m_size)] = - Buffer(val.m_data, val.m_data + val.m_size); - } - BOOST_CHECK(!iter->MoveNext()); - BOOST_CHECK(keyValueMap == keyValueMapFromIterator); + keyValueMapFromIterator[Buffer(key.m_data, key.m_data + key.m_size)] = + Buffer(val.m_data, val.m_data + val.m_size); } + BOOST_CHECK(!iter->MoveNext()); + BOOST_CHECK(keyValueMap == keyValueMapFromIterator); + } } BOOST_AUTO_TEST_SUITE_END() -} // namespace UnitTests -} // namespace L4 +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/SettingAdapterTest.cpp b/Unittests/SettingAdapterTest.cpp index 62fa8ab..0c7eb2f 100644 --- a/Unittests/SettingAdapterTest.cpp +++ b/Unittests/SettingAdapterTest.cpp @@ -1,41 +1,37 @@ #include -#include "L4/HashTable/Common/SettingAdapter.h" -#include "L4/HashTable/Common/Record.h" #include "CheckedAllocator.h" +#include "L4/HashTable/Common/Record.h" +#include "L4/HashTable/Common/SettingAdapter.h" -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { -using SharedHashTable = HashTable::SharedHashTable>; +using SharedHashTable = + HashTable::SharedHashTable>; BOOST_AUTO_TEST_SUITE(SettingAdapterTests) -BOOST_AUTO_TEST_CASE(SettingAdapterTestWithDefaultValues) -{ - HashTableConfig::Setting from{ 100U }; - const auto to = HashTable::SettingAdapter{}.Convert(from); +BOOST_AUTO_TEST_CASE(SettingAdapterTestWithDefaultValues) { + HashTableConfig::Setting from{100U}; + const auto to = HashTable::SettingAdapter{}.Convert(from); - BOOST_CHECK_EQUAL(to.m_numBuckets, 100U); - BOOST_CHECK_EQUAL(to.m_numBucketsPerMutex, 1U); - BOOST_CHECK_EQUAL(to.m_fixedKeySize, 0U); - BOOST_CHECK_EQUAL(to.m_fixedValueSize, 0U); + BOOST_CHECK_EQUAL(to.m_numBuckets, 100U); + BOOST_CHECK_EQUAL(to.m_numBucketsPerMutex, 1U); + BOOST_CHECK_EQUAL(to.m_fixedKeySize, 0U); + BOOST_CHECK_EQUAL(to.m_fixedValueSize, 0U); } +BOOST_AUTO_TEST_CASE(SettingAdapterTestWithNonDefaultValues) { + HashTableConfig::Setting from{100U, 10U, 5U, 20U}; + const auto to = HashTable::SettingAdapter{}.Convert(from); -BOOST_AUTO_TEST_CASE(SettingAdapterTestWithNonDefaultValues) -{ - HashTableConfig::Setting from{ 100U, 10U, 5U, 20U }; - const auto to = HashTable::SettingAdapter{}.Convert(from); - - BOOST_CHECK_EQUAL(to.m_numBuckets, 100U); - BOOST_CHECK_EQUAL(to.m_numBucketsPerMutex, 10U); - BOOST_CHECK_EQUAL(to.m_fixedKeySize, 5U); - BOOST_CHECK_EQUAL(to.m_fixedValueSize, 20U); + BOOST_CHECK_EQUAL(to.m_numBuckets, 100U); + BOOST_CHECK_EQUAL(to.m_numBucketsPerMutex, 10U); + BOOST_CHECK_EQUAL(to.m_fixedKeySize, 5U); + BOOST_CHECK_EQUAL(to.m_fixedValueSize, 20U); } BOOST_AUTO_TEST_SUITE_END() -} // namespace UnitTests -} // namespace L4 +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/Utils.cpp b/Unittests/Utils.cpp index 53932f1..765e00c 100644 --- a/Unittests/Utils.cpp +++ b/Unittests/Utils.cpp @@ -1,37 +1,27 @@ -#include #include "Utils.h" +#include -namespace L4 -{ -namespace UnitTests -{ -namespace Utils -{ +namespace L4 { +namespace UnitTests { +namespace Utils { - -void ValidateCounter( - const HashTablePerfData& actual, - HashTablePerfCounter perfCounter, - PerfCounters::TValue expectedValue) -{ - BOOST_CHECK_MESSAGE( - actual.Get(perfCounter) == expectedValue, - 
c_hashTablePerfCounterNames[static_cast(perfCounter)] - << " counter: " - << actual.Get(perfCounter) - << " (actual) != " << expectedValue << " (expected)."); +void ValidateCounter(const HashTablePerfData& actual, + HashTablePerfCounter perfCounter, + PerfCounters::TValue expectedValue) { + BOOST_CHECK_MESSAGE( + actual.Get(perfCounter) == expectedValue, + c_hashTablePerfCounterNames[static_cast(perfCounter)] + << " counter: " << actual.Get(perfCounter) + << " (actual) != " << expectedValue << " (expected)."); } -void ValidateCounters( - const HashTablePerfData& actual, - const ExpectedCounterValues& expected) -{ - for (const auto& expectedCounter : expected) - { - ValidateCounter(actual, expectedCounter.first, expectedCounter.second); - } +void ValidateCounters(const HashTablePerfData& actual, + const ExpectedCounterValues& expected) { + for (const auto& expectedCounter : expected) { + ValidateCounter(actual, expectedCounter.first, expectedCounter.second); + } } -} // namespace Utils -} // namespace UnitTests -} // namespace L4 +} // namespace Utils +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/Utils.h b/Unittests/Utils.h index 8f0b68b..212f28e 100644 --- a/Unittests/Utils.h +++ b/Unittests/Utils.h @@ -1,111 +1,88 @@ #pragma once +#include #include #include #include -#include #include #include "L4/Log/PerfCounter.h" #include "L4/Utils/Exception.h" -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { // Macro CHECK_EXCEPTION_THROWN #define CHECK_EXCEPTION_THROWN(statement) \ -do { \ - bool isExceptionThrown = false;\ - try \ - { \ - statement; \ - } \ - catch (const RuntimeException&) \ - { \ - isExceptionThrown = true; \ - } \ - BOOST_CHECK(isExceptionThrown); \ -} while (0) - - -#define CHECK_EXCEPTION_THROWN_WITH_MESSAGE(statement, message) \ -do { \ - bool isExceptionThrown = false; \ - std::string exceptionMsg; \ - try \ - { \ - statement; \ - } \ - catch (const RuntimeException& ex) \ - { \ - isExceptionThrown = true; \ - exceptionMsg = ex.what(); \ - BOOST_TEST_MESSAGE("Exception Message: " << exceptionMsg); \ - } \ - BOOST_CHECK(isExceptionThrown); \ - BOOST_CHECK(strcmp((message), exceptionMsg.c_str()) == 0); \ -} while (0) + do { \ + bool isExceptionThrown = false; \ + try { \ + statement; \ + } catch (const RuntimeException&) { \ + isExceptionThrown = true; \ + } \ + BOOST_CHECK(isExceptionThrown); \ + } while (0) +#define CHECK_EXCEPTION_THROWN_WITH_MESSAGE(statement, message) \ + do { \ + bool isExceptionThrown = false; \ + std::string exceptionMsg; \ + try { \ + statement; \ + } catch (const RuntimeException& ex) { \ + isExceptionThrown = true; \ + exceptionMsg = ex.what(); \ + BOOST_TEST_MESSAGE("Exception Message: " << exceptionMsg); \ + } \ + BOOST_CHECK(isExceptionThrown); \ + BOOST_CHECK(strcmp((message), exceptionMsg.c_str()) == 0); \ + } while (0) // This will validate the given message is a prefix of the exception message. 
-#define CHECK_EXCEPTION_THROWN_WITH_PREFIX_MESSAGE(statement, message) \ -do { \ - bool isExceptionThrown = false; \ - std::string exceptionMsg; \ - try \ - { \ - statement; \ - } \ - catch (const RuntimeException& ex) \ - { \ - isExceptionThrown = true; \ - exceptionMsg = ex.what(); \ - BOOST_TEST_MESSAGE("Exception Message: " << exceptionMsg); \ - } \ - BOOST_CHECK(isExceptionThrown); \ +#define CHECK_EXCEPTION_THROWN_WITH_PREFIX_MESSAGE(statement, message) \ + do { \ + bool isExceptionThrown = false; \ + std::string exceptionMsg; \ + try { \ + statement; \ + } catch (const RuntimeException& ex) { \ + isExceptionThrown = true; \ + exceptionMsg = ex.what(); \ + BOOST_TEST_MESSAGE("Exception Message: " << exceptionMsg); \ + } \ + BOOST_CHECK(isExceptionThrown); \ BOOST_CHECK(exceptionMsg.compare(0, strlen(message), message) == 0); \ -} while (0) + } while (0) - -namespace Utils -{ +namespace Utils { template -T ConvertFromString(const char* str) -{ - return T( - reinterpret_cast(str), - static_cast(strlen(str))); +T ConvertFromString(const char* str) { + return T(reinterpret_cast(str), + static_cast(strlen(str))); } template -std::string ConvertToString(const T& t) -{ - return std::string(reinterpret_cast(t.m_data), t.m_size); +std::string ConvertToString(const T& t) { + return std::string(reinterpret_cast(t.m_data), t.m_size); } - // Counter related validation util function. -using ExpectedCounterValues - = std::vector< - std::pair< - HashTablePerfCounter, - typename PerfCounters::TValue>>; +using ExpectedCounterValues = + std::vector::TValue>>; // Validate the given perfData against the expected counter value. -void ValidateCounter( - const HashTablePerfData& actual, - HashTablePerfCounter perfCounter, - PerfCounters::TValue expectedValue); +void ValidateCounter(const HashTablePerfData& actual, + HashTablePerfCounter perfCounter, + PerfCounters::TValue expectedValue); // Validate the given perfData against the expected counter values. -void ValidateCounters( - const HashTablePerfData& actual, - const ExpectedCounterValues& expected); +void ValidateCounters(const HashTablePerfData& actual, + const ExpectedCounterValues& expected); -} // namespace Utils -} // namespace UnitTests -} // namespace L4 +} // namespace Utils +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/UtilsTest.cpp b/Unittests/UtilsTest.cpp index b41056e..c4e3f13 100644 --- a/Unittests/UtilsTest.cpp +++ b/Unittests/UtilsTest.cpp @@ -1,54 +1,53 @@ -#include #include +#include #include "L4/Utils/Math.h" -namespace L4 -{ -namespace UnitTests -{ +namespace L4 { +namespace UnitTests { using namespace Utils; -BOOST_AUTO_TEST_CASE(MathTest) -{ - // RoundUp tests. - BOOST_CHECK_EQUAL(Math::RoundUp(5, 10), 10); - BOOST_CHECK_EQUAL(Math::RoundUp(10, 10), 10); - BOOST_CHECK_EQUAL(Math::RoundUp(11, 10), 20); - BOOST_CHECK_EQUAL(Math::RoundUp(5, 0), 5); +BOOST_AUTO_TEST_CASE(MathTest) { + // RoundUp tests. + BOOST_CHECK_EQUAL(Math::RoundUp(5, 10), 10); + BOOST_CHECK_EQUAL(Math::RoundUp(10, 10), 10); + BOOST_CHECK_EQUAL(Math::RoundUp(11, 10), 20); + BOOST_CHECK_EQUAL(Math::RoundUp(5, 0), 5); - // RoundDown tests. - BOOST_CHECK_EQUAL(Math::RoundDown(5, 10), 0); - BOOST_CHECK_EQUAL(Math::RoundDown(10, 10), 10); - BOOST_CHECK_EQUAL(Math::RoundDown(11, 10), 10); - BOOST_CHECK_EQUAL(Math::RoundDown(5, 0), 5); + // RoundDown tests. 
+ BOOST_CHECK_EQUAL(Math::RoundDown(5, 10), 0); + BOOST_CHECK_EQUAL(Math::RoundDown(10, 10), 10); + BOOST_CHECK_EQUAL(Math::RoundDown(11, 10), 10); + BOOST_CHECK_EQUAL(Math::RoundDown(5, 0), 5); - // IsPowerOfTwo tests. - BOOST_CHECK(Math::IsPowerOfTwo(2)); - BOOST_CHECK(Math::IsPowerOfTwo(4)); - BOOST_CHECK(!Math::IsPowerOfTwo(3)); - BOOST_CHECK(!Math::IsPowerOfTwo(0)); + // IsPowerOfTwo tests. + BOOST_CHECK(Math::IsPowerOfTwo(2)); + BOOST_CHECK(Math::IsPowerOfTwo(4)); + BOOST_CHECK(!Math::IsPowerOfTwo(3)); + BOOST_CHECK(!Math::IsPowerOfTwo(0)); - // NextHighestPowerOfTwo tests. - BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(0), 0U); - BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(1), 1U); - BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(2), 2U); - BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(3), 4U); - BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(4), 4U); - BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(5), 8U); - BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(200), 256U); + // NextHighestPowerOfTwo tests. + BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(0), 0U); + BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(1), 1U); + BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(2), 2U); + BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(3), 4U); + BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(4), 4U); + BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(5), 8U); + BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(200), 256U); } +BOOST_AUTO_TEST_CASE(PointerArithmeticTest) { + std::array elements; -BOOST_AUTO_TEST_CASE(PointerArithmeticTest) -{ - std::array elements; - - BOOST_CHECK(reinterpret_cast(Math::PointerArithmetic::Add(&elements[0], sizeof(int))) == &elements[1]); - BOOST_CHECK(reinterpret_cast(Math::PointerArithmetic::Subtract(&elements[1], sizeof(int))) == &elements[0]); - BOOST_CHECK(Math::PointerArithmetic::Distance(&elements[2], &elements[0]) == sizeof(int) * 2U); - BOOST_CHECK(Math::PointerArithmetic::Distance(&elements[0], &elements[2]) == sizeof(int) * 2U); + BOOST_CHECK(reinterpret_cast(Math::PointerArithmetic::Add( + &elements[0], sizeof(int))) == &elements[1]); + BOOST_CHECK(reinterpret_cast(Math::PointerArithmetic::Subtract( + &elements[1], sizeof(int))) == &elements[0]); + BOOST_CHECK(Math::PointerArithmetic::Distance(&elements[2], &elements[0]) == + sizeof(int) * 2U); + BOOST_CHECK(Math::PointerArithmetic::Distance(&elements[0], &elements[2]) == + sizeof(int) * 2U); } -} // namespace UnitTests -} // namespace L4 +} // namespace UnitTests +} // namespace L4 diff --git a/inc/L4/Epoch/Config.h b/inc/L4/Epoch/Config.h index 295b2f3..7c5b648 100644 --- a/inc/L4/Epoch/Config.h +++ b/inc/L4/Epoch/Config.h @@ -1,32 +1,30 @@ #pragma once -#include #include +#include -namespace L4 -{ +namespace L4 { // EpochManagerConfig struct. -struct EpochManagerConfig -{ - // "numActionQueues" indicates how many action containers there will be in order to - // increase the throughput of registering an action. - // "performActionsInParallelThreshold" indicates the threshold value above which - // the actions are performed in parallel. - // "maxNumThreadsToPerformActions" indicates how many threads will be used when - // performing an action in parallel. 
- explicit EpochManagerConfig( - std::uint32_t epochQueueSize = 1000, - std::chrono::milliseconds epochProcessingInterval = std::chrono::milliseconds{ 1000 }, - std::uint8_t numActionQueues = 1) - : m_epochQueueSize{ epochQueueSize } - , m_epochProcessingInterval{ epochProcessingInterval } - , m_numActionQueues{ numActionQueues } - {} +struct EpochManagerConfig { + // "numActionQueues" indicates how many action containers there will be in + // order to increase the throughput of registering an action. + // "performActionsInParallelThreshold" indicates the threshold value above + // which the actions are performed in parallel. + // "maxNumThreadsToPerformActions" indicates how many threads will be used + // when performing an action in parallel. + explicit EpochManagerConfig( + std::uint32_t epochQueueSize = 1000, + std::chrono::milliseconds epochProcessingInterval = + std::chrono::milliseconds{1000}, + std::uint8_t numActionQueues = 1) + : m_epochQueueSize{epochQueueSize}, + m_epochProcessingInterval{epochProcessingInterval}, + m_numActionQueues{numActionQueues} {} - std::uint32_t m_epochQueueSize; - std::chrono::milliseconds m_epochProcessingInterval; - std::uint8_t m_numActionQueues; + std::uint32_t m_epochQueueSize; + std::chrono::milliseconds m_epochProcessingInterval; + std::uint8_t m_numActionQueues; }; -} // namespace L4 +} // namespace L4 diff --git a/inc/L4/Epoch/EpochActionManager.h b/inc/L4/Epoch/EpochActionManager.h index 8e82b0f..ce8e0f6 100644 --- a/inc/L4/Epoch/EpochActionManager.h +++ b/inc/L4/Epoch/EpochActionManager.h @@ -11,53 +11,51 @@ #include "IEpochActionManager.h" #include "Utils/Lock.h" -namespace L4 -{ +namespace L4 { +// EpochActionManager provides functionalities to add actions at an epoch and to +// perform actions up to the given epoch. +class EpochActionManager { + public: + // "numActionQueues" indicates how many action containers there will be in + // order to increase the throughput of registering an action. This will be + // re-calculated to the next highest power of two so that the "&" operator can + // be used for accessing the next queue. + explicit EpochActionManager(std::uint8_t numActionQueues); -// EpochActionManager provides functionalities to add actions at an epoch and to perform -// actions up to the given epoch. -class EpochActionManager -{ -public: - // "numActionQueues" indicates how many action containers there will be in order to - // increase the throughput of registering an action. This will be re-calculated to - // the next highest power of two so that the "&" operator can be used for accessing - // the next queue. - explicit EpochActionManager(std::uint8_t numActionQueues); + // Adds an action at a given epoch counter. + // This function is thread-safe. + void RegisterAction(std::uint64_t epochCounter, + IEpochActionManager::Action&& action); - // Adds an action at a given epoch counter. - // This function is thread-safe. - void RegisterAction(std::uint64_t epochCounter, IEpochActionManager::Action&& action); + // Perform actions whose associated epoch counter value is less than + // the given epoch counter value, and returns the number of actions performed. + std::uint64_t PerformActions(std::uint64_t epochCounter); - // Perform actions whose associated epoch counter value is less than - // the given epoch counter value, and returns the number of actions performed. 
- std::uint64_t PerformActions(std::uint64_t epochCounter); + EpochActionManager(const EpochActionManager&) = delete; + EpochActionManager& operator=(const EpochActionManager&) = delete; - EpochActionManager(const EpochActionManager&) = delete; - EpochActionManager& operator=(const EpochActionManager&) = delete; + private: + using Mutex = Utils::CriticalSection; + using Lock = std::lock_guard; -private: - using Mutex = Utils::CriticalSection; - using Lock = std::lock_guard; + using Actions = std::vector; - using Actions = std::vector; + // The following structure needs to be sorted by the epoch counter. + // If the performance of using std::map becomes an issue, we can revisit this. + using EpochToActions = std::map; - // The following structure needs to be sorted by the epoch counter. - // If the performance of using std::map becomes an issue, we can revisit this. - using EpochToActions = std::map; + using EpochToActionsWithLock = + std::tuple, EpochToActions>; - using EpochToActionsWithLock = std::tuple, EpochToActions>; + // Run actions based on the configuration. + void ApplyActions(Actions& actions); - // Run actions based on the configuration. - void ApplyActions(Actions& actions); + // Stores mapping from a epoch counter to actions to perform. + std::vector m_epochToActionsList; - // Stores mapping from a epoch counter to actions to perform. - std::vector m_epochToActionsList; - - // Used to point to the next EpochToActions to simulate round-robin access. - std::atomic m_counter; + // Used to point to the next EpochToActions to simulate round-robin access. + std::atomic m_counter; }; - -} // namespace L4 \ No newline at end of file +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Epoch/EpochQueue.h b/inc/L4/Epoch/EpochQueue.h index 241d3b6..6d0a02e 100644 --- a/inc/L4/Epoch/EpochQueue.h +++ b/inc/L4/Epoch/EpochQueue.h @@ -7,159 +7,142 @@ #include "Utils/Exception.h" #include "Utils/Lock.h" -namespace L4 -{ +namespace L4 { // EpochQueue struct represents reference counts for each epoch. -// Each value of the queue (fixed-size array) is the reference counts at an index, -// where an index represents an epoch (time). -template < - typename TSharableLock, - typename TExclusiveLock, - typename Allocator = std::allocator -> -struct EpochQueue -{ - static_assert( - std::is_same::value, - "mutex type should be the same"); +// Each value of the queue (fixed-size array) is the reference counts at an +// index, where an index represents an epoch (time). 
+template > +struct EpochQueue { + static_assert(std::is_same::value, + "mutex type should be the same"); -public: - EpochQueue( - std::uint64_t epochCounter, - std::uint32_t queueSize, - Allocator allocator = Allocator()) - : m_frontIndex{ epochCounter } - , m_backIndex{ epochCounter } - , m_mutexForBackIndex{} - , m_refCounts{ queueSize, typename Allocator::template rebind::other(allocator) } - { - if (queueSize == 0U) - { - throw RuntimeException("Zero queue size is not allowed."); - } + public: + EpochQueue(std::uint64_t epochCounter, + std::uint32_t queueSize, + Allocator allocator = Allocator()) + : m_frontIndex{epochCounter}, + m_backIndex{epochCounter}, + m_mutexForBackIndex{}, + m_refCounts{ + queueSize, + typename Allocator::template rebind::other(allocator)} { + if (queueSize == 0U) { + throw RuntimeException("Zero queue size is not allowed."); } + } - using SharableLock = TSharableLock; - using ExclusiveLock = TExclusiveLock; - using RefCount = std::atomic; - using RefCounts = Interprocess::Container::Vector< - RefCount, - typename Allocator::template rebind::other>; + using SharableLock = TSharableLock; + using ExclusiveLock = TExclusiveLock; + using RefCount = std::atomic; + using RefCounts = Interprocess::Container:: + Vector::other>; - // The followings (m_frontIndex and m_backIndex) are - // accessed/updated only by the owner thread (only one thread), thus - // they don't require any synchronization. - std::size_t m_frontIndex; + // The followings (m_frontIndex and m_backIndex) are + // accessed/updated only by the owner thread (only one thread), thus + // they don't require any synchronization. + std::size_t m_frontIndex; - // Back index represents the latest epoch counter value. Note that - // this is accessed/updated by multiple threads, thus requires - // synchronization. - std::size_t m_backIndex; + // Back index represents the latest epoch counter value. Note that + // this is accessed/updated by multiple threads, thus requires + // synchronization. + std::size_t m_backIndex; - // Read/Write lock for m_backIndex. - typename SharableLock::mutex_type m_mutexForBackIndex; + // Read/Write lock for m_backIndex. + typename SharableLock::mutex_type m_mutexForBackIndex; - // Reference counts per epoch count. - // The index represents the epoch counter value and the value represents the reference counts. - RefCounts m_refCounts; + // Reference counts per epoch count. + // The index represents the epoch counter value and the value represents the + // reference counts. + RefCounts m_refCounts; }; - // EpochRefManager provides functionality of adding/removing references // to the epoch counter. template -class EpochRefManager -{ -public: - explicit EpochRefManager(EpochQueue& epochQueue) - : m_epochQueue(epochQueue) - {} +class EpochRefManager { + public: + explicit EpochRefManager(EpochQueue& epochQueue) : m_epochQueue(epochQueue) {} - // Increment a reference to the current epoch counter. - // This function is thread-safe. - std::uint64_t AddRef() - { - // The synchronization is needed for EpochCounterManager::AddNewEpoch(). - typename EpochQueue::SharableLock lock(m_epochQueue.m_mutexForBackIndex); + // Increment a reference to the current epoch counter. + // This function is thread-safe. + std::uint64_t AddRef() { + // The synchronization is needed for EpochCounterManager::AddNewEpoch(). 
+ typename EpochQueue::SharableLock lock(m_epochQueue.m_mutexForBackIndex); - ++m_epochQueue.m_refCounts[m_epochQueue.m_backIndex % m_epochQueue.m_refCounts.size()]; + ++m_epochQueue.m_refCounts[m_epochQueue.m_backIndex % + m_epochQueue.m_refCounts.size()]; - return m_epochQueue.m_backIndex; + return m_epochQueue.m_backIndex; + } + + // Decrement a reference count for the given epoch counter. + // This function is thread-safe. + void RemoveRef(std::uint64_t epochCounter) { + auto& refCounter = + m_epochQueue + .m_refCounts[epochCounter % m_epochQueue.m_refCounts.size()]; + + if (refCounter == 0) { + throw RuntimeException("Reference counter is invalid."); } + --refCounter; + } - // Decrement a reference count for the given epoch counter. - // This function is thread-safe. - void RemoveRef(std::uint64_t epochCounter) - { - auto& refCounter = m_epochQueue.m_refCounts[epochCounter % m_epochQueue.m_refCounts.size()]; + EpochRefManager(const EpochRefManager&) = delete; + EpochRefManager& operator=(const EpochRefManager&) = delete; - if (refCounter == 0) - { - throw RuntimeException("Reference counter is invalid."); - } - - --refCounter; - } - - EpochRefManager(const EpochRefManager&) = delete; - EpochRefManager& operator=(const EpochRefManager&) = delete; - -private: - EpochQueue& m_epochQueue; + private: + EpochQueue& m_epochQueue; }; - -// EpochCounterManager provides functionality of updating the current epoch counter -// and getting the latest unreferenced epoch counter. +// EpochCounterManager provides functionality of updating the current epoch +// counter and getting the latest unreferenced epoch counter. template -class EpochCounterManager -{ -public: - explicit EpochCounterManager(EpochQueue& epochQueue) - : m_epochQueue(epochQueue) - {} +class EpochCounterManager { + public: + explicit EpochCounterManager(EpochQueue& epochQueue) + : m_epochQueue(epochQueue) {} - // Increments the current epoch count by one. - // This function is thread-safe. - void AddNewEpoch() - { - // The synchronization is needed for EpochRefManager::AddRef(). - typename EpochQueue::ExclusiveLock lock(m_epochQueue.m_mutexForBackIndex); + // Increments the current epoch count by one. + // This function is thread-safe. + void AddNewEpoch() { + // The synchronization is needed for EpochRefManager::AddRef(). + typename EpochQueue::ExclusiveLock lock(m_epochQueue.m_mutexForBackIndex); - ++m_epochQueue.m_backIndex; + ++m_epochQueue.m_backIndex; - // TODO: check for the overwrap and throw. + // TODO: check for the overwrap and throw. + } + + // Returns the epoch count in the queue where it is the biggest epoch + // count such that all other epoch counts' references are zeros. + // Note that this function is NOT thread safe, and should be run on the + // same thread as the one that calls AddNewEpoch(). + std::uint64_t RemoveUnreferenceEpochCounters() { + while (m_epochQueue.m_backIndex > m_epochQueue.m_frontIndex) { + if (m_epochQueue.m_refCounts[m_epochQueue.m_frontIndex % + m_epochQueue.m_refCounts.size()] == 0U) { + ++m_epochQueue.m_frontIndex; + } else { + // There are references to the front of the queue and will return this + // front index. + break; + } } - // Returns the epoch count in the queue where it is the biggest epoch - // count such that all other epoch counts' references are zeros. - // Note that this function is NOT thread safe, and should be run on the - // same thread as the one that calls AddNewEpoch(). 
- std::uint64_t RemoveUnreferenceEpochCounters() - { - while (m_epochQueue.m_backIndex > m_epochQueue.m_frontIndex) - { - if (m_epochQueue.m_refCounts[m_epochQueue.m_frontIndex % m_epochQueue.m_refCounts.size()] == 0U) - { - ++m_epochQueue.m_frontIndex; - } - else - { - // There are references to the front of the queue and will return this front index. - break; - } - } + return m_epochQueue.m_frontIndex; + } - return m_epochQueue.m_frontIndex; - } + EpochCounterManager(const EpochCounterManager&) = delete; + EpochCounterManager& operator=(const EpochCounterManager&) = delete; - EpochCounterManager(const EpochCounterManager&) = delete; - EpochCounterManager& operator=(const EpochCounterManager&) = delete; - -private: - EpochQueue& m_epochQueue; + private: + EpochQueue& m_epochQueue; }; -} // namespace L4 +} // namespace L4 diff --git a/inc/L4/Epoch/EpochRefPolicy.h b/inc/L4/Epoch/EpochRefPolicy.h index 435c98f..37a6784 100644 --- a/inc/L4/Epoch/EpochRefPolicy.h +++ b/inc/L4/Epoch/EpochRefPolicy.h @@ -1,42 +1,37 @@ #pragma once -#include #include +#include -namespace L4 -{ +namespace L4 { // EpochRefPolicy class template -class EpochRefPolicy -{ -public: - explicit EpochRefPolicy(EpochRefManager& epochRefManager) - : m_epochRefManager{ epochRefManager } - , m_epochCounter{ m_epochRefManager.AddRef() } - {} +class EpochRefPolicy { + public: + explicit EpochRefPolicy(EpochRefManager& epochRefManager) + : m_epochRefManager{epochRefManager}, + m_epochCounter{m_epochRefManager.AddRef()} {} - EpochRefPolicy(EpochRefPolicy&& epochRefPolicy) - : m_epochRefManager{ epochRefPolicy.m_epochRefManager } - , m_epochCounter{ epochRefPolicy.m_epochCounter } - { - epochRefPolicy.m_epochCounter = boost::integer_traits::const_max; + EpochRefPolicy(EpochRefPolicy&& epochRefPolicy) + : m_epochRefManager{epochRefPolicy.m_epochRefManager}, + m_epochCounter{epochRefPolicy.m_epochCounter} { + epochRefPolicy.m_epochCounter = + boost::integer_traits::const_max; + } + + ~EpochRefPolicy() { + if (m_epochCounter != boost::integer_traits::const_max) { + m_epochRefManager.RemoveRef(m_epochCounter); } + } - ~EpochRefPolicy() - { - if (m_epochCounter != boost::integer_traits::const_max) - { - m_epochRefManager.RemoveRef(m_epochCounter); - } - } + EpochRefPolicy(const EpochRefPolicy&) = delete; + EpochRefPolicy& operator=(const EpochRefPolicy&) = delete; - EpochRefPolicy(const EpochRefPolicy&) = delete; - EpochRefPolicy& operator=(const EpochRefPolicy&) = delete; - -private: - EpochRefManager& m_epochRefManager; - std::uint64_t m_epochCounter; + private: + EpochRefManager& m_epochRefManager; + std::uint64_t m_epochCounter; }; -} // namespace L4 \ No newline at end of file +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Epoch/IEpochActionManager.h b/inc/L4/Epoch/IEpochActionManager.h index 3bd9297..5223faf 100644 --- a/inc/L4/Epoch/IEpochActionManager.h +++ b/inc/L4/Epoch/IEpochActionManager.h @@ -2,21 +2,17 @@ #include -namespace L4 -{ - +namespace L4 { // IEpochActionManager interface exposes an API for registering an Action. -struct IEpochActionManager -{ - using Action = std::function; +struct IEpochActionManager { + using Action = std::function; - virtual ~IEpochActionManager() {}; + virtual ~IEpochActionManager(){}; - // Register actions on the latest epoch in the queue and the action is - // performed when the epoch is removed from the queue. 
- virtual void RegisterAction(Action&& action) = 0; + // Register actions on the latest epoch in the queue and the action is + // performed when the epoch is removed from the queue. + virtual void RegisterAction(Action&& action) = 0; }; - -} // namespace L4 \ No newline at end of file +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/HashTable/Cache/HashTable.h b/inc/L4/HashTable/Cache/HashTable.h index 66ccaa8..4ddaad9 100644 --- a/inc/L4/HashTable/Cache/HashTable.h +++ b/inc/L4/HashTable/Cache/HashTable.h @@ -4,392 +4,352 @@ #include #include #include -#include "detail/ToRawPointer.h" #include "Epoch/IEpochActionManager.h" +#include "HashTable/Cache/Metadata.h" #include "HashTable/IHashTable.h" #include "HashTable/ReadWrite/HashTable.h" -#include "HashTable/Cache/Metadata.h" #include "Utils/Clock.h" +#include "detail/ToRawPointer.h" -namespace L4 -{ -namespace HashTable -{ -namespace Cache -{ +namespace L4 { +namespace HashTable { +namespace Cache { // ReadOnlyHashTable class implements IReadOnlyHashTable interface and provides // the functionality to read data given a key. template class ReadOnlyHashTable - : public virtual ReadWrite::ReadOnlyHashTable - , protected Clock -{ -public: - using Base = ReadWrite::ReadOnlyHashTable; - using HashTable = typename Base::HashTable; + : public virtual ReadWrite::ReadOnlyHashTable, + protected Clock { + public: + using Base = ReadWrite::ReadOnlyHashTable; + using HashTable = typename Base::HashTable; - using Key = typename Base::Key; - using Value = typename Base::Value; - using IIteratorPtr = typename Base::IIteratorPtr; + using Key = typename Base::Key; + using Value = typename Base::Value; + using IIteratorPtr = typename Base::IIteratorPtr; - class Iterator; + class Iterator; - ReadOnlyHashTable( - HashTable& hashTable, - std::chrono::seconds recordTimeToLive) - : Base( - hashTable, - RecordSerializer{ - hashTable.m_setting.m_fixedKeySize, - hashTable.m_setting.m_fixedValueSize, - Metadata::c_metaDataSize }) - , m_recordTimeToLive{ recordTimeToLive } - {} + ReadOnlyHashTable(HashTable& hashTable, std::chrono::seconds recordTimeToLive) + : Base(hashTable, + RecordSerializer{hashTable.m_setting.m_fixedKeySize, + hashTable.m_setting.m_fixedValueSize, + Metadata::c_metaDataSize}), + m_recordTimeToLive{recordTimeToLive} {} - virtual bool Get(const Key& key, Value& value) const override - { - const auto status = GetInternal(key, value); + virtual bool Get(const Key& key, Value& value) const override { + const auto status = GetInternal(key, value); - // Note that the following const_cast is safe and necessary to update cache hit information. - const_cast(this->GetPerfData()).Increment( - status - ? HashTablePerfCounter::CacheHitCount - : HashTablePerfCounter::CacheMissCount); + // Note that the following const_cast is safe and necessary to update cache + // hit information. + const_cast(this->GetPerfData()) + .Increment(status ? 
HashTablePerfCounter::CacheHitCount + : HashTablePerfCounter::CacheMissCount); - return status; + return status; + } + + virtual IIteratorPtr GetIterator() const override { + return std::make_unique( + this->m_hashTable, this->m_recordSerializer, m_recordTimeToLive, + this->GetCurrentEpochTime()); + } + + ReadOnlyHashTable(const ReadOnlyHashTable&) = delete; + ReadOnlyHashTable& operator=(const ReadOnlyHashTable&) = delete; + + protected: + bool GetInternal(const Key& key, Value& value) const { + if (!Base::Get(key, value)) { + return false; } - virtual IIteratorPtr GetIterator() const override - { - return std::make_unique( - this->m_hashTable, - this->m_recordSerializer, - m_recordTimeToLive, - this->GetCurrentEpochTime()); + assert(value.m_size > Metadata::c_metaDataSize); + + // If the record with the given key is found, check if the record is expired + // or not. Note that the following const_cast is safe and necessary to + // update the access status. + Metadata metaData{const_cast( + reinterpret_cast(value.m_data))}; + if (metaData.IsExpired(this->GetCurrentEpochTime(), m_recordTimeToLive)) { + return false; } - ReadOnlyHashTable(const ReadOnlyHashTable&) = delete; - ReadOnlyHashTable& operator=(const ReadOnlyHashTable&) = delete; + metaData.UpdateAccessStatus(true); -protected: - bool GetInternal(const Key& key, Value& value) const - { - if (!Base::Get(key, value)) - { - return false; - } + value.m_data += Metadata::c_metaDataSize; + value.m_size -= Metadata::c_metaDataSize; - assert(value.m_size > Metadata::c_metaDataSize); + return true; + } - // If the record with the given key is found, check if the record is expired or not. - // Note that the following const_cast is safe and necessary to update the access status. - Metadata metaData{ const_cast(reinterpret_cast(value.m_data)) }; - if (metaData.IsExpired(this->GetCurrentEpochTime(), m_recordTimeToLive)) - { - return false; - } - - metaData.UpdateAccessStatus(true); - - value.m_data += Metadata::c_metaDataSize; - value.m_size -= Metadata::c_metaDataSize; - - return true; - } - - std::chrono::seconds m_recordTimeToLive; + std::chrono::seconds m_recordTimeToLive; }; - template -class ReadOnlyHashTable::Iterator : public Base::Iterator -{ -public: - using BaseIterator = typename Base::Iterator; +class ReadOnlyHashTable::Iterator : public Base::Iterator { + public: + using BaseIterator = typename Base::Iterator; - Iterator( - const HashTable& hashTable, - const RecordSerializer& recordDeserializer, - std::chrono::seconds recordTimeToLive, - std::chrono::seconds currentEpochTime) - : BaseIterator(hashTable, recordDeserializer) - , m_recordTimeToLive{ recordTimeToLive } - , m_currentEpochTime{ currentEpochTime } - {} + Iterator(const HashTable& hashTable, + const RecordSerializer& recordDeserializer, + std::chrono::seconds recordTimeToLive, + std::chrono::seconds currentEpochTime) + : BaseIterator(hashTable, recordDeserializer), + m_recordTimeToLive{recordTimeToLive}, + m_currentEpochTime{currentEpochTime} {} - Iterator(Iterator&& other) - : BaseIterator(std::move(other)) - , m_recordTimeToLive{ std::move(other.m_recordTimeToLive) } - , m_currentEpochTime{ std::move(other.m_currentEpochTime) } - {} + Iterator(Iterator&& other) + : BaseIterator(std::move(other)), + m_recordTimeToLive{std::move(other.m_recordTimeToLive)}, + m_currentEpochTime{std::move(other.m_currentEpochTime)} {} - bool MoveNext() override - { - if (!BaseIterator::MoveNext()) - { - return false; - } - - do - { - const Metadata metaData{ - const_cast( - 
reinterpret_cast( - BaseIterator::GetValue().m_data)) }; - - if (!metaData.IsExpired(m_currentEpochTime, m_recordTimeToLive)) - { - return true; - } - } while (BaseIterator::MoveNext()); - - return false; + bool MoveNext() override { + if (!BaseIterator::MoveNext()) { + return false; } - Value GetValue() const override - { - auto value = BaseIterator::GetValue(); - value.m_data += Metadata::c_metaDataSize; - value.m_size -= Metadata::c_metaDataSize; + do { + const Metadata metaData{ + const_cast(reinterpret_cast( + BaseIterator::GetValue().m_data))}; - return value; - } + if (!metaData.IsExpired(m_currentEpochTime, m_recordTimeToLive)) { + return true; + } + } while (BaseIterator::MoveNext()); -private: - std::chrono::seconds m_recordTimeToLive; - std::chrono::seconds m_currentEpochTime; + return false; + } + + Value GetValue() const override { + auto value = BaseIterator::GetValue(); + value.m_data += Metadata::c_metaDataSize; + value.m_size -= Metadata::c_metaDataSize; + + return value; + } + + private: + std::chrono::seconds m_recordTimeToLive; + std::chrono::seconds m_currentEpochTime; }; - -// The following warning is from the virtual inheritance and safe to disable in this case. -// https://msdn.microsoft.com/en-us/library/6b3sy7ae.aspx +// The following warning is from the virtual inheritance and safe to disable in +// this case. https://msdn.microsoft.com/en-us/library/6b3sy7ae.aspx #pragma warning(push) -#pragma warning(disable:4250) +#pragma warning(disable : 4250) -// WritableHashTable class implements IWritableHashTable interface and also provides -// the read only access (Get()) to the hash table. +// WritableHashTable class implements IWritableHashTable interface and also +// provides the read only access (Get()) to the hash table. template -class WritableHashTable - : public ReadOnlyHashTable - , public ReadWrite::WritableHashTable -{ -public: - using ReadOnlyBase = ReadOnlyHashTable; - using WritableBase = typename ReadWrite::WritableHashTable; - using HashTable = typename ReadOnlyBase::HashTable; +class WritableHashTable : public ReadOnlyHashTable, + public ReadWrite::WritableHashTable { + public: + using ReadOnlyBase = ReadOnlyHashTable; + using WritableBase = typename ReadWrite::WritableHashTable; + using HashTable = typename ReadOnlyBase::HashTable; - using Key = typename ReadOnlyBase::Key; - using Value = typename ReadOnlyBase::Value; - using ISerializerPtr = typename WritableBase::ISerializerPtr; + using Key = typename ReadOnlyBase::Key; + using Value = typename ReadOnlyBase::Value; + using ISerializerPtr = typename WritableBase::ISerializerPtr; - WritableHashTable( - HashTable& hashTable, - IEpochActionManager& epochManager, - std::uint64_t maxCacheSizeInBytes, - std::chrono::seconds recordTimeToLive, - bool forceTimeBasedEviction) - : ReadOnlyBase::Base( + WritableHashTable(HashTable& hashTable, + IEpochActionManager& epochManager, + std::uint64_t maxCacheSizeInBytes, + std::chrono::seconds recordTimeToLive, + bool forceTimeBasedEviction) + : ReadOnlyBase::Base( hashTable, - RecordSerializer{ - hashTable.m_setting.m_fixedKeySize, - hashTable.m_setting.m_fixedValueSize, - Metadata::c_metaDataSize }) - , ReadOnlyBase(hashTable, recordTimeToLive) - , WritableBase(hashTable, epochManager) - , m_maxCacheSizeInBytes{ maxCacheSizeInBytes } - , m_forceTimeBasedEviction{ forceTimeBasedEviction } - , m_currentEvictBucketIndex{ 0U } - {} + RecordSerializer{hashTable.m_setting.m_fixedKeySize, + hashTable.m_setting.m_fixedValueSize, + Metadata::c_metaDataSize}), + 
ReadOnlyBase(hashTable, recordTimeToLive), + WritableBase(hashTable, epochManager), + m_maxCacheSizeInBytes{maxCacheSizeInBytes}, + m_forceTimeBasedEviction{forceTimeBasedEviction}, + m_currentEvictBucketIndex{0U} {} - using ReadOnlyBase::Get; - using ReadOnlyBase::GetPerfData; + using ReadOnlyBase::Get; + using ReadOnlyBase::GetPerfData; - virtual void Add(const Key& key, const Value& value) override - { - if (m_forceTimeBasedEviction) - { - EvictBasedOnTime(key); + virtual void Add(const Key& key, const Value& value) override { + if (m_forceTimeBasedEviction) { + EvictBasedOnTime(key); + } + + Evict(key.m_size + value.m_size + Metadata::c_metaDataSize); + + WritableBase::Add(CreateRecordBuffer(key, value)); + } + + virtual ISerializerPtr GetSerializer() const override { + throw std::runtime_error("Not implemented yet."); + } + + private: + using Mutex = std::mutex; + using Lock = std::lock_guard; + + void EvictBasedOnTime(const Key& key) { + const auto bucketIndex = this->GetBucketInfo(key).first; + + auto* entry = &(this->m_hashTable.m_buckets[bucketIndex]); + + const auto curEpochTime = this->GetCurrentEpochTime(); + + typename HashTable::Lock lock{this->m_hashTable.GetMutex(bucketIndex)}; + + while (entry != nullptr) { + for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) { + const auto data = entry->m_dataList[i].Load(std::memory_order_relaxed); + + if (data != nullptr) { + const Metadata metadata{ + const_cast(reinterpret_cast( + this->m_recordSerializer.Deserialize(*data).m_value.m_data))}; + + if (metadata.IsExpired(curEpochTime, this->m_recordTimeToLive)) { + WritableBase::Remove(*entry, i); + this->m_hashTable.m_perfData.Increment( + HashTablePerfCounter::EvictedRecordsCount); + } } + } - Evict(key.m_size + value.m_size + Metadata::c_metaDataSize); + entry = entry->m_next.Load(std::memory_order_relaxed); + } + } - WritableBase::Add(CreateRecordBuffer(key, value)); + // Evict uses CLOCK algorithm to evict records based on expiration and access + // status until the number of bytes freed match the given number of bytes + // needed. + void Evict(std::uint64_t bytesNeeded) { + std::uint64_t numBytesToFree = CalculateNumBytesToFree(bytesNeeded); + if (numBytesToFree == 0U) { + return; } - virtual ISerializerPtr GetSerializer() const override - { - throw std::runtime_error("Not implemented yet."); + // Start evicting records with a lock. + Lock evictLock{m_evictMutex}; + + // Recalculate the number of bytes to free since other thread may have + // already evicted. + numBytesToFree = CalculateNumBytesToFree(bytesNeeded); + if (numBytesToFree == 0U) { + return; } -private: - using Mutex = std::mutex; - using Lock = std::lock_guard; + const auto curEpochTime = this->GetCurrentEpochTime(); - void EvictBasedOnTime(const Key& key) - { - const auto bucketIndex = this->GetBucketInfo(key).first; + // The max number of iterations we are going through per eviction is twice + // the number of buckets so that it can clear the access status. Note that + // this is the worst case scenario and the eviction process should exit much + // quicker in a normal case. 
+ auto& buckets = this->m_hashTable.m_buckets; + std::uint64_t numIterationsRemaining = buckets.size() * 2U; - auto* entry = &(this->m_hashTable.m_buckets[bucketIndex]); + while (numBytesToFree > 0U && numIterationsRemaining-- > 0U) { + const auto currentBucketIndex = + m_currentEvictBucketIndex++ % buckets.size(); + auto& bucket = buckets[currentBucketIndex]; - const auto curEpochTime = this->GetCurrentEpochTime(); + // Lock the bucket since another thread can bypass Evict() since + // TotalDataSize can be updated before the lock on m_evictMutex is + // released. + typename HashTable::UniqueLock lock{ + this->m_hashTable.GetMutex(currentBucketIndex)}; + typename HashTable::Entry* entry = &bucket; - typename HashTable::Lock lock{ this->m_hashTable.GetMutex(bucketIndex) }; + while (entry != nullptr) { + for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) { + const auto data = + entry->m_dataList[i].Load(std::memory_order_relaxed); - while (entry != nullptr) - { - for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) - { - const auto data = entry->m_dataList[i].Load(std::memory_order_relaxed); + if (data != nullptr) { + const auto record = this->m_recordSerializer.Deserialize(*data); + const auto& value = record.m_value; - if (data != nullptr) - { - const Metadata metadata{ - const_cast( - reinterpret_cast( - this->m_recordSerializer.Deserialize(*data).m_value.m_data)) }; + Metadata metadata{const_cast( + reinterpret_cast(value.m_data))}; - if (metadata.IsExpired(curEpochTime, this->m_recordTimeToLive)) - { - WritableBase::Remove(*entry, i); - this->m_hashTable.m_perfData.Increment(HashTablePerfCounter::EvictedRecordsCount); - } - } + // Evict this record if + // 1: the record is expired, or + // 2: the entry is not recently accessed (and unset the access bit + // if set). + if (metadata.IsExpired(curEpochTime, this->m_recordTimeToLive) || + !metadata.UpdateAccessStatus(false)) { + const auto numBytesFreed = record.m_key.m_size + value.m_size; + numBytesToFree = (numBytesFreed >= numBytesToFree) + ? 0U + : numBytesToFree - numBytesFreed; + + WritableBase::Remove(*entry, i); + + this->m_hashTable.m_perfData.Increment( + HashTablePerfCounter::EvictedRecordsCount); } - - entry = entry->m_next.Load(std::memory_order_relaxed); + } } + + entry = entry->m_next.Load(std::memory_order_relaxed); + } + } + } + + // Given the number of bytes needed, it calculates the number of bytes + // to free based on the max cache size. + std::uint64_t CalculateNumBytesToFree(std::uint64_t bytesNeeded) const { + const auto& perfData = GetPerfData(); + + const std::uint64_t totalDataSize = + perfData.Get(HashTablePerfCounter::TotalKeySize) + + perfData.Get(HashTablePerfCounter::TotalValueSize) + + perfData.Get(HashTablePerfCounter::TotalIndexSize); + + if ((bytesNeeded < m_maxCacheSizeInBytes) && + (totalDataSize + bytesNeeded <= m_maxCacheSizeInBytes)) { + // There are enough free bytes. + return 0U; } - // Evict uses CLOCK algorithm to evict records based on expiration and access status - // until the number of bytes freed match the given number of bytes needed. - void Evict(std::uint64_t bytesNeeded) - { - std::uint64_t numBytesToFree = CalculateNumBytesToFree(bytesNeeded); - if (numBytesToFree == 0U) - { - return; - } + // (totalDataSize > m_maxCacheSizeInBytes) case is possible: + // 1) If multiple threads are evicting and adding at the same time. + // For example, if thread A was evicting and thread B could have + // used the evicted bytes before thread A consumed. 
+ // 2) If max cache size is set lower than expectation. + return (totalDataSize > m_maxCacheSizeInBytes) + ? (totalDataSize - m_maxCacheSizeInBytes + bytesNeeded) + : bytesNeeded; + } - // Start evicting records with a lock. - Lock evictLock{ m_evictMutex }; + RecordBuffer* CreateRecordBuffer(const Key& key, const Value& value) { + const auto bufferSize = + this->m_recordSerializer.CalculateBufferSize(key, value); + auto buffer = Detail::to_raw_pointer( + this->m_hashTable.template GetAllocator().allocate( + bufferSize)); - // Recalculate the number of bytes to free since other thread may have already evicted. - numBytesToFree = CalculateNumBytesToFree(bytesNeeded); - if (numBytesToFree == 0U) - { - return; - } + std::uint32_t metaDataBuffer; + Metadata{&metaDataBuffer, this->GetCurrentEpochTime()}; - const auto curEpochTime = this->GetCurrentEpochTime(); + // 4-byte Metadata is inserted between key and value buffer. + return this->m_recordSerializer.Serialize( + key, value, + Value{reinterpret_cast(&metaDataBuffer), + sizeof(metaDataBuffer)}, + buffer, bufferSize); + } - // The max number of iterations we are going through per eviction is twice the number - // of buckets so that it can clear the access status. Note that this is the worst - // case scenario and the eviction process should exit much quicker in a normal case. - auto& buckets = this->m_hashTable.m_buckets; - std::uint64_t numIterationsRemaining = buckets.size() * 2U; - - while (numBytesToFree > 0U && numIterationsRemaining-- > 0U) - { - const auto currentBucketIndex = m_currentEvictBucketIndex++ % buckets.size(); - auto& bucket = buckets[currentBucketIndex]; - - // Lock the bucket since another thread can bypass Evict() since TotalDataSize can - // be updated before the lock on m_evictMutex is released. - typename HashTable::UniqueLock lock{ this->m_hashTable.GetMutex(currentBucketIndex) }; - typename HashTable::Entry* entry = &bucket; - - while (entry != nullptr) - { - for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) - { - const auto data = entry->m_dataList[i].Load(std::memory_order_relaxed); - - if (data != nullptr) - { - const auto record = this->m_recordSerializer.Deserialize(*data); - const auto& value = record.m_value; - - Metadata metadata{ - const_cast( - reinterpret_cast( - value.m_data)) }; - - // Evict this record if - // 1: the record is expired, or - // 2: the entry is not recently accessed (and unset the access bit if set). - if (metadata.IsExpired(curEpochTime, this->m_recordTimeToLive) - || !metadata.UpdateAccessStatus(false)) - { - const auto numBytesFreed = record.m_key.m_size + value.m_size; - numBytesToFree = (numBytesFreed >= numBytesToFree) ? 0U : numBytesToFree - numBytesFreed; - - WritableBase::Remove(*entry, i); - - this->m_hashTable.m_perfData.Increment(HashTablePerfCounter::EvictedRecordsCount); - } - } - } - - entry = entry->m_next.Load(std::memory_order_relaxed); - } - } - } - - // Given the number of bytes needed, it calculates the number of bytes - // to free based on the max cache size. - std::uint64_t CalculateNumBytesToFree(std::uint64_t bytesNeeded) const - { - const auto& perfData = GetPerfData(); - - const std::uint64_t totalDataSize = - perfData.Get(HashTablePerfCounter::TotalKeySize) - + perfData.Get(HashTablePerfCounter::TotalValueSize) - + perfData.Get(HashTablePerfCounter::TotalIndexSize); - - if ((bytesNeeded < m_maxCacheSizeInBytes) - && (totalDataSize + bytesNeeded <= m_maxCacheSizeInBytes)) - { - // There are enough free bytes. 
- return 0U; - } - - // (totalDataSize > m_maxCacheSizeInBytes) case is possible: - // 1) If multiple threads are evicting and adding at the same time. - // For example, if thread A was evicting and thread B could have - // used the evicted bytes before thread A consumed. - // 2) If max cache size is set lower than expectation. - return (totalDataSize > m_maxCacheSizeInBytes) - ? (totalDataSize - m_maxCacheSizeInBytes + bytesNeeded) - : bytesNeeded; - } - - RecordBuffer* CreateRecordBuffer(const Key& key, const Value& value) - { - const auto bufferSize = this->m_recordSerializer.CalculateBufferSize(key, value); - auto buffer = Detail::to_raw_pointer( - this->m_hashTable.template GetAllocator().allocate(bufferSize)); - - std::uint32_t metaDataBuffer; - Metadata{ &metaDataBuffer, this->GetCurrentEpochTime() }; - - // 4-byte Metadata is inserted between key and value buffer. - return this->m_recordSerializer.Serialize( - key, - value, - Value{ reinterpret_cast(&metaDataBuffer), sizeof(metaDataBuffer) }, - buffer, - bufferSize); - } - - Mutex m_evictMutex; - const std::uint64_t m_maxCacheSizeInBytes; - const bool m_forceTimeBasedEviction; - std::uint64_t m_currentEvictBucketIndex; + Mutex m_evictMutex; + const std::uint64_t m_maxCacheSizeInBytes; + const bool m_forceTimeBasedEviction; + std::uint64_t m_currentEvictBucketIndex; }; #pragma warning(pop) -} // namespace Cache -} // namespace HashTable -} // namespace L4 +} // namespace Cache +} // namespace HashTable +} // namespace L4 diff --git a/inc/L4/HashTable/Cache/Metadata.h b/inc/L4/HashTable/Cache/Metadata.h index 2bce511..53ccf7f 100644 --- a/inc/L4/HashTable/Cache/Metadata.h +++ b/inc/L4/HashTable/Cache/Metadata.h @@ -4,113 +4,91 @@ #include #include -namespace L4 -{ -namespace HashTable -{ -namespace Cache -{ - +namespace L4 { +namespace HashTable { +namespace Cache { // Metadata class that stores caching related data. // It stores access bit to indicate whether a record is recently accessed // as well as the epoch time when a record is created. // Note that this works regardless of the alignment of the metadata passed in. -class Metadata -{ -public: - // Constructs Metadata with the current epoch time. - Metadata(std::uint32_t* metadata, std::chrono::seconds curEpochTime) - : Metadata{ metadata } - { - *m_metadata = curEpochTime.count() & s_epochTimeMask; +class Metadata { + public: + // Constructs Metadata with the current epoch time. + Metadata(std::uint32_t* metadata, std::chrono::seconds curEpochTime) + : Metadata{metadata} { + *m_metadata = curEpochTime.count() & s_epochTimeMask; + } + + explicit Metadata(std::uint32_t* metadata) : m_metadata{metadata} { + assert(m_metadata != nullptr); + } + + // Returns the stored epoch time. + std::chrono::seconds GetEpochTime() const { + // *m_metadata even on the not-aligned memory should be fine since + // only the byte that contains the access bit is modified, and + // byte read is atomic. + return std::chrono::seconds{*m_metadata & s_epochTimeMask}; + } + + // Returns true if the stored epoch time is expired based + // on the given current epoch time and time-to-live value. + bool IsExpired(std::chrono::seconds curEpochTime, + std::chrono::seconds timeToLive) const { + assert(curEpochTime >= GetEpochTime()); + return (curEpochTime - GetEpochTime()) > timeToLive; + } + + // Returns true if the access status is on. + bool IsAccessed() const { return !!(GetAccessByte() & s_accessSetMask); } + + // If "set" is true, turn on the access bit in the given metadata and store + // it. 
If "set" is false, turn off the access bit. Returns true if the given + // metadata's access bit was originally on. + bool UpdateAccessStatus(bool set) { + const auto isAccessBitOn = IsAccessed(); + + // Set the bit only if the bit is not set, and vice versa. + if (set != isAccessBitOn) { + if (set) { + GetAccessByte() |= s_accessSetMask; + } else { + GetAccessByte() &= s_accessUnsetMask; + } } - explicit Metadata(std::uint32_t* metadata) - : m_metadata{ metadata } - { - assert(m_metadata != nullptr); - } + return isAccessBitOn; + } - // Returns the stored epoch time. - std::chrono::seconds GetEpochTime() const - { - // *m_metadata even on the not-aligned memory should be fine since - // only the byte that contains the access bit is modified, and - // byte read is atomic. - return std::chrono::seconds{ *m_metadata & s_epochTimeMask }; - } + static constexpr std::uint16_t c_metaDataSize = sizeof(std::uint32_t); - // Returns true if the stored epoch time is expired based - // on the given current epoch time and time-to-live value. - bool IsExpired( - std::chrono::seconds curEpochTime, - std::chrono::seconds timeToLive) const - { - assert(curEpochTime >= GetEpochTime()); - return (curEpochTime - GetEpochTime()) > timeToLive; - } + private: + std::uint8_t GetAccessByte() const { + return reinterpret_cast(m_metadata)[s_accessBitByte]; + } - // Returns true if the access status is on. - bool IsAccessed() const - { - return !!(GetAccessByte() & s_accessSetMask); - } + std::uint8_t& GetAccessByte() { + return reinterpret_cast(m_metadata)[s_accessBitByte]; + } - // If "set" is true, turn on the access bit in the given metadata and store it. - // If "set" is false, turn off the access bit. - // Returns true if the given metadata's access bit was originally on. - bool UpdateAccessStatus(bool set) - { - const auto isAccessBitOn = IsAccessed(); + // TODO: Create an endian test and assert it. (Works only on little endian). + // The byte that contains the most significant bit. + static constexpr std::uint8_t s_accessBitByte = 3U; - // Set the bit only if the bit is not set, and vice versa. - if (set != isAccessBitOn) - { - if (set) - { - GetAccessByte() |= s_accessSetMask; - } - else - { - GetAccessByte() &= s_accessUnsetMask; - } - } + // Most significant bit is set. + static constexpr std::uint8_t s_accessSetMask = 1U << 7; + static constexpr std::uint8_t s_accessUnsetMask = s_accessSetMask ^ 0xFF; - return isAccessBitOn; - } + // The rest of bits other than the most significant bit are set. + static constexpr std::uint32_t s_epochTimeMask = 0x7FFFFFFF; - static constexpr std::uint16_t c_metaDataSize = sizeof(std::uint32_t); - -private: - std::uint8_t GetAccessByte() const - { - return reinterpret_cast(m_metadata)[s_accessBitByte]; - } - - std::uint8_t& GetAccessByte() - { - return reinterpret_cast(m_metadata)[s_accessBitByte]; - } - - // TODO: Create an endian test and assert it. (Works only on little endian). - // The byte that contains the most significant bit. - static constexpr std::uint8_t s_accessBitByte = 3U; - - // Most significant bit is set. - static constexpr std::uint8_t s_accessSetMask = 1U << 7; - static constexpr std::uint8_t s_accessUnsetMask = s_accessSetMask ^ 0xFF; - - // The rest of bits other than the most significant bit are set. - static constexpr std::uint32_t s_epochTimeMask = 0x7FFFFFFF; - - // The most significant bit is a CLOCK bit. It is set to 1 upon access - // and reset to 0 by the cache eviction. - // The rest of the bits are used for storing the epoch time in seconds. 
- std::uint32_t* m_metadata = nullptr; + // The most significant bit is a CLOCK bit. It is set to 1 upon access + // and reset to 0 by the cache eviction. + // The rest of the bits are used for storing the epoch time in seconds. + std::uint32_t* m_metadata = nullptr; }; - -} // namespace Cache -} // namespace HashTable -} // namespace L4 +} // namespace Cache +} // namespace HashTable +} // namespace L4 diff --git a/inc/L4/HashTable/Common/Record.h b/inc/L4/HashTable/Common/Record.h index b8b797e..515d741 100644 --- a/inc/L4/HashTable/Common/Record.h +++ b/inc/L4/HashTable/Common/Record.h @@ -4,221 +4,190 @@ #include "HashTable/IHashTable.h" #include "Utils/Exception.h" -namespace L4 -{ -namespace HashTable -{ +namespace L4 { +namespace HashTable { // Record struct consists of key and value pair. -struct Record -{ - using Key = IReadOnlyHashTable::Key; - using Value = IReadOnlyHashTable::Value; +struct Record { + using Key = IReadOnlyHashTable::Key; + using Value = IReadOnlyHashTable::Value; - Record() = default; + Record() = default; - Record( - const Key& key, - const Value& value) - : m_key{ key } - , m_value{ value } - {} + Record(const Key& key, const Value& value) : m_key{key}, m_value{value} {} - Key m_key; - Value m_value; + Key m_key; + Value m_value; }; - // RecordBuffer is a thin wrapper struct around a raw buffer array (pointer). -struct RecordBuffer -{ - std::uint8_t m_buffer[1]; +struct RecordBuffer { + std::uint8_t m_buffer[1]; }; -static_assert( - sizeof(RecordBuffer) == 1, - "RecordBuffer size should be 1 to be a thin wrapper."); +static_assert(sizeof(RecordBuffer) == 1, + "RecordBuffer size should be 1 to be a thin wrapper."); -// RecordSerializer provides a functionality to serialize/deserialize a record information. -class RecordSerializer -{ -public: - using Key = Record::Key; - using Value = Record::Value; - using KeySize = Key::size_type; - using ValueSize = Value::size_type; +// RecordSerializer provides a functionality to serialize/deserialize a record +// information. +class RecordSerializer { + public: + using Key = Record::Key; + using Value = Record::Value; + using KeySize = Key::size_type; + using ValueSize = Value::size_type; - RecordSerializer( - KeySize fixedKeySize, - ValueSize fixedValueSize, - ValueSize metadataSize = 0U) - : m_fixedKeySize{ fixedKeySize } - , m_fixedValueSize{ fixedValueSize } - , m_metadataSize{ metadataSize } - {} + RecordSerializer(KeySize fixedKeySize, + ValueSize fixedValueSize, + ValueSize metadataSize = 0U) + : m_fixedKeySize{fixedKeySize}, + m_fixedValueSize{fixedValueSize}, + m_metadataSize{metadataSize} {} - // Returns the number of bytes needed for serializing the given key and value. - std::size_t CalculateBufferSize(const Key& key, const Value& value) const - { - return - ((m_fixedKeySize != 0) - ? m_fixedKeySize - : (key.m_size + sizeof(KeySize))) - + ((m_fixedValueSize != 0) + // Returns the number of bytes needed for serializing the given key and value. + std::size_t CalculateBufferSize(const Key& key, const Value& value) const { + return ((m_fixedKeySize != 0) ? m_fixedKeySize + : (key.m_size + sizeof(KeySize))) + + ((m_fixedValueSize != 0) ? m_fixedValueSize + m_metadataSize : (value.m_size + sizeof(ValueSize) + m_metadataSize)); - } + } - // Returns the number bytes used for key and value sizes. - std::size_t CalculateRecordOverhead() const - { - return - (m_fixedKeySize != 0 ? 0U : sizeof(KeySize)) - + (m_fixedValueSize != 0 ? 0U : sizeof(ValueSize)); - } + // Returns the number bytes used for key and value sizes. 
+ std::size_t CalculateRecordOverhead() const { + return (m_fixedKeySize != 0 ? 0U : sizeof(KeySize)) + + (m_fixedValueSize != 0 ? 0U : sizeof(ValueSize)); + } - // Serializes the given key and value to the given buffer. - // Note that the buffer size is at least as big as the number of bytes - // returned by CalculateBufferSize(). - RecordBuffer* Serialize( - const Key& key, - const Value& value, - std::uint8_t* const buffer, - std::size_t bufferSize) const - { - Validate(key, value); + // Serializes the given key and value to the given buffer. + // Note that the buffer size is at least as big as the number of bytes + // returned by CalculateBufferSize(). + RecordBuffer* Serialize(const Key& key, + const Value& value, + std::uint8_t* const buffer, + std::size_t bufferSize) const { + Validate(key, value); - assert(CalculateBufferSize(key, value) <= bufferSize); - (void)bufferSize; + assert(CalculateBufferSize(key, value) <= bufferSize); + (void)bufferSize; - const auto start = SerializeSizes(buffer, key.m_size, value.m_size); + const auto start = SerializeSizes(buffer, key.m_size, value.m_size); #if defined(_MSC_VER) - memcpy_s(buffer + start, key.m_size, key.m_data, key.m_size); - memcpy_s(buffer + start + key.m_size, value.m_size, value.m_data, value.m_size); + memcpy_s(buffer + start, key.m_size, key.m_data, key.m_size); + memcpy_s(buffer + start + key.m_size, value.m_size, value.m_data, + value.m_size); #else - memcpy(buffer + start, key.m_data, key.m_size); - memcpy(buffer + start + key.m_size, value.m_data, value.m_size); + memcpy(buffer + start, key.m_data, key.m_size); + memcpy(buffer + start + key.m_size, value.m_data, value.m_size); #endif - return reinterpret_cast(buffer); - } + return reinterpret_cast(buffer); + } - // Serializes the given key, value and meta value to the given buffer. - // The meta value is serialized between key and value. - // Note that the buffer size is at least as big as the number of bytes - // returned by CalculateBufferSize(). - RecordBuffer* Serialize( - const Key& key, - const Value& value, - const Value& metaValue, - std::uint8_t* const buffer, - std::size_t bufferSize) const - { - Validate(key, value, metaValue); + // Serializes the given key, value and meta value to the given buffer. + // The meta value is serialized between key and value. + // Note that the buffer size is at least as big as the number of bytes + // returned by CalculateBufferSize(). 
+ RecordBuffer* Serialize(const Key& key, + const Value& value, + const Value& metaValue, + std::uint8_t* const buffer, + std::size_t bufferSize) const { + Validate(key, value, metaValue); - assert(CalculateBufferSize(key, value) <= bufferSize); - (void)bufferSize; + assert(CalculateBufferSize(key, value) <= bufferSize); + (void)bufferSize; - const auto start = SerializeSizes(buffer, key.m_size, value.m_size + metaValue.m_size); + const auto start = + SerializeSizes(buffer, key.m_size, value.m_size + metaValue.m_size); #if defined(_MSC_VER) - memcpy_s(buffer + start, key.m_size, key.m_data, key.m_size); - memcpy_s(buffer + start + key.m_size, metaValue.m_size, metaValue.m_data, metaValue.m_size); - memcpy_s(buffer + start + key.m_size + metaValue.m_size, value.m_size, value.m_data, value.m_size); + memcpy_s(buffer + start, key.m_size, key.m_data, key.m_size); + memcpy_s(buffer + start + key.m_size, metaValue.m_size, metaValue.m_data, + metaValue.m_size); + memcpy_s(buffer + start + key.m_size + metaValue.m_size, value.m_size, + value.m_data, value.m_size); #else - memcpy(buffer + start, key.m_data, key.m_size); - memcpy(buffer + start + key.m_size, metaValue.m_data, metaValue.m_size); - memcpy(buffer + start + key.m_size + metaValue.m_size, value.m_data, value.m_size); + memcpy(buffer + start, key.m_data, key.m_size); + memcpy(buffer + start + key.m_size, metaValue.m_data, metaValue.m_size); + memcpy(buffer + start + key.m_size + metaValue.m_size, value.m_data, + value.m_size); #endif - return reinterpret_cast(buffer); + return reinterpret_cast(buffer); + } + + // Deserializes the given buffer and returns a Record object. + Record Deserialize(const RecordBuffer& buffer) const { + Record record; + + const auto* dataBuffer = buffer.m_buffer; + + auto& key = record.m_key; + if (m_fixedKeySize != 0) { + key.m_size = m_fixedKeySize; + } else { + key.m_size = *reinterpret_cast(dataBuffer); + dataBuffer += sizeof(KeySize); } - // Deserializes the given buffer and returns a Record object. - Record Deserialize(const RecordBuffer& buffer) const - { - Record record; - - const auto* dataBuffer = buffer.m_buffer; - - auto& key = record.m_key; - if (m_fixedKeySize != 0) - { - key.m_size = m_fixedKeySize; - } - else - { - key.m_size = *reinterpret_cast(dataBuffer); - dataBuffer += sizeof(KeySize); - } - - auto& value = record.m_value; - if (m_fixedValueSize != 0) - { - value.m_size = m_fixedValueSize + m_metadataSize; - } - else - { - value.m_size = *reinterpret_cast(dataBuffer); - dataBuffer += sizeof(ValueSize); - } - - key.m_data = dataBuffer; - value.m_data = dataBuffer + key.m_size; - - return record; + auto& value = record.m_value; + if (m_fixedValueSize != 0) { + value.m_size = m_fixedValueSize + m_metadataSize; + } else { + value.m_size = *reinterpret_cast(dataBuffer); + dataBuffer += sizeof(ValueSize); } -private: - // Validates key and value sizes when fixed sizes are set. - // Throws an exception if invalid sizes are used. - void Validate(const Key& key, const Value& value) const - { - if ((m_fixedKeySize != 0 && key.m_size != m_fixedKeySize) - || (m_fixedValueSize != 0 && value.m_size != m_fixedValueSize)) - { - throw RuntimeException("Invalid key or value sizes are given."); - } + key.m_data = dataBuffer; + value.m_data = dataBuffer + key.m_size; + + return record; + } + + private: + // Validates key and value sizes when fixed sizes are set. + // Throws an exception if invalid sizes are used. 
+ void Validate(const Key& key, const Value& value) const { + if ((m_fixedKeySize != 0 && key.m_size != m_fixedKeySize) || + (m_fixedValueSize != 0 && value.m_size != m_fixedValueSize)) { + throw RuntimeException("Invalid key or value sizes are given."); + } + } + + // Validates against the given meta value. + void Validate(const Key& key, + const Value& value, + const Value& metaValue) const { + Validate(key, value); + + if (m_metadataSize != metaValue.m_size) { + throw RuntimeException("Invalid meta value size is given."); + } + } + + // Serializes size information to the given buffer. + // It assumes that buffer has enough size for serialization. + std::size_t SerializeSizes(std::uint8_t* const buffer, + KeySize keySize, + ValueSize valueSize) const { + auto curBuffer = buffer; + if (m_fixedKeySize == 0) { + *reinterpret_cast(curBuffer) = keySize; + curBuffer += sizeof(keySize); } - // Validates against the given meta value. - void Validate(const Key& key, const Value& value, const Value& metaValue) const - { - Validate(key, value); - - if (m_metadataSize != metaValue.m_size) - { - throw RuntimeException("Invalid meta value size is given."); - } + if (m_fixedValueSize == 0) { + *reinterpret_cast(curBuffer) = valueSize; + curBuffer += sizeof(valueSize); } - // Serializes size information to the given buffer. - // It assumes that buffer has enough size for serialization. - std::size_t SerializeSizes( - std::uint8_t* const buffer, - KeySize keySize, - ValueSize valueSize) const - { - auto curBuffer = buffer; - if (m_fixedKeySize == 0) - { - *reinterpret_cast(curBuffer) = keySize; - curBuffer += sizeof(keySize); - } + return curBuffer - buffer; + } - if (m_fixedValueSize == 0) - { - *reinterpret_cast(curBuffer) = valueSize; - curBuffer += sizeof(valueSize); - } - - return curBuffer - buffer; - } - - const KeySize m_fixedKeySize; - const ValueSize m_fixedValueSize; - const ValueSize m_metadataSize; + const KeySize m_fixedKeySize; + const ValueSize m_fixedValueSize; + const ValueSize m_metadataSize; }; - -} // namespace HashTable -} // namespace L4 +} // namespace HashTable +} // namespace L4 diff --git a/inc/L4/HashTable/Common/SettingAdapter.h b/inc/L4/HashTable/Common/SettingAdapter.h index f2b3787..aab19e6 100644 --- a/inc/L4/HashTable/Common/SettingAdapter.h +++ b/inc/L4/HashTable/Common/SettingAdapter.h @@ -4,29 +4,27 @@ #include "HashTable/Common/SharedHashTable.h" #include "HashTable/Config.h" -namespace L4 -{ -namespace HashTable -{ +namespace L4 { +namespace HashTable { -// SettingAdapter class provides a functionality to convert a HashTableConfig::Setting object -// to a SharedHashTable::Setting object. -class SettingAdapter -{ -public: - template - typename SharedHashTable::Setting Convert(const HashTableConfig::Setting& from) const - { - typename SharedHashTable::Setting to; +// SettingAdapter class provides a functionality to convert a +// HashTableConfig::Setting object to a SharedHashTable::Setting object. 
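To make the RecordSerializer buffer layout above concrete, here is a stand-alone sketch of the variable-size path: the key-size and value-size fields are written first, followed by the key bytes and then the value bytes. The 16-bit key-size and 32-bit value-size widths are assumptions for the sketch, not taken from the patch.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

int main() {
  using KeySize = std::uint16_t;    // assumed width of Key::size_type
  using ValueSize = std::uint32_t;  // assumed width of Value::size_type

  const char key[] = "key";      // 3 bytes
  const char value[] = "value";  // 5 bytes
  const KeySize keySize = 3U;
  const ValueSize valueSize = 5U;

  // Variable-sized record (no fixed key/value sizes): both size fields come
  // first, then the key bytes, then the value bytes.
  std::vector<std::uint8_t> buffer(sizeof(KeySize) + sizeof(ValueSize) +
                                   keySize + valueSize);

  std::size_t offset = 0U;
  std::memcpy(buffer.data() + offset, &keySize, sizeof(keySize));
  offset += sizeof(keySize);
  std::memcpy(buffer.data() + offset, &valueSize, sizeof(valueSize));
  offset += sizeof(valueSize);
  std::memcpy(buffer.data() + offset, key, keySize);
  offset += keySize;
  std::memcpy(buffer.data() + offset, value, valueSize);

  // Deserialization walks the same layout in the same order.
  KeySize readKeySize = 0U;
  std::memcpy(&readKeySize, buffer.data(), sizeof(readKeySize));
  assert(readKeySize == keySize);
  return 0;
}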
+class SettingAdapter { + public: + template + typename SharedHashTable::Setting Convert( + const HashTableConfig::Setting& from) const { + typename SharedHashTable::Setting to; - to.m_numBuckets = from.m_numBuckets; - to.m_numBucketsPerMutex = (std::max)(from.m_numBucketsPerMutex.get_value_or(1U), 1U); - to.m_fixedKeySize = from.m_fixedKeySize.get_value_or(0U); - to.m_fixedValueSize = from.m_fixedValueSize.get_value_or(0U); + to.m_numBuckets = from.m_numBuckets; + to.m_numBucketsPerMutex = + (std::max)(from.m_numBucketsPerMutex.get_value_or(1U), 1U); + to.m_fixedKeySize = from.m_fixedKeySize.get_value_or(0U); + to.m_fixedValueSize = from.m_fixedValueSize.get_value_or(0U); - return to; - } + return to; + } }; -} // namespace HashTable -} // namespace L4 \ No newline at end of file +} // namespace HashTable +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/HashTable/Common/SharedHashTable.h b/inc/L4/HashTable/Common/SharedHashTable.h index 7e7cf97..22b6c97 100644 --- a/inc/L4/HashTable/Common/SharedHashTable.h +++ b/inc/L4/HashTable/Common/SharedHashTable.h @@ -10,197 +10,190 @@ #include "Utils/Exception.h" #include "Utils/Lock.h" -namespace L4 -{ -namespace HashTable -{ +namespace L4 { +namespace HashTable { // SharedHashTable struct represents the hash table structure. template -struct SharedHashTable -{ - using Data = TData; - using Allocator = TAllocator; +struct SharedHashTable { + using Data = TData; + using Allocator = TAllocator; - // HashTable::Entry struct represents an entry in the chained bucket list. - // Entry layout is as follows: - // - // | tag1 | tag2 | tag3 | tag4 | tag5 | tag6 | tag7 | tag 8 | 1 - // | tag9 | tag10 | tag11 | tag12 | tag13 | tag14 | tag15 | tag 16 | 2 - // | Data1 pointer | 3 - // | Data2 pointer | 4 - // | Data3 pointer | 5 - // | Data4 pointer | 6 - // | Data5 pointer | 7 - // | Data6 pointer | 8 - // | Data7 pointer | 9 - // | Data8 pointer | 10 - // | Data9 pointer | 11 - // | Data10 pointer | 12 - // | Data11 pointer | 13 - // | Data12 pointer | 14 - // | Data13 pointer | 15 - // | Data14 pointer | 16 - // | Data15 pointer | 17 - // | Data16 pointer | 18 - // | Entry pointer to the next Entry | 19 - // <----------------------8 bytes ----------------------------------> - // , where tag1 is a tag for Data1, tag2 for Data2, and so on. A tag value can be looked up - // first before going to the corresponding Data for a quick check. - // Also note that a byte read is atomic in modern processors so that tag is just - // std::uint8_t instead of being atomic. Even in the case where the tag value read is a garbage , - // this is acceptable because of the followings: - // 1) if the garbage value was a hit where it should have been a miss: the actual key comparison will fail, - // 2) if the garbage value was a miss where it should have been a hit: the key value must - // have been changed since the tag was changed, so it will be looked up correctly - // after the tag value written is visible correctly. Note that we don't need to guarantee the timing of - // writing and reading (meaning the value written should be visible to the reader right away). - // - // Note about the CPU cache. In previous implementation, the Entry was 64 bytes to fit in the CPU cache. - // However, this resulted in lots of wasted space. For example, when the ratio of the number of expected records - // to the number of buckets was 2:1, only 85% buckets were occupied. After experiments, if you have 10:1 ratio, - // you will have 99.98% utilization of buckets. 
This required having more data per Entry, and the ideal number - // (after experiments) turned out to be 16 records per Entry. Also, because of how CPU fetches contiguous memory, - // this didn't have any impact on micro-benchmarking. - struct Entry - { - Entry() = default; + // HashTable::Entry struct represents an entry in the chained bucket list. + // Entry layout is as follows: + // + // | tag1 | tag2 | tag3 | tag4 | tag5 | tag6 | tag7 | tag 8 | 1 + // | tag9 | tag10 | tag11 | tag12 | tag13 | tag14 | tag15 | tag 16 | 2 + // | Data1 pointer | 3 + // | Data2 pointer | 4 + // | Data3 pointer | 5 + // | Data4 pointer | 6 + // | Data5 pointer | 7 + // | Data6 pointer | 8 + // | Data7 pointer | 9 + // | Data8 pointer | 10 + // | Data9 pointer | 11 + // | Data10 pointer | 12 + // | Data11 pointer | 13 + // | Data12 pointer | 14 + // | Data13 pointer | 15 + // | Data14 pointer | 16 + // | Data15 pointer | 17 + // | Data16 pointer | 18 + // | Entry pointer to the next Entry | 19 + // <----------------------8 bytes ----------------------------------> + // , where tag1 is a tag for Data1, tag2 for Data2, and so on. A tag value can + // be looked up first before going to the corresponding Data for a quick + // check. Also note that a byte read is atomic in modern processors so that + // tag is just std::uint8_t instead of being atomic. Even in the case where + // the tag value read is a garbage , this is acceptable because of the + // followings: + // 1) if the garbage value was a hit where it should have been a miss: the + // actual key comparison will fail, 2) if the garbage value was a miss + // where it should have been a hit: the key value must + // have been changed since the tag was changed, so it will be looked up + // correctly after the tag value written is visible correctly. Note that + // we don't need to guarantee the timing of writing and reading (meaning + // the value written should be visible to the reader right away). + // + // Note about the CPU cache. In previous implementation, the Entry was 64 + // bytes to fit in the CPU cache. However, this resulted in lots of wasted + // space. For example, when the ratio of the number of expected records to the + // number of buckets was 2:1, only 85% buckets were occupied. After + // experiments, if you have 10:1 ratio, you will have 99.98% utilization of + // buckets. This required having more data per Entry, and the ideal number + // (after experiments) turned out to be 16 records per Entry. Also, because of + // how CPU fetches contiguous memory, this didn't have any impact on + // micro-benchmarking. + struct Entry { + Entry() = default; - // Releases deallocates all the memories of the chained entries including - // the data list in the current Entry. - void Release(Allocator allocator) - { - auto dataDeleter = [allocator](auto& data) - { - auto dataToDelete = data.Load(); - if (dataToDelete != nullptr) - { - dataToDelete->~Data(); - typename Allocator::template rebind::other(allocator).deallocate(dataToDelete, 1U); - } - }; - - // Delete all the chained entries, not including itself. - auto curEntry = m_next.Load(); - - while (curEntry != nullptr) - { - auto entryToDelete = curEntry; - - // Copy m_next for the next iteration. - curEntry = entryToDelete->m_next.Load(); + // Releases deallocates all the memories of the chained entries including + // the data list in the current Entry. 
+ void Release(Allocator allocator) { + auto dataDeleter = [allocator](auto& data) { + auto dataToDelete = data.Load(); + if (dataToDelete != nullptr) { + dataToDelete->~Data(); + typename Allocator::template rebind::other(allocator) + .deallocate(dataToDelete, 1U); + } + }; - // Delete all the data within this entry. - for (auto& data : entryToDelete->m_dataList) - { - dataDeleter(data); - } - - // Clean the current entry itself. - entryToDelete->~Entry(); - typename Allocator::template rebind::other(allocator).deallocate(entryToDelete, 1U); - } - - // Delete all the data from the head of chained entries. - for (auto& data : m_dataList) - { - dataDeleter(data); - } + // Delete all the chained entries, not including itself. + auto curEntry = m_next.Load(); + + while (curEntry != nullptr) { + auto entryToDelete = curEntry; + + // Copy m_next for the next iteration. + curEntry = entryToDelete->m_next.Load(); + + // Delete all the data within this entry. + for (auto& data : entryToDelete->m_dataList) { + dataDeleter(data); } - static constexpr std::uint8_t c_numDataPerEntry = 16U; + // Clean the current entry itself. + entryToDelete->~Entry(); + typename Allocator::template rebind::other(allocator).deallocate( + entryToDelete, 1U); + } - std::array m_tags{ 0U }; - - std::array, c_numDataPerEntry> m_dataList{}; - - Utils::AtomicOffsetPtr m_next{}; - }; - - static_assert(sizeof(Entry) == 152, "Entry should be 152 bytes."); - - struct Setting - { - using KeySize = IReadOnlyHashTable::Key::size_type; - using ValueSize = IReadOnlyHashTable::Value::size_type; - - Setting() = default; - - explicit Setting( - std::uint32_t numBuckets, - std::uint32_t numBucketsPerMutex = 1U, - KeySize fixedKeySize = 0U, - ValueSize fixedValueSize = 0U) - : m_numBuckets{ numBuckets } - , m_numBucketsPerMutex{ numBucketsPerMutex } - , m_fixedKeySize{ fixedKeySize } - , m_fixedValueSize{ fixedValueSize } - {} - - std::uint32_t m_numBuckets = 1U; - std::uint32_t m_numBucketsPerMutex = 1U; - KeySize m_fixedKeySize = 0U; - ValueSize m_fixedValueSize = 0U; - }; - - SharedHashTable( - const Setting& setting, - Allocator allocator) - : m_allocator{ allocator } - , m_setting{ setting } - , m_buckets{ setting.m_numBuckets, typename Allocator::template rebind::other(m_allocator) } - , m_mutexes{ - (std::max)(setting.m_numBuckets / (std::max)(setting.m_numBucketsPerMutex, 1U), 1U), - typename Allocator::template rebind::other(m_allocator) } - , m_perfData{} - { - m_perfData.Set(HashTablePerfCounter::BucketsCount, m_buckets.size()); - m_perfData.Set( - HashTablePerfCounter::TotalIndexSize, - (m_buckets.size() * sizeof(Entry)) - + (m_mutexes.size() * sizeof(Mutex)) - + sizeof(SharedHashTable)); + // Delete all the data from the head of chained entries. 
+ for (auto& data : m_dataList) { + dataDeleter(data); + } } - ~SharedHashTable() - { - for (auto& bucket : m_buckets) - { - bucket.Release(m_allocator); - } + static constexpr std::uint8_t c_numDataPerEntry = 16U; + + std::array m_tags{0U}; + + std::array, c_numDataPerEntry> m_dataList{}; + + Utils::AtomicOffsetPtr m_next{}; + }; + + static_assert(sizeof(Entry) == 152, "Entry should be 152 bytes."); + + struct Setting { + using KeySize = IReadOnlyHashTable::Key::size_type; + using ValueSize = IReadOnlyHashTable::Value::size_type; + + Setting() = default; + + explicit Setting(std::uint32_t numBuckets, + std::uint32_t numBucketsPerMutex = 1U, + KeySize fixedKeySize = 0U, + ValueSize fixedValueSize = 0U) + : m_numBuckets{numBuckets}, + m_numBucketsPerMutex{numBucketsPerMutex}, + m_fixedKeySize{fixedKeySize}, + m_fixedValueSize{fixedValueSize} {} + + std::uint32_t m_numBuckets = 1U; + std::uint32_t m_numBucketsPerMutex = 1U; + KeySize m_fixedKeySize = 0U; + ValueSize m_fixedValueSize = 0U; + }; + + SharedHashTable(const Setting& setting, Allocator allocator) + : m_allocator{allocator}, + m_setting{setting}, + m_buckets{ + setting.m_numBuckets, + typename Allocator::template rebind::other(m_allocator)}, + m_mutexes{ + (std::max)(setting.m_numBuckets / + (std::max)(setting.m_numBucketsPerMutex, 1U), + 1U), + typename Allocator::template rebind::other(m_allocator)}, + m_perfData{} { + m_perfData.Set(HashTablePerfCounter::BucketsCount, m_buckets.size()); + m_perfData.Set(HashTablePerfCounter::TotalIndexSize, + (m_buckets.size() * sizeof(Entry)) + + (m_mutexes.size() * sizeof(Mutex)) + + sizeof(SharedHashTable)); + } + + ~SharedHashTable() { + for (auto& bucket : m_buckets) { + bucket.Release(m_allocator); } + } - using Mutex = Utils::ReaderWriterLockSlim; - using Lock = std::lock_guard; - using UniqueLock = std::unique_lock; + using Mutex = Utils::ReaderWriterLockSlim; + using Lock = std::lock_guard; + using UniqueLock = std::unique_lock; - using Buckets = Interprocess::Container::Vector::other>; - using Mutexes = Interprocess::Container::Vector::other>; + using Buckets = Interprocess::Container:: + Vector::other>; + using Mutexes = Interprocess::Container:: + Vector::other>; - template - auto GetAllocator() const - { - return typename Allocator::template rebind::other(m_allocator); - } + template + auto GetAllocator() const { + return typename Allocator::template rebind::other(m_allocator); + } - Mutex& GetMutex(std::size_t index) - { - return m_mutexes[index % m_mutexes.size()]; - } + Mutex& GetMutex(std::size_t index) { + return m_mutexes[index % m_mutexes.size()]; + } - Allocator m_allocator; + Allocator m_allocator; - const Setting m_setting; + const Setting m_setting; - Buckets m_buckets; + Buckets m_buckets; - Mutexes m_mutexes; + Mutexes m_mutexes; - HashTablePerfData m_perfData; + HashTablePerfData m_perfData; - SharedHashTable(const SharedHashTable&) = delete; - SharedHashTable& operator=(const SharedHashTable&) = delete; + SharedHashTable(const SharedHashTable&) = delete; + SharedHashTable& operator=(const SharedHashTable&) = delete; }; -} // namespace HashTable -} // namespace L4 +} // namespace HashTable +} // namespace L4 diff --git a/inc/L4/HashTable/Config.h b/inc/L4/HashTable/Config.h index d2111e8..965483c 100644 --- a/inc/L4/HashTable/Config.h +++ b/inc/L4/HashTable/Config.h @@ -2,89 +2,75 @@ #include #include -#include #include +#include #include #include "HashTable/IHashTable.h" #include "Utils/Properties.h" -namespace L4 -{ +namespace L4 { // HashTableConfig struct. 
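As a sanity check on the 152-byte Entry asserted above, a stand-alone sketch with a hypothetical 8-byte offset-pointer stand-in reproduces the arithmetic: 16 tag bytes, 16 data pointers of 8 bytes each, and one next pointer. OffsetPtrStandIn is a placeholder for Utils::AtomicOffsetPtr, which is assumed to occupy 8 bytes.

#include <array>
#include <cstdint>

// Hypothetical stand-in for Utils::AtomicOffsetPtr<T>: a single 8-byte offset.
template <typename T>
struct OffsetPtrStandIn {
  std::int64_t m_offset = 0;
};

struct EntryStandIn {
  static constexpr std::uint8_t c_numDataPerEntry = 16U;
  std::array<std::uint8_t, c_numDataPerEntry> m_tags{};                  // 16 B
  std::array<OffsetPtrStandIn<void>, c_numDataPerEntry> m_dataList{};    // 128 B
  OffsetPtrStandIn<EntryStandIn> m_next{};                               // 8 B
};

// 16 + (16 * 8) + 8 = 152, matching the static_assert in SharedHashTable.
static_assert(sizeof(EntryStandIn) == 152,
              "Entry layout sketch should be 152 bytes.");

int main() { return 0; }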
-struct HashTableConfig -{ - struct Setting - { - using KeySize = IReadOnlyHashTable::Key::size_type; - using ValueSize = IReadOnlyHashTable::Value::size_type; +struct HashTableConfig { + struct Setting { + using KeySize = IReadOnlyHashTable::Key::size_type; + using ValueSize = IReadOnlyHashTable::Value::size_type; - explicit Setting( - std::uint32_t numBuckets, - boost::optional numBucketsPerMutex = {}, - boost::optional fixedKeySize = {}, - boost::optional fixedValueSize = {}) - : m_numBuckets{ numBuckets } - , m_numBucketsPerMutex{ numBucketsPerMutex } - , m_fixedKeySize{ fixedKeySize } - , m_fixedValueSize{ fixedValueSize } - {} + explicit Setting(std::uint32_t numBuckets, + boost::optional numBucketsPerMutex = {}, + boost::optional fixedKeySize = {}, + boost::optional fixedValueSize = {}) + : m_numBuckets{numBuckets}, + m_numBucketsPerMutex{numBucketsPerMutex}, + m_fixedKeySize{fixedKeySize}, + m_fixedValueSize{fixedValueSize} {} - std::uint32_t m_numBuckets; - boost::optional m_numBucketsPerMutex; - boost::optional m_fixedKeySize; - boost::optional m_fixedValueSize; - }; + std::uint32_t m_numBuckets; + boost::optional m_numBucketsPerMutex; + boost::optional m_fixedKeySize; + boost::optional m_fixedValueSize; + }; - struct Cache - { - Cache( - std::uint64_t maxCacheSizeInBytes, - std::chrono::seconds recordTimeToLive, - bool forceTimeBasedEviction) - : m_maxCacheSizeInBytes{ maxCacheSizeInBytes } - , m_recordTimeToLive{ recordTimeToLive } - , m_forceTimeBasedEviction{ forceTimeBasedEviction } - {} + struct Cache { + Cache(std::uint64_t maxCacheSizeInBytes, + std::chrono::seconds recordTimeToLive, + bool forceTimeBasedEviction) + : m_maxCacheSizeInBytes{maxCacheSizeInBytes}, + m_recordTimeToLive{recordTimeToLive}, + m_forceTimeBasedEviction{forceTimeBasedEviction} {} - std::uint64_t m_maxCacheSizeInBytes; - std::chrono::seconds m_recordTimeToLive; - bool m_forceTimeBasedEviction; - }; + std::uint64_t m_maxCacheSizeInBytes; + std::chrono::seconds m_recordTimeToLive; + bool m_forceTimeBasedEviction; + }; - struct Serializer - { - using Properties = Utils::Properties; + struct Serializer { + using Properties = Utils::Properties; - Serializer( - std::shared_ptr stream = {}, - boost::optional properties = {}) - : m_stream{ stream } - , m_properties{ properties } - {} + Serializer(std::shared_ptr stream = {}, + boost::optional properties = {}) + : m_stream{stream}, m_properties{properties} {} - std::shared_ptr m_stream; - boost::optional m_properties; - }; + std::shared_ptr m_stream; + boost::optional m_properties; + }; - HashTableConfig( - std::string name, - Setting setting, - boost::optional cache = {}, - boost::optional serializer = {}) - : m_name{ std::move(name) } - , m_setting{ std::move(setting) } - , m_cache{ cache } - , m_serializer{ serializer } - { - assert(m_setting.m_numBuckets > 0U - || (m_serializer && (serializer->m_stream != nullptr))); - } + HashTableConfig(std::string name, + Setting setting, + boost::optional cache = {}, + boost::optional serializer = {}) + : m_name{std::move(name)}, + m_setting{std::move(setting)}, + m_cache{cache}, + m_serializer{serializer} { + assert(m_setting.m_numBuckets > 0U || + (m_serializer && (serializer->m_stream != nullptr))); + } - std::string m_name; - Setting m_setting; - boost::optional m_cache; - boost::optional m_serializer; + std::string m_name; + Setting m_setting; + boost::optional m_cache; + boost::optional m_serializer; }; -} // namespace L4 +} // namespace L4 diff --git a/inc/L4/HashTable/IHashTable.h 
b/inc/L4/HashTable/IHashTable.h index 930686c..645758a 100644 --- a/inc/L4/HashTable/IHashTable.h +++ b/inc/L4/HashTable/IHashTable.h @@ -5,92 +5,79 @@ #include "Log/PerfCounter.h" #include "Utils/Properties.h" -namespace L4 -{ +namespace L4 { // IReadOnlyHashTable interface for read-only access to the hash table. -struct IReadOnlyHashTable -{ - // Blob struct that represents a memory blob. - template - struct Blob - { - using size_type = TSize; +struct IReadOnlyHashTable { + // Blob struct that represents a memory blob. + template + struct Blob { + using size_type = TSize; - explicit Blob(const std::uint8_t* data = nullptr, size_type size = 0U) - : m_data{ data } - , m_size{ size } - { - static_assert(std::numeric_limits::is_integer, "size_type is not an integer."); - } + explicit Blob(const std::uint8_t* data = nullptr, size_type size = 0U) + : m_data{data}, m_size{size} { + static_assert(std::numeric_limits::is_integer, + "size_type is not an integer."); + } - bool operator==(const Blob& other) const - { - return (m_size == other.m_size) - && !memcmp(m_data, other.m_data, m_size); - } + bool operator==(const Blob& other) const { + return (m_size == other.m_size) && !memcmp(m_data, other.m_data, m_size); + } - bool operator!=(const Blob& other) const - { - return !(*this == other); - } + bool operator!=(const Blob& other) const { return !(*this == other); } - const std::uint8_t* m_data; - size_type m_size; - }; + const std::uint8_t* m_data; + size_type m_size; + }; - using Key = Blob; - using Value = Blob; + using Key = Blob; + using Value = Blob; - struct IIterator; + struct IIterator; - using IIteratorPtr = std::unique_ptr; + using IIteratorPtr = std::unique_ptr; - virtual ~IReadOnlyHashTable() = default; + virtual ~IReadOnlyHashTable() = default; - virtual bool Get(const Key& key, Value& value) const = 0; + virtual bool Get(const Key& key, Value& value) const = 0; - virtual IIteratorPtr GetIterator() const = 0; + virtual IIteratorPtr GetIterator() const = 0; - virtual const HashTablePerfData& GetPerfData() const = 0; + virtual const HashTablePerfData& GetPerfData() const = 0; }; // IReadOnlyHashTable::IIterator interface for the hash table iterator. -struct IReadOnlyHashTable::IIterator -{ - virtual ~IIterator() = default; +struct IReadOnlyHashTable::IIterator { + virtual ~IIterator() = default; - virtual void Reset() = 0; + virtual void Reset() = 0; - virtual bool MoveNext() = 0; + virtual bool MoveNext() = 0; - virtual Key GetKey() const = 0; + virtual Key GetKey() const = 0; - virtual Value GetValue() const = 0; + virtual Value GetValue() const = 0; }; // IWritableHashTable interface for write access to the hash table. -struct IWritableHashTable : public virtual IReadOnlyHashTable -{ - struct ISerializer; +struct IWritableHashTable : public virtual IReadOnlyHashTable { + struct ISerializer; - using ISerializerPtr = std::unique_ptr; + using ISerializerPtr = std::unique_ptr; - virtual void Add(const Key& key, const Value& value) = 0; + virtual void Add(const Key& key, const Value& value) = 0; - virtual bool Remove(const Key& key) = 0; + virtual bool Remove(const Key& key) = 0; - virtual ISerializerPtr GetSerializer() const = 0; + virtual ISerializerPtr GetSerializer() const = 0; }; // IWritableHashTable::ISerializer interface for serializing hash table. 
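A small usage sketch of the Blob semantics defined above: equality is size plus byte-wise content comparison, not pointer identity. BlobSketch is a simplified, non-templated copy for illustration only; the real Blob is templated on the size type, and Key and Value use different widths.

#include <cassert>
#include <cstdint>
#include <cstring>

struct BlobSketch {
  const std::uint8_t* m_data = nullptr;
  std::uint16_t m_size = 0U;

  bool operator==(const BlobSketch& other) const {
    // Same size and same bytes; the pointers themselves are irrelevant.
    return (m_size == other.m_size) &&
           !std::memcmp(m_data, other.m_data, m_size);
  }
};

int main() {
  const std::uint8_t first[] = {'k', 'e', 'y'};
  const std::uint8_t second[] = {'k', 'e', 'y'};
  BlobSketch a{first, 3U};
  BlobSketch b{second, 3U};
  assert(a == b);  // equal content compares equal even from different buffers.
  return 0;
}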
-struct IWritableHashTable::ISerializer -{ - virtual ~ISerializer() = default; +struct IWritableHashTable::ISerializer { + virtual ~ISerializer() = default; - virtual void Serialize( - std::ostream& stream, - const Utils::Properties& properties) = 0; + virtual void Serialize(std::ostream& stream, + const Utils::Properties& properties) = 0; }; -} // namespace L4 \ No newline at end of file +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/HashTable/ReadWrite/HashTable.h b/inc/L4/HashTable/ReadWrite/HashTable.h index 07b04be..c925ba6 100644 --- a/inc/L4/HashTable/ReadWrite/HashTable.h +++ b/inc/L4/HashTable/ReadWrite/HashTable.h @@ -3,583 +3,515 @@ #include #include #include -#include "detail/ToRawPointer.h" #include "Epoch/IEpochActionManager.h" -#include "HashTable/Common/SharedHashTable.h" #include "HashTable/Common/Record.h" +#include "HashTable/Common/SharedHashTable.h" #include "HashTable/IHashTable.h" #include "HashTable/ReadWrite/Serializer.h" #include "Log/PerfCounter.h" #include "Utils/Exception.h" #include "Utils/MurmurHash3.h" #include "Utils/Properties.h" +#include "detail/ToRawPointer.h" -namespace L4 -{ +namespace L4 { -// ReadWriteHashTable is a general purpose hash table where the look up is look free. -namespace HashTable -{ -namespace ReadWrite -{ +// ReadWriteHashTable is a general purpose hash table where the look up is look +// free. +namespace HashTable { +namespace ReadWrite { // ReadOnlyHashTable class implements IReadOnlyHashTable interface and provides // the functionality to read data given a key. template -class ReadOnlyHashTable : public virtual IReadOnlyHashTable -{ -public: - using HashTable = SharedHashTable; +class ReadOnlyHashTable : public virtual IReadOnlyHashTable { + public: + using HashTable = SharedHashTable; - class Iterator; + class Iterator; - explicit ReadOnlyHashTable( - HashTable& hashTable, - boost::optional recordSerializer = boost::none) - : m_hashTable{ hashTable } - , m_recordSerializer{ + explicit ReadOnlyHashTable( + HashTable& hashTable, + boost::optional recordSerializer = boost::none) + : m_hashTable{hashTable}, + m_recordSerializer{ recordSerializer - ? *recordSerializer - : RecordSerializer{ - m_hashTable.m_setting.m_fixedKeySize, - m_hashTable.m_setting.m_fixedValueSize } } - {} + ? *recordSerializer + : RecordSerializer{m_hashTable.m_setting.m_fixedKeySize, + m_hashTable.m_setting.m_fixedValueSize}} {} - virtual bool Get(const Key& key, Value& value) const override - { - const auto bucketInfo = GetBucketInfo(key); - const auto* entry = &m_hashTable.m_buckets[bucketInfo.first]; + virtual bool Get(const Key& key, Value& value) const override { + const auto bucketInfo = GetBucketInfo(key); + const auto* entry = &m_hashTable.m_buckets[bucketInfo.first]; - while (entry != nullptr) - { - for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) - { - if (bucketInfo.second == entry->m_tags[i]) - { - // There could be a race condition where m_dataList[i] is updated during access. - // Therefore, load it once and save it (it's safe to store it b/c the memory - // will not be deleted until ref count becomes 0). - const auto data = entry->m_dataList[i].Load(std::memory_order_acquire); + while (entry != nullptr) { + for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) { + if (bucketInfo.second == entry->m_tags[i]) { + // There could be a race condition where m_dataList[i] is updated + // during access. 
Therefore, load it once and save it (it's safe to + // store it b/c the memory will not be deleted until ref count becomes + // 0). + const auto data = + entry->m_dataList[i].Load(std::memory_order_acquire); - if (data != nullptr) - { - const auto record = m_recordSerializer.Deserialize(*data); - if (record.m_key == key) - { - value = record.m_value; - return true; - } - } - } + if (data != nullptr) { + const auto record = m_recordSerializer.Deserialize(*data); + if (record.m_key == key) { + value = record.m_value; + return true; } - - entry = entry->m_next.Load(std::memory_order_acquire); + } } + } - return false; + entry = entry->m_next.Load(std::memory_order_acquire); } - virtual IIteratorPtr GetIterator() const override - { - return std::make_unique(m_hashTable, m_recordSerializer); - } + return false; + } - virtual const HashTablePerfData& GetPerfData() const override - { - // Synchronizes with any std::memory_order_release if there exists, so that - // HashTablePerfData has the latest values at the moment when GetPerfData() is called. - std::atomic_thread_fence(std::memory_order_acquire); - return m_hashTable.m_perfData; - } + virtual IIteratorPtr GetIterator() const override { + return std::make_unique(m_hashTable, m_recordSerializer); + } - ReadOnlyHashTable(const ReadOnlyHashTable&) = delete; - ReadOnlyHashTable& operator=(const ReadOnlyHashTable&) = delete; + virtual const HashTablePerfData& GetPerfData() const override { + // Synchronizes with any std::memory_order_release if there exists, so that + // HashTablePerfData has the latest values at the moment when GetPerfData() + // is called. + std::atomic_thread_fence(std::memory_order_acquire); + return m_hashTable.m_perfData; + } -protected: - // GetBucketInfo returns a pair, where the first is the index to the bucket - // and the second is the tag value for the given key. - // In this hash table, we treat tag value of 0 as empty (see WritableHashTable::Remove()), - // so in the worst case scenario, where an entry has an empty data list and the tag - // value returned for the key is 0, the look up cost is up to 6 checks. We can do something - // smarter by using the unused two bytes per Entry, but since an Entry object fits into - // CPU cache, the extra overhead should be minimal. - std::pair GetBucketInfo(const Key& key) const - { - std::array hash; - MurmurHash3_x64_128(key.m_data, key.m_size, 0U, hash.data()); + ReadOnlyHashTable(const ReadOnlyHashTable&) = delete; + ReadOnlyHashTable& operator=(const ReadOnlyHashTable&) = delete; - return { - static_cast(hash[0] % m_hashTable.m_buckets.size()), - static_cast(hash[1]) }; - } + protected: + // GetBucketInfo returns a pair, where the first is the index to the bucket + // and the second is the tag value for the given key. + // In this hash table, we treat tag value of 0 as empty (see + // WritableHashTable::Remove()), so in the worst case scenario, where an entry + // has an empty data list and the tag value returned for the key is 0, the + // look up cost is up to 6 checks. We can do something smarter by using the + // unused two bytes per Entry, but since an Entry object fits into CPU cache, + // the extra overhead should be minimal. 
+ std::pair GetBucketInfo(const Key& key) const { + std::array hash; + MurmurHash3_x64_128(key.m_data, key.m_size, 0U, hash.data()); - HashTable& m_hashTable; + return {static_cast(hash[0] % m_hashTable.m_buckets.size()), + static_cast(hash[1])}; + } - RecordSerializer m_recordSerializer; + HashTable& m_hashTable; + + RecordSerializer m_recordSerializer; }; - // ReadOnlyHashTable::Iterator class implements IIterator interface and provides // read-only iterator for the ReadOnlyHashTable. template -class ReadOnlyHashTable::Iterator : public IIterator -{ -public: - Iterator( - const HashTable& hashTable, - const RecordSerializer& recordDeserializer) - : m_hashTable{ hashTable } - , m_recordSerializer{ recordDeserializer } - , m_currentBucketIndex{ -1 } - , m_currentRecordIndex{ 0U } - , m_currentEntry{ nullptr } - {} +class ReadOnlyHashTable::Iterator : public IIterator { + public: + Iterator(const HashTable& hashTable, + const RecordSerializer& recordDeserializer) + : m_hashTable{hashTable}, + m_recordSerializer{recordDeserializer}, + m_currentBucketIndex{-1}, + m_currentRecordIndex{0U}, + m_currentEntry{nullptr} {} - Iterator(Iterator&& iterator) - : m_hashTable{ std::move(iterator.m_hashTable) } - , m_recordSerializer{ std::move(iterator.recordDeserializer) } - , m_currentBucketIndex{ std::move(iterator.m_currentBucketIndex) } - , m_currentRecordIndex{ std::move(iterator.m_currentRecordIndex) } - , m_currentEntry{ std::move(iterator.m_currentEntry) } - {} + Iterator(Iterator&& iterator) + : m_hashTable{std::move(iterator.m_hashTable)}, + m_recordSerializer{std::move(iterator.recordDeserializer)}, + m_currentBucketIndex{std::move(iterator.m_currentBucketIndex)}, + m_currentRecordIndex{std::move(iterator.m_currentRecordIndex)}, + m_currentEntry{std::move(iterator.m_currentEntry)} {} - void Reset() override - { - m_currentBucketIndex = -1; + void Reset() override { + m_currentBucketIndex = -1; + m_currentRecordIndex = 0U; + m_currentEntry = nullptr; + } + + bool MoveNext() override { + if (IsEnd()) { + return false; + } + + if (m_currentEntry != nullptr) { + MoveToNextData(); + } + + assert(m_currentRecordIndex < HashTable::Entry::c_numDataPerEntry); + + while ((m_currentEntry == nullptr) || + (m_currentRecord = + m_currentEntry->m_dataList[m_currentRecordIndex].Load()) == + nullptr) { + if (m_currentEntry == nullptr) { + ++m_currentBucketIndex; m_currentRecordIndex = 0U; - m_currentEntry = nullptr; - } - bool MoveNext() override - { - if (IsEnd()) - { - return false; + if (IsEnd()) { + return false; } - if (m_currentEntry != nullptr) - { - MoveToNextData(); - } - - assert(m_currentRecordIndex < HashTable::Entry::c_numDataPerEntry); - - while ((m_currentEntry == nullptr) - || (m_currentRecord = m_currentEntry->m_dataList[m_currentRecordIndex].Load()) == nullptr) - { - if (m_currentEntry == nullptr) - { - ++m_currentBucketIndex; - m_currentRecordIndex = 0U; - - if (IsEnd()) - { - return false; - } - - m_currentEntry = &m_hashTable.m_buckets[m_currentBucketIndex]; - } - else - { - MoveToNextData(); - } - } - - assert(m_currentEntry != nullptr); - assert(m_currentRecord != nullptr); - - return true; + m_currentEntry = &m_hashTable.m_buckets[m_currentBucketIndex]; + } else { + MoveToNextData(); + } } - Key GetKey() const override - { - if (!IsValid()) - { - throw RuntimeException("HashTableIterator is not correctly used."); - } + assert(m_currentEntry != nullptr); + assert(m_currentRecord != nullptr); - return m_recordSerializer.Deserialize(*m_currentRecord).m_key; + return true; + } + + Key 
GetKey() const override { + if (!IsValid()) { + throw RuntimeException("HashTableIterator is not correctly used."); } - Value GetValue() const override - { - if (!IsValid()) - { - throw RuntimeException("HashTableIterator is not correctly used."); - } + return m_recordSerializer.Deserialize(*m_currentRecord).m_key; + } - return m_recordSerializer.Deserialize(*m_currentRecord).m_value; + Value GetValue() const override { + if (!IsValid()) { + throw RuntimeException("HashTableIterator is not correctly used."); } - Iterator(const Iterator&) = delete; - Iterator& operator=(const Iterator&) = delete; + return m_recordSerializer.Deserialize(*m_currentRecord).m_value; + } -private: - bool IsValid() const - { - return !IsEnd() - && (m_currentEntry != nullptr) - && (m_currentRecord != nullptr); + Iterator(const Iterator&) = delete; + Iterator& operator=(const Iterator&) = delete; + + private: + bool IsValid() const { + return !IsEnd() && (m_currentEntry != nullptr) && + (m_currentRecord != nullptr); + } + + bool IsEnd() const { + return m_currentBucketIndex == + static_cast(m_hashTable.m_buckets.size()); + } + + void MoveToNextData() { + if (++m_currentRecordIndex >= HashTable::Entry::c_numDataPerEntry) { + m_currentRecordIndex = 0U; + m_currentEntry = m_currentEntry->m_next.Load(); } + } - bool IsEnd() const - { - return m_currentBucketIndex == static_cast(m_hashTable.m_buckets.size()); - } + const HashTable& m_hashTable; + const RecordSerializer& m_recordSerializer; - void MoveToNextData() - { - if (++m_currentRecordIndex >= HashTable::Entry::c_numDataPerEntry) - { - m_currentRecordIndex = 0U; - m_currentEntry = m_currentEntry->m_next.Load(); - } - } + std::int64_t m_currentBucketIndex; + std::uint8_t m_currentRecordIndex; - const HashTable& m_hashTable; - const RecordSerializer& m_recordSerializer; - - std::int64_t m_currentBucketIndex; - std::uint8_t m_currentRecordIndex; - - const typename HashTable::Entry* m_currentEntry; - const RecordBuffer* m_currentRecord; + const typename HashTable::Entry* m_currentEntry; + const RecordBuffer* m_currentRecord; }; - -// The following warning is from the virtual inheritance and safe to disable in this case. -// https://msdn.microsoft.com/en-us/library/6b3sy7ae.aspx +// The following warning is from the virtual inheritance and safe to disable in +// this case. https://msdn.microsoft.com/en-us/library/6b3sy7ae.aspx #pragma warning(push) -#pragma warning(disable:4250) +#pragma warning(disable : 4250) -// WritableHashTable class implements IWritableHashTable interface and also provides -// the read only access (Get()) to the hash table. -// Note the virtual inheritance on ReadOnlyHashTable so that any derived class -// can have only one ReadOnlyHashTable base class instance. +// WritableHashTable class implements IWritableHashTable interface and also +// provides the read only access (Get()) to the hash table. Note the virtual +// inheritance on ReadOnlyHashTable so that any derived class can +// have only one ReadOnlyHashTable base class instance. 
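The virtual-inheritance note above can be seen in reduced form below. The class names are placeholders, not the real L4 types, but the shape is the same: both derivation paths virtually inherit the common base, so the most derived class ends up with exactly one base subobject.

struct ReadOnlyBase {
  int m_sharedState = 0;
};

struct ReadPath : virtual ReadOnlyBase {};
struct WritePath : virtual ReadOnlyBase {};

// With virtual inheritance there is a single ReadOnlyBase subobject, so both
// paths observe the same m_sharedState.
struct ReadWrite : ReadPath, WritePath {};

int main() {
  ReadWrite table;
  table.m_sharedState = 1;  // unambiguous: only one base instance exists.
  return (table.ReadPath::m_sharedState == table.WritePath::m_sharedState) ? 0
                                                                            : 1;
}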
template -class WritableHashTable - : public virtual ReadOnlyHashTable - , public IWritableHashTable -{ -public: - using Base = ReadOnlyHashTable; - using HashTable = typename Base::HashTable; +class WritableHashTable : public virtual ReadOnlyHashTable, + public IWritableHashTable { + public: + using Base = ReadOnlyHashTable; + using HashTable = typename Base::HashTable; - WritableHashTable( - HashTable& hashTable, - IEpochActionManager& epochManager) - : Base(hashTable) - , m_epochManager{ epochManager } - {} + WritableHashTable(HashTable& hashTable, IEpochActionManager& epochManager) + : Base(hashTable), m_epochManager{epochManager} {} - virtual void Add(const Key& key, const Value& value) override - { - Add(CreateRecordBuffer(key, value)); - } + virtual void Add(const Key& key, const Value& value) override { + Add(CreateRecordBuffer(key, value)); + } - virtual bool Remove(const Key& key) override - { - const auto bucketInfo = this->GetBucketInfo(key); + virtual bool Remove(const Key& key) override { + const auto bucketInfo = this->GetBucketInfo(key); - auto* entry = &(this->m_hashTable.m_buckets[bucketInfo.first]); + auto* entry = &(this->m_hashTable.m_buckets[bucketInfo.first]); - typename HashTable::Lock lock{ this->m_hashTable.GetMutex(bucketInfo.first) }; + typename HashTable::Lock lock{this->m_hashTable.GetMutex(bucketInfo.first)}; - // Note that similar to Add(), the following block is performed inside a critical section, - // therefore, it is safe to do "Load"s with memory_order_relaxed. - while (entry != nullptr) - { - for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) - { - if (bucketInfo.second == entry->m_tags[i]) - { - const auto data = entry->m_dataList[i].Load(std::memory_order_relaxed); + // Note that similar to Add(), the following block is performed inside a + // critical section, therefore, it is safe to do "Load"s with + // memory_order_relaxed. 
+ while (entry != nullptr) { + for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) { + if (bucketInfo.second == entry->m_tags[i]) { + const auto data = + entry->m_dataList[i].Load(std::memory_order_relaxed); - if (data != nullptr) - { - const auto record = this->m_recordSerializer.Deserialize(*data); - if (record.m_key == key) - { - Remove(*entry, i); - return true; - } - } - } + if (data != nullptr) { + const auto record = this->m_recordSerializer.Deserialize(*data); + if (record.m_key == key) { + Remove(*entry, i); + return true; } - - entry = entry->m_next.Load(std::memory_order_relaxed); + } } + } - return false; + entry = entry->m_next.Load(std::memory_order_relaxed); } - virtual ISerializerPtr GetSerializer() const override - { - return std::make_unique(this->m_hashTable); - } + return false; + } -protected: - void Add(RecordBuffer* recordToAdd) - { - assert(recordToAdd != nullptr); + virtual ISerializerPtr GetSerializer() const override { + return std::make_unique(this->m_hashTable); + } - const auto newRecord = this->m_recordSerializer.Deserialize(*recordToAdd); - const auto& newKey = newRecord.m_key; - const auto& newValue = newRecord.m_value; + protected: + void Add(RecordBuffer* recordToAdd) { + assert(recordToAdd != nullptr); - Stat stat{ newKey.m_size, newValue.m_size }; + const auto newRecord = this->m_recordSerializer.Deserialize(*recordToAdd); + const auto& newKey = newRecord.m_key; + const auto& newValue = newRecord.m_value; - const auto bucketInfo = this->GetBucketInfo(newKey); + Stat stat{newKey.m_size, newValue.m_size}; - auto* curEntry = &(this->m_hashTable.m_buckets[bucketInfo.first]); + const auto bucketInfo = this->GetBucketInfo(newKey); - typename HashTable::Entry* entryToUpdate = nullptr; - std::uint8_t curDataIndex = 0U; + auto* curEntry = &(this->m_hashTable.m_buckets[bucketInfo.first]); - typename HashTable::UniqueLock lock{ this->m_hashTable.GetMutex(bucketInfo.first) }; + typename HashTable::Entry* entryToUpdate = nullptr; + std::uint8_t curDataIndex = 0U; - // Note that the following block is performed inside a critical section, therefore, - // it is safe to do "Load"s with memory_order_relaxed. - while (curEntry != nullptr) - { - ++stat.m_chainIndex; + typename HashTable::UniqueLock lock{ + this->m_hashTable.GetMutex(bucketInfo.first)}; - for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) - { - const auto data = curEntry->m_dataList[i].Load(std::memory_order_relaxed); + // Note that the following block is performed inside a critical section, + // therefore, it is safe to do "Load"s with memory_order_relaxed. + while (curEntry != nullptr) { + ++stat.m_chainIndex; - if (data == nullptr) - { - if (entryToUpdate == nullptr) - { - // Found an entry with no data set, but still need to go through the end of - // the list to see if an entry with the given key exists. - entryToUpdate = curEntry; - curDataIndex = i; - } - } - else if (curEntry->m_tags[i] == bucketInfo.second) - { - const auto oldRecord = this->m_recordSerializer.Deserialize(*data); - if (newKey == oldRecord.m_key) - { - // Will overwrite this entry data. - entryToUpdate = curEntry; - curDataIndex = i; - stat.m_oldValueSize = oldRecord.m_value.m_size; - break; - } - } - } + for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) { + const auto data = + curEntry->m_dataList[i].Load(std::memory_order_relaxed); - // Found the entry data to replaces. - if (stat.m_oldValueSize != 0U) - { - break; - } - - // Check if this is the end of the chaining. 
If so, create a new entry if we haven't found - // any entry to update along the way. - if (entryToUpdate == nullptr && curEntry->m_next.Load(std::memory_order_relaxed) == nullptr) - { - curEntry->m_next.Store( - new (Detail::to_raw_pointer( - this->m_hashTable.template GetAllocator().allocate(1U))) - typename HashTable::Entry(), - std::memory_order_release); - - stat.m_isNewEntryAdded = true; - } - - curEntry = curEntry->m_next.Load(std::memory_order_relaxed); + if (data == nullptr) { + if (entryToUpdate == nullptr) { + // Found an entry with no data set, but still need to go through the + // end of the list to see if an entry with the given key exists. + entryToUpdate = curEntry; + curDataIndex = i; + } + } else if (curEntry->m_tags[i] == bucketInfo.second) { + const auto oldRecord = this->m_recordSerializer.Deserialize(*data); + if (newKey == oldRecord.m_key) { + // Will overwrite this entry data. + entryToUpdate = curEntry; + curDataIndex = i; + stat.m_oldValueSize = oldRecord.m_value.m_size; + break; + } } + } - assert(entryToUpdate != nullptr); + // Found the entry data to replaces. + if (stat.m_oldValueSize != 0U) { + break; + } - auto recordToDelete = UpdateRecord(*entryToUpdate, curDataIndex, recordToAdd, bucketInfo.second); + // Check if this is the end of the chaining. If so, create a new entry if + // we haven't found any entry to update along the way. + if (entryToUpdate == nullptr && + curEntry->m_next.Load(std::memory_order_relaxed) == nullptr) { + curEntry->m_next.Store( + new (Detail::to_raw_pointer( + this->m_hashTable + .template GetAllocator() + .allocate(1U))) typename HashTable::Entry(), + std::memory_order_release); - lock.unlock(); + stat.m_isNewEntryAdded = true; + } - UpdatePerfDataForAdd(stat); - - ReleaseRecord(recordToDelete); + curEntry = curEntry->m_next.Load(std::memory_order_relaxed); } - // The chainIndex is the 1-based index for the given entry in the chained bucket list. - // It is assumed that this function is called under a lock. - void Remove(typename HashTable::Entry& entry, std::uint8_t index) - { - auto recordToDelete = UpdateRecord(entry, index, nullptr, 0U); + assert(entryToUpdate != nullptr); - assert(recordToDelete != nullptr); + auto recordToDelete = UpdateRecord(*entryToUpdate, curDataIndex, + recordToAdd, bucketInfo.second); - const auto record = this->m_recordSerializer.Deserialize(*recordToDelete); + lock.unlock(); - UpdatePerfDataForRemove( - Stat{ - record.m_key.m_size, - record.m_value.m_size, - 0U - }); + UpdatePerfDataForAdd(stat); - ReleaseRecord(recordToDelete); + ReleaseRecord(recordToDelete); + } + + // The chainIndex is the 1-based index for the given entry in the chained + // bucket list. It is assumed that this function is called under a lock. 
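The Store(..., std::memory_order_release) calls in Add() above pair with the Load(..., std::memory_order_acquire) in the lock-free Get() path. The reduced sketch below shows that pairing with a plain std::atomic pointer standing in for Utils::AtomicOffsetPtr, and with a direct delete standing in for the epoch-deferred reclamation the real table uses.

#include <atomic>
#include <cassert>
#include <thread>

struct RecordSketch {
  int m_payload = 0;
};

std::atomic<RecordSketch*> g_slot{nullptr};

int main() {
  std::thread writer([] {
    auto* record = new RecordSketch{42};              // fully construct first,
    g_slot.store(record, std::memory_order_release);  // then publish.
  });

  RecordSketch* seen = nullptr;
  while ((seen = g_slot.load(std::memory_order_acquire)) == nullptr) {
    // Spin until the published pointer becomes visible.
  }
  // The acquire load synchronizes with the release store, so the payload
  // written before publication is guaranteed to be visible here.
  assert(seen->m_payload == 42);

  writer.join();
  delete seen;
  return 0;
}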
+ void Remove(typename HashTable::Entry& entry, std::uint8_t index) { + auto recordToDelete = UpdateRecord(entry, index, nullptr, 0U); + + assert(recordToDelete != nullptr); + + const auto record = this->m_recordSerializer.Deserialize(*recordToDelete); + + UpdatePerfDataForRemove( + Stat{record.m_key.m_size, record.m_value.m_size, 0U}); + + ReleaseRecord(recordToDelete); + } + + private: + struct Stat; + + class Serializer; + + RecordBuffer* CreateRecordBuffer(const Key& key, const Value& value) { + const auto bufferSize = + this->m_recordSerializer.CalculateBufferSize(key, value); + auto buffer = Detail::to_raw_pointer( + this->m_hashTable.template GetAllocator().allocate( + bufferSize)); + + return this->m_recordSerializer.Serialize(key, value, buffer, bufferSize); + } + + RecordBuffer* UpdateRecord(typename HashTable::Entry& entry, + std::uint8_t index, + RecordBuffer* newRecord, + std::uint8_t newTag) { + // This function should be called under a lock, so calling with + // memory_order_relaxed for Load() is safe. + auto& recordHolder = entry.m_dataList[index]; + auto oldRecord = recordHolder.Load(std::memory_order_relaxed); + + recordHolder.Store(newRecord, std::memory_order_release); + entry.m_tags[index] = newTag; + + return oldRecord; + } + + void ReleaseRecord(RecordBuffer* record) { + if (record == nullptr) { + return; } -private: - struct Stat; + m_epochManager.RegisterAction([this, record]() { + record->~RecordBuffer(); + this->m_hashTable.template GetAllocator().deallocate(record, + 1U); + }); + } - class Serializer; + void UpdatePerfDataForAdd(const Stat& stat) { + auto& perfData = this->m_hashTable.m_perfData; - RecordBuffer* CreateRecordBuffer(const Key& key, const Value& value) - { - const auto bufferSize = this->m_recordSerializer.CalculateBufferSize(key, value); - auto buffer = Detail::to_raw_pointer( - this->m_hashTable.template GetAllocator().allocate(bufferSize)); - - return this->m_recordSerializer.Serialize(key, value, buffer, bufferSize); - } + if (stat.m_oldValueSize != 0U) { + // Updating the existing record. Therefore, no change in the key size. + perfData.Add(HashTablePerfCounter::TotalValueSize, + static_cast(stat.m_valueSize) - + stat.m_oldValueSize); + } else { + // We are adding a new data instead of replacing. + perfData.Add(HashTablePerfCounter::TotalKeySize, stat.m_keySize); + perfData.Add(HashTablePerfCounter::TotalValueSize, stat.m_valueSize); + perfData.Add( + HashTablePerfCounter::TotalIndexSize, + // Record overhead. + this->m_recordSerializer.CalculateRecordOverhead() + // Entry overhead if created. + + (stat.m_isNewEntryAdded ? sizeof(typename HashTable::Entry) + : 0U)); - RecordBuffer* UpdateRecord( - typename HashTable::Entry& entry, - std::uint8_t index, - RecordBuffer* newRecord, - std::uint8_t newTag) - { - // This function should be called under a lock, so calling with memory_order_relaxed for Load() is safe. 
- auto& recordHolder = entry.m_dataList[index]; - auto oldRecord = recordHolder.Load(std::memory_order_relaxed); + perfData.Min(HashTablePerfCounter::MinKeySize, stat.m_keySize); + perfData.Max(HashTablePerfCounter::MaxKeySize, stat.m_keySize); - recordHolder.Store(newRecord, std::memory_order_release); - entry.m_tags[index] = newTag; + perfData.Increment(HashTablePerfCounter::RecordsCount); - return oldRecord; - } + if (stat.m_isNewEntryAdded) { + perfData.Increment(HashTablePerfCounter::ChainingEntriesCount); - void ReleaseRecord(RecordBuffer* record) - { - if (record == nullptr) - { - return; + if (stat.m_chainIndex > 1U) { + perfData.Max(HashTablePerfCounter::MaxBucketChainLength, + stat.m_chainIndex); } - - m_epochManager.RegisterAction( - [this, record]() - { - record->~RecordBuffer(); - this->m_hashTable.template GetAllocator().deallocate(record, 1U); - }); + } } - void UpdatePerfDataForAdd(const Stat& stat) - { - auto& perfData = this->m_hashTable.m_perfData; + perfData.Min(HashTablePerfCounter::MinValueSize, stat.m_valueSize); + perfData.Max(HashTablePerfCounter::MaxValueSize, stat.m_valueSize); + } - if (stat.m_oldValueSize != 0U) - { - // Updating the existing record. Therefore, no change in the key size. - perfData.Add(HashTablePerfCounter::TotalValueSize, - static_cast(stat.m_valueSize) - stat.m_oldValueSize); - } - else - { - // We are adding a new data instead of replacing. - perfData.Add(HashTablePerfCounter::TotalKeySize, stat.m_keySize); - perfData.Add(HashTablePerfCounter::TotalValueSize, stat.m_valueSize); - perfData.Add(HashTablePerfCounter::TotalIndexSize, - // Record overhead. - this->m_recordSerializer.CalculateRecordOverhead() - // Entry overhead if created. - + (stat.m_isNewEntryAdded ? sizeof(typename HashTable::Entry) : 0U)); + void UpdatePerfDataForRemove(const Stat& stat) { + auto& perfData = this->m_hashTable.m_perfData; - perfData.Min(HashTablePerfCounter::MinKeySize, stat.m_keySize); - perfData.Max(HashTablePerfCounter::MaxKeySize, stat.m_keySize); + perfData.Decrement(HashTablePerfCounter::RecordsCount); + perfData.Subtract(HashTablePerfCounter::TotalKeySize, stat.m_keySize); + perfData.Subtract(HashTablePerfCounter::TotalValueSize, stat.m_valueSize); + perfData.Subtract(HashTablePerfCounter::TotalIndexSize, + this->m_recordSerializer.CalculateRecordOverhead()); + } - perfData.Increment(HashTablePerfCounter::RecordsCount); - - if (stat.m_isNewEntryAdded) - { - perfData.Increment(HashTablePerfCounter::ChainingEntriesCount); - - if (stat.m_chainIndex > 1U) - { - perfData.Max(HashTablePerfCounter::MaxBucketChainLength, stat.m_chainIndex); - } - } - } - - perfData.Min(HashTablePerfCounter::MinValueSize, stat.m_valueSize); - perfData.Max(HashTablePerfCounter::MaxValueSize, stat.m_valueSize); - } - - void UpdatePerfDataForRemove(const Stat& stat) - { - auto& perfData = this->m_hashTable.m_perfData; - - perfData.Decrement(HashTablePerfCounter::RecordsCount); - perfData.Subtract(HashTablePerfCounter::TotalKeySize, stat.m_keySize); - perfData.Subtract(HashTablePerfCounter::TotalValueSize, stat.m_valueSize); - perfData.Subtract(HashTablePerfCounter::TotalIndexSize, this->m_recordSerializer.CalculateRecordOverhead()); - } - - IEpochActionManager& m_epochManager; + IEpochActionManager& m_epochManager; }; #pragma warning(pop) - // WritableHashTable::Stat struct encapsulates stats for Add()/Remove(). 
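ReleaseRecord() above defers reclamation through IEpochActionManager rather than freeing in place, so lock-free readers that may still hold the old pointer stay safe. The toy manager below is a hypothetical stand-in that only shows the queue-now, run-later shape; the real interface takes an Action&& and decides on its own when the epoch has drained.

#include <functional>
#include <iostream>
#include <vector>

class ToyEpochActionManager {
 public:
  // Queue an action instead of running it immediately.
  void RegisterAction(std::function<void()> action) {
    m_pendingActions.emplace_back(std::move(action));
  }

  // Invoked once no reader can still reference the retired memory.
  void PerformPendingActions() {
    for (auto& action : m_pendingActions) {
      action();
    }
    m_pendingActions.clear();
  }

 private:
  std::vector<std::function<void()>> m_pendingActions;
};

int main() {
  ToyEpochActionManager epochManager;
  auto* retiredRecord = new int{7};

  // The writer replaced the record; readers may still hold the old pointer,
  // so deletion is deferred instead of done in place.
  epochManager.RegisterAction([retiredRecord] { delete retiredRecord; });

  // ... later, once the epoch has advanced past every active reader ...
  epochManager.PerformPendingActions();
  std::cout << "retired record reclaimed\n";
  return 0;
}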
template -struct WritableHashTable::Stat -{ - using KeySize = Key::size_type; - using ValueSize = Value::size_type; +struct WritableHashTable::Stat { + using KeySize = Key::size_type; + using ValueSize = Value::size_type; - explicit Stat( - KeySize keySize = 0U, - ValueSize valueSize = 0U, - ValueSize oldValueSize = 0U, - std::uint32_t chainIndex = 0U, - bool isNewEntryAdded = false) - : m_keySize{ keySize } - , m_valueSize{ valueSize } - , m_oldValueSize{ oldValueSize } - , m_chainIndex{ chainIndex } - , m_isNewEntryAdded{ isNewEntryAdded } - {} + explicit Stat(KeySize keySize = 0U, + ValueSize valueSize = 0U, + ValueSize oldValueSize = 0U, + std::uint32_t chainIndex = 0U, + bool isNewEntryAdded = false) + : m_keySize{keySize}, + m_valueSize{valueSize}, + m_oldValueSize{oldValueSize}, + m_chainIndex{chainIndex}, + m_isNewEntryAdded{isNewEntryAdded} {} - KeySize m_keySize; - ValueSize m_valueSize; - ValueSize m_oldValueSize; - std::uint32_t m_chainIndex; - bool m_isNewEntryAdded; + KeySize m_keySize; + ValueSize m_valueSize; + ValueSize m_oldValueSize; + std::uint32_t m_chainIndex; + bool m_isNewEntryAdded; }; - -// WritableHashTable::Serializer class that implements ISerializer, which provides -// the functionality to serialize the WritableHashTable. +// WritableHashTable::Serializer class that implements ISerializer, which +// provides the functionality to serialize the WritableHashTable. template -class WritableHashTable::Serializer : public IWritableHashTable::ISerializer -{ -public: - explicit Serializer(HashTable& hashTable) - : m_hashTable{ hashTable } - {} +class WritableHashTable::Serializer + : public IWritableHashTable::ISerializer { + public: + explicit Serializer(HashTable& hashTable) : m_hashTable{hashTable} {} - Serializer(const Serializer&) = delete; - Serializer& operator=(const Serializer&) = delete; + Serializer(const Serializer&) = delete; + Serializer& operator=(const Serializer&) = delete; - void Serialize( - std::ostream& stream, - const Utils::Properties& /* properties */) override - { - ReadWrite::Serializer< - HashTable, ReadWrite::ReadOnlyHashTable>{}.Serialize(m_hashTable, stream); - } + void Serialize(std::ostream& stream, + const Utils::Properties& /* properties */) override { + ReadWrite::Serializer{}.Serialize( + m_hashTable, stream); + } -private: - HashTable& m_hashTable; + private: + HashTable& m_hashTable; }; -} // namespace ReadWrite -} // namespace HashTable -} // namespace L4 +} // namespace ReadWrite +} // namespace HashTable +} // namespace L4 diff --git a/inc/L4/HashTable/ReadWrite/Serializer.h b/inc/L4/HashTable/ReadWrite/Serializer.h index 6618bca..f6ec89a 100644 --- a/inc/L4/HashTable/ReadWrite/Serializer.h +++ b/inc/L4/HashTable/ReadWrite/Serializer.h @@ -1,7 +1,7 @@ #pragma once -#include #include +#include #include #include "Epoch/IEpochActionManager.h" #include "Log/PerfCounter.h" @@ -9,27 +9,21 @@ #include "Utils/Exception.h" #include "Utils/Properties.h" -namespace L4 -{ -namespace HashTable -{ -namespace ReadWrite -{ +namespace L4 { +namespace HashTable { +namespace ReadWrite { // Note that the HashTable template parameter in this file is // HashTable::ReadWrite::ReadOnlyHashTable::HashTable. -// However, due to the cyclic dependency, it needs to be passed as a template type. +// However, due to the cyclic dependency, it needs to be passed as a template +// type. +// All the deprecated (previous versions) serializer should be put inside the +// Deprecated namespace. 
Removing any of the Deprecated serializers from the +// source code will require the major package version change. +namespace Deprecated {} // namespace Deprecated -// All the deprecated (previous versions) serializer should be put inside the Deprecated namespace. -// Removing any of the Deprecated serializers from the source code will require the major package version change. -namespace Deprecated -{ -} // namespace Deprecated - - -namespace Current -{ +namespace Current { constexpr std::uint8_t c_version = 1U; @@ -40,189 +34,185 @@ constexpr std::uint8_t c_version = 1U; // // Otherwise, end of the records. template class ReadOnlyHashTable> -class Serializer -{ -public: - Serializer() = default; +class Serializer { + public: + Serializer() = default; - Serializer(const Serializer&) = delete; - Serializer& operator=(const Serializer&) = delete; + Serializer(const Serializer&) = delete; + Serializer& operator=(const Serializer&) = delete; - void Serialize( - HashTable& hashTable, - std::ostream& stream) const - { - auto& perfData = hashTable.m_perfData; - perfData.Set(HashTablePerfCounter::RecordsCountSavedFromSerializer, 0); + void Serialize(HashTable& hashTable, std::ostream& stream) const { + auto& perfData = hashTable.m_perfData; + perfData.Set(HashTablePerfCounter::RecordsCountSavedFromSerializer, 0); - SerializerHelper helper(stream); + SerializerHelper helper(stream); - helper.Serialize(c_version); + helper.Serialize(c_version); - helper.Serialize(&hashTable.m_setting, sizeof(hashTable.m_setting)); + helper.Serialize(&hashTable.m_setting, sizeof(hashTable.m_setting)); - ReadOnlyHashTable readOnlyHashTable(hashTable); + ReadOnlyHashTable readOnlyHashTable( + hashTable); - auto iterator = readOnlyHashTable.GetIterator(); - while (iterator->MoveNext()) - { - helper.Serialize(true); // Indicates record exists. - const auto key = iterator->GetKey(); - const auto value = iterator->GetValue(); + auto iterator = readOnlyHashTable.GetIterator(); + while (iterator->MoveNext()) { + helper.Serialize(true); // Indicates record exists. + const auto key = iterator->GetKey(); + const auto value = iterator->GetValue(); - helper.Serialize(key.m_size); - helper.Serialize(key.m_data, key.m_size); + helper.Serialize(key.m_size); + helper.Serialize(key.m_data, key.m_size); - helper.Serialize(value.m_size); - helper.Serialize(value.m_data, value.m_size); + helper.Serialize(value.m_size); + helper.Serialize(value.m_data, value.m_size); - perfData.Increment(HashTablePerfCounter::RecordsCountSavedFromSerializer); - } - - helper.Serialize(false); // Indicates the end of records. - - // Flush perf counter so that the values are up to date when GetPerfData() is called. - std::atomic_thread_fence(std::memory_order_release); + perfData.Increment(HashTablePerfCounter::RecordsCountSavedFromSerializer); } + + helper.Serialize(false); // Indicates the end of records. + + // Flush perf counter so that the values are up to date when GetPerfData() + // is called. + std::atomic_thread_fence(std::memory_order_release); + } }; // Current Deserializer used for deserializing hash tables. 
-template class WritableHashTable> -class Deserializer -{ -public: - explicit Deserializer(const Utils::Properties& /* properties */) - {} +template + class WritableHashTable> +class Deserializer { + public: + explicit Deserializer(const Utils::Properties& /* properties */) {} - Deserializer(const Deserializer&) = delete; - Deserializer& operator=(const Deserializer&) = delete; + Deserializer(const Deserializer&) = delete; + Deserializer& operator=(const Deserializer&) = delete; - typename Memory::template UniquePtr Deserialize( - Memory& memory, - std::istream& stream) const - { - DeserializerHelper helper(stream); + typename Memory::template UniquePtr Deserialize( + Memory& memory, + std::istream& stream) const { + DeserializerHelper helper(stream); - typename HashTable::Setting setting; - helper.Deserialize(setting); + typename HashTable::Setting setting; + helper.Deserialize(setting); - auto hashTable{ memory.template MakeUnique( - setting, - memory.GetAllocator()) }; + auto hashTable{ + memory.template MakeUnique(setting, memory.GetAllocator())}; - EpochActionManager epochActionManager; + EpochActionManager epochActionManager; - WritableHashTable writableHashTable( - *hashTable, - epochActionManager); + WritableHashTable writableHashTable( + *hashTable, epochActionManager); - auto& perfData = hashTable->m_perfData; + auto& perfData = hashTable->m_perfData; - std::vector keyBuffer; - std::vector valueBuffer; + std::vector keyBuffer; + std::vector valueBuffer; - bool hasMoreData = false; - helper.Deserialize(hasMoreData); + bool hasMoreData = false; + helper.Deserialize(hasMoreData); - while (hasMoreData) - { - IReadOnlyHashTable::Key key; - IReadOnlyHashTable::Value value; + while (hasMoreData) { + IReadOnlyHashTable::Key key; + IReadOnlyHashTable::Value value; - helper.Deserialize(key.m_size); - keyBuffer.resize(key.m_size); - helper.Deserialize(keyBuffer.data(), key.m_size); - key.m_data = keyBuffer.data(); + helper.Deserialize(key.m_size); + keyBuffer.resize(key.m_size); + helper.Deserialize(keyBuffer.data(), key.m_size); + key.m_data = keyBuffer.data(); - helper.Deserialize(value.m_size); - valueBuffer.resize(value.m_size); - helper.Deserialize(valueBuffer.data(), value.m_size); - value.m_data = valueBuffer.data(); + helper.Deserialize(value.m_size); + valueBuffer.resize(value.m_size); + helper.Deserialize(valueBuffer.data(), value.m_size); + value.m_data = valueBuffer.data(); - writableHashTable.Add(key, value); + writableHashTable.Add(key, value); - helper.Deserialize(hasMoreData); + helper.Deserialize(hasMoreData); - perfData.Increment(HashTablePerfCounter::RecordsCountLoadedFromSerializer); - } - - // Flush perf counter so that the values are up to date when GetPerfData() is called. - std::atomic_thread_fence(std::memory_order_release); - - return hashTable; + perfData.Increment( + HashTablePerfCounter::RecordsCountLoadedFromSerializer); } -private: - // Deserializer internally uses WritableHashTable for deserialization, therefore - // an implementation of IEpochActionManager is needed. Since all the keys in the hash table - // are expected to be unique, no RegisterAction() should be called. - class EpochActionManager : public IEpochActionManager - { - public: - void RegisterAction(Action&& /* action */) override - { - // Since it is assumed that the serializer is loading from the stream generated by the same serializer, - // it is guaranteed that all the keys are unique (a property of a hash table). 
Therefore, RegisterAction() - // should not be called by the WritableHashTable. - throw RuntimeException("RegisterAction() should not be called from the serializer."); - } - }; + // Flush perf counter so that the values are up to date when GetPerfData() + // is called. + std::atomic_thread_fence(std::memory_order_release); + + return hashTable; + } + + private: + // Deserializer internally uses WritableHashTable for deserialization, + // therefore an implementation of IEpochActionManager is needed. Since all the + // keys in the hash table are expected to be unique, no RegisterAction() + // should be called. + class EpochActionManager : public IEpochActionManager { + public: + void RegisterAction(Action&& /* action */) override { + // Since it is assumed that the serializer is loading from the stream + // generated by the same serializer, it is guaranteed that all the keys + // are unique (a property of a hash table). Therefore, RegisterAction() + // should not be called by the WritableHashTable. + throw RuntimeException( + "RegisterAction() should not be called from the serializer."); + } + }; }; -} // namespace Current - +} // namespace Current // Serializer is the main driver for serializing a hash table. // It always uses the Current::Serializer for serializing a hash table. template class ReadOnlyHashTable> -class Serializer -{ -public: - Serializer() = default; - Serializer(const Serializer&) = delete; - Serializer& operator=(const Serializer&) = delete; +class Serializer { + public: + Serializer() = default; + Serializer(const Serializer&) = delete; + Serializer& operator=(const Serializer&) = delete; - void Serialize(HashTable& hashTable, std::ostream& stream) const - { - Current::Serializer{}.Serialize(hashTable, stream); - } + void Serialize(HashTable& hashTable, std::ostream& stream) const { + Current::Serializer{}.Serialize(hashTable, + stream); + } }; -// Deserializer is the main driver for deserializing the input stream to create a hash table. -template class WritableHashTable> -class Deserializer -{ -public: - explicit Deserializer(const Utils::Properties& properties) - : m_properties(properties) - {} +// Deserializer is the main driver for deserializing the input stream to create +// a hash table. 
+template + class WritableHashTable> +class Deserializer { + public: + explicit Deserializer(const Utils::Properties& properties) + : m_properties(properties) {} - Deserializer(const Deserializer&) = delete; - Deserializer& operator=(const Deserializer&) = delete; + Deserializer(const Deserializer&) = delete; + Deserializer& operator=(const Deserializer&) = delete; - typename Memory::template UniquePtr Deserialize( - Memory& memory, - std::istream& stream) const - { - std::uint8_t version = 0U; - DeserializerHelper(stream).Deserialize(version); + typename Memory::template UniquePtr Deserialize( + Memory& memory, + std::istream& stream) const { + std::uint8_t version = 0U; + DeserializerHelper(stream).Deserialize(version); - switch (version) - { - case Current::c_version: - return Current::Deserializer{ m_properties }.Deserialize(memory, stream); - default: - boost::format err("Unsupported version '%1%' is given."); - err % version; - throw RuntimeException(err.str()); - } + switch (version) { + case Current::c_version: + return Current::Deserializer{ + m_properties} + .Deserialize(memory, stream); + default: + boost::format err("Unsupported version '%1%' is given."); + err % version; + throw RuntimeException(err.str()); } + } -private: - const Utils::Properties& m_properties; + private: + const Utils::Properties& m_properties; }; -} // namespace ReadWrite -} // namespace HashTable -} // namespace L4 - +} // namespace ReadWrite +} // namespace HashTable +} // namespace L4 diff --git a/inc/L4/Interprocess/Connection/ConnectionMonitor.h b/inc/L4/Interprocess/Connection/ConnectionMonitor.h index 28668bb..7992752 100644 --- a/inc/L4/Interprocess/Connection/ConnectionMonitor.h +++ b/inc/L4/Interprocess/Connection/ConnectionMonitor.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -8,12 +9,9 @@ #include "Interprocess/Connection/EndPointInfo.h" #include "Interprocess/Utils/Handle.h" -namespace L4 -{ -namespace Interprocess -{ -namespace Connection -{ +namespace L4 { +namespace Interprocess { +namespace Connection { // ConnectionMonitor monitors any registered end points. // ConnectionMonitor creates a kernel event for local end point, @@ -22,91 +20,84 @@ namespace Connection // is closed, the callback registered is triggered and the remote endpoint // is removed from the ConnectionMonitor after the callback is finished.. class ConnectionMonitor - : public std::enable_shared_from_this -{ -public: - using Callback = std::function; + : public std::enable_shared_from_this { + public: + using Callback = std::function; - ConnectionMonitor(); - ~ConnectionMonitor(); + ConnectionMonitor(); + ~ConnectionMonitor(); - const EndPointInfo& GetLocalEndPointInfo() const; + const EndPointInfo& GetLocalEndPointInfo() const; - std::size_t GetRemoteConnectionsCount() const; + std::size_t GetRemoteConnectionsCount() const; - void Register(const EndPointInfo& remoteEndPoint, Callback callback); + void Register(const EndPointInfo& remoteEndPoint, Callback callback); - void UnRegister(const EndPointInfo& remoteEndPoint); + void UnRegister(const EndPointInfo& remoteEndPoint); - ConnectionMonitor(const ConnectionMonitor&) = delete; - ConnectionMonitor& operator=(const ConnectionMonitor&) = delete; + ConnectionMonitor(const ConnectionMonitor&) = delete; + ConnectionMonitor& operator=(const ConnectionMonitor&) = delete; -private: - class HandleMonitor; + private: + class HandleMonitor; - // UnRegister() removes the unregistered end points from m_remoteEvents. 
- void UnRegister() const; + // UnRegister() removes the unregistered end points from m_remoteEvents. + void UnRegister() const; - const EndPointInfo m_localEndPoint; + const EndPointInfo m_localEndPoint; - Utils::Handle m_localEvent; + Utils::Handle m_localEvent; - mutable std::map> m_remoteMonitors; + mutable std::map> + m_remoteMonitors; - mutable std::mutex m_mutexOnRemoteMonitors; + mutable std::mutex m_mutexOnRemoteMonitors; - mutable std::vector m_unregisteredEndPoints; + mutable std::vector m_unregisteredEndPoints; - mutable std::mutex m_mutexOnUnregisteredEndPoints; + mutable std::mutex m_mutexOnUnregisteredEndPoints; }; - // ConnectionMonitor::HandleMonitor opens the given endpoint's process // and event handles and waits for any event triggers. -class ConnectionMonitor::HandleMonitor -{ -public: - HandleMonitor( - const EndPointInfo& remoteEndPoint, - Callback callback); +class ConnectionMonitor::HandleMonitor { + public: + HandleMonitor(const EndPointInfo& remoteEndPoint, Callback callback); - HandleMonitor(const HandleMonitor&) = delete; - HandleMonitor& operator=(const HandleMonitor&) = delete; + HandleMonitor(const HandleMonitor&) = delete; + HandleMonitor& operator=(const HandleMonitor&) = delete; -private: - class Waiter; + private: + class Waiter; - std::unique_ptr m_eventWaiter; - std::unique_ptr m_processWaiter; + std::unique_ptr m_eventWaiter; + std::unique_ptr m_processWaiter; }; - // ConnectionMonitor::HandleMonitor::Waiter waits on the given handle and calls // the given callback when an event is triggered on the handle. -class ConnectionMonitor::HandleMonitor::Waiter -{ -public: - using Callback = std::function; +class ConnectionMonitor::HandleMonitor::Waiter { + public: + using Callback = std::function; - Waiter(Utils::Handle handle, Callback callback); + Waiter(Utils::Handle handle, Callback callback); - ~Waiter(); + ~Waiter(); - Waiter(const Waiter&) = delete; - Waiter& operator=(const Waiter&) = delete; + Waiter(const Waiter&) = delete; + Waiter& operator=(const Waiter&) = delete; -private: - static VOID CALLBACK OnEvent( - PTP_CALLBACK_INSTANCE instance, - PVOID context, - PTP_WAIT wait, - TP_WAIT_RESULT waitResult); + private: + static VOID CALLBACK OnEvent(PTP_CALLBACK_INSTANCE instance, + PVOID context, + PTP_WAIT wait, + TP_WAIT_RESULT waitResult); - Utils::Handle m_handle; - Callback m_callback; - std::unique_ptr m_wait; + Utils::Handle m_handle; + Callback m_callback; + std::unique_ptr m_wait; }; -} // namespace Connection -} // namespace Interprocess -} // namespace L4 +} // namespace Connection +} // namespace Interprocess +} // namespace L4 diff --git a/inc/L4/Interprocess/Connection/EndPointInfo.h b/inc/L4/Interprocess/Connection/EndPointInfo.h index f528fe1..ba4cdfb 100644 --- a/inc/L4/Interprocess/Connection/EndPointInfo.h +++ b/inc/L4/Interprocess/Connection/EndPointInfo.h @@ -1,41 +1,31 @@ #pragma once -#include #include +#include -namespace L4 -{ -namespace Interprocess -{ -namespace Connection -{ +namespace L4 { +namespace Interprocess { +namespace Connection { // EndPointInfo struct encapsulates the connection end point // information across process boundaries. 
-struct EndPointInfo -{ - explicit EndPointInfo( - std::uint32_t pid = 0U, - const boost::uuids::uuid& uuid = {}) - : m_pid{ pid } - , m_uuid{ uuid } - {} +struct EndPointInfo { + explicit EndPointInfo(std::uint32_t pid = 0U, + const boost::uuids::uuid& uuid = {}) + : m_pid{pid}, m_uuid{uuid} {} - bool operator==(const EndPointInfo& other) const - { - return (m_pid == other.m_pid) - && (m_uuid == other.m_uuid); - } + bool operator==(const EndPointInfo& other) const { + return (m_pid == other.m_pid) && (m_uuid == other.m_uuid); + } - bool operator<(const EndPointInfo& other) const - { - return m_uuid < other.m_uuid; - } + bool operator<(const EndPointInfo& other) const { + return m_uuid < other.m_uuid; + } - std::uint32_t m_pid; - boost::uuids::uuid m_uuid; + std::uint32_t m_pid; + boost::uuids::uuid m_uuid; }; -} // namespace Connection -} // namespace Interprocess -} // namespace L4 +} // namespace Connection +} // namespace Interprocess +} // namespace L4 diff --git a/inc/L4/Interprocess/Connection/EndPointInfoUtils.h b/inc/L4/Interprocess/Connection/EndPointInfoUtils.h index 4cbed7c..9de130c 100644 --- a/inc/L4/Interprocess/Connection/EndPointInfoUtils.h +++ b/inc/L4/Interprocess/Connection/EndPointInfoUtils.h @@ -3,29 +3,23 @@ #include #include "Interprocess/Connection/EndPointInfo.h" -namespace L4 -{ -namespace Interprocess -{ -namespace Connection -{ +namespace L4 { +namespace Interprocess { +namespace Connection { // EndPointInfoFactory creates an EndPointInfo object with the current // process id and a random uuid. -class EndPointInfoFactory -{ -public: - EndPointInfo Create() const; +class EndPointInfoFactory { + public: + EndPointInfo Create() const; }; - // StringConverter provides a functionality to convert EndPointInfo to a string. -class StringConverter -{ -public: - std::string operator()(const EndPointInfo& endPoint) const; +class StringConverter { + public: + std::string operator()(const EndPointInfo& endPoint) const; }; -} // namespace Connection -} // namespace Interprocess -} // namespace L4 +} // namespace Connection +} // namespace Interprocess +} // namespace L4 diff --git a/inc/L4/Interprocess/Container/List.h b/inc/L4/Interprocess/Container/List.h index 3151560..d4850d5 100644 --- a/inc/L4/Interprocess/Container/List.h +++ b/inc/L4/Interprocess/Container/List.h @@ -2,18 +2,13 @@ #include -namespace L4 -{ -namespace Interprocess -{ -namespace Container -{ - +namespace L4 { +namespace Interprocess { +namespace Container { template using List = boost::interprocess::list; - -} // namespace Container -} // namespace Interprocess -} // namespace L4 \ No newline at end of file +} // namespace Container +} // namespace Interprocess +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Interprocess/Container/String.h b/inc/L4/Interprocess/Container/String.h index bf1c9ad..a8b3d81 100644 --- a/inc/L4/Interprocess/Container/String.h +++ b/inc/L4/Interprocess/Container/String.h @@ -2,18 +2,14 @@ #include -namespace L4 -{ -namespace Interprocess -{ -namespace Container -{ - +namespace L4 { +namespace Interprocess { +namespace Container { template -using String = boost::interprocess::basic_string, Allocator>; +using String = + boost::interprocess::basic_string, Allocator>; - -} // namespace Container -} // namespace Interprocess -} // namespace L4 \ No newline at end of file +} // namespace Container +} // namespace Interprocess +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Interprocess/Container/Vector.h b/inc/L4/Interprocess/Container/Vector.h 
index d851db4..d54d5df 100644 --- a/inc/L4/Interprocess/Container/Vector.h +++ b/inc/L4/Interprocess/Container/Vector.h @@ -2,18 +2,13 @@ #include -namespace L4 -{ -namespace Interprocess -{ -namespace Container -{ - +namespace L4 { +namespace Interprocess { +namespace Container { template using Vector = boost::interprocess::vector; - -} // namespace Container -} // namespace Interprocess -} // namespace L4 \ No newline at end of file +} // namespace Container +} // namespace Interprocess +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Interprocess/Utils/Handle.h b/inc/L4/Interprocess/Utils/Handle.h index 67388c9..b1077dd 100644 --- a/inc/L4/Interprocess/Utils/Handle.h +++ b/inc/L4/Interprocess/Utils/Handle.h @@ -4,34 +4,31 @@ #include #include "Utils/Windows.h" -namespace L4 -{ -namespace Interprocess -{ -namespace Utils -{ +namespace L4 { +namespace Interprocess { +namespace Utils { // Handle is a RAII class that manages the life time of the given HANDLE. -class Handle -{ -public: - // If verifyHandle is true, it checks whether a given handle is valid. - explicit Handle(HANDLE handle, bool verifyHandle = false); +class Handle { + public: + // If verifyHandle is true, it checks whether a given handle is valid. + explicit Handle(HANDLE handle, bool verifyHandle = false); - Handle(Handle&& other); + Handle(Handle&& other); - explicit operator HANDLE() const; + explicit operator HANDLE() const; - Handle(const Handle&) = delete; - Handle& operator=(const Handle&) = delete; - Handle& operator=(Handle&&) = delete; + Handle(const Handle&) = delete; + Handle& operator=(const Handle&) = delete; + Handle& operator=(Handle&&) = delete; -private: - HANDLE Verify(HANDLE handle, bool verifyHandle) const; + private: + HANDLE Verify(HANDLE handle, bool verifyHandle) const; - std::unique_ptr, decltype(&::CloseHandle)> m_handle; + std::unique_ptr, decltype(&::CloseHandle)> + m_handle; }; -} // namespace Utils -} // namespace Interprocess -} // namespace L4 +} // namespace Utils +} // namespace Interprocess +} // namespace L4 diff --git a/inc/L4/LocalMemory/Context.h b/inc/L4/LocalMemory/Context.h index 7dc8de3..4b1f227 100644 --- a/inc/L4/LocalMemory/Context.h +++ b/inc/L4/LocalMemory/Context.h @@ -4,52 +4,42 @@ #include "EpochManager.h" #include "HashTableManager.h" -namespace L4 -{ -namespace LocalMemory -{ +namespace L4 { +namespace LocalMemory { -class Context : private EpochRefPolicy -{ -public: - Context( - HashTableManager& hashTableManager, - EpochManager::TheEpochRefManager& epochRefManager) - : EpochRefPolicy(epochRefManager) - , m_hashTableManager{ hashTableManager } - {} +class Context : private EpochRefPolicy { + public: + Context(HashTableManager& hashTableManager, + EpochManager::TheEpochRefManager& epochRefManager) + : EpochRefPolicy(epochRefManager), + m_hashTableManager{hashTableManager} {} - Context(Context&& context) - : EpochRefPolicy(std::move(context)) - , m_hashTableManager{ context.m_hashTableManager } - {} + Context(Context&& context) + : EpochRefPolicy(std::move(context)), + m_hashTableManager{context.m_hashTableManager} {} - const IReadOnlyHashTable& operator[](const char* name) const - { - return m_hashTableManager.GetHashTable(name); - } + const IReadOnlyHashTable& operator[](const char* name) const { + return m_hashTableManager.GetHashTable(name); + } - IWritableHashTable& operator[](const char* name) - { - return m_hashTableManager.GetHashTable(name); - } + IWritableHashTable& operator[](const char* name) { + return m_hashTableManager.GetHashTable(name); + 
} - const IReadOnlyHashTable& operator[](std::size_t index) const - { - return m_hashTableManager.GetHashTable(index); - } + const IReadOnlyHashTable& operator[](std::size_t index) const { + return m_hashTableManager.GetHashTable(index); + } - IWritableHashTable& operator[](std::size_t index) - { - return m_hashTableManager.GetHashTable(index); - } + IWritableHashTable& operator[](std::size_t index) { + return m_hashTableManager.GetHashTable(index); + } - Context(const Context&) = delete; - Context& operator=(const Context&) = delete; + Context(const Context&) = delete; + Context& operator=(const Context&) = delete; -private: - HashTableManager& m_hashTableManager; + private: + HashTableManager& m_hashTableManager; }; -} // namespace LocalMemory -} // namespace L4 +} // namespace LocalMemory +} // namespace L4 diff --git a/inc/L4/LocalMemory/EpochManager.h b/inc/L4/LocalMemory/EpochManager.h index 70e032f..0b0cd8a 100644 --- a/inc/L4/LocalMemory/EpochManager.h +++ b/inc/L4/LocalMemory/EpochManager.h @@ -10,119 +10,113 @@ #include "Utils/Lock.h" #include "Utils/RunningThread.h" -namespace L4 -{ -namespace LocalMemory -{ +namespace L4 { +namespace LocalMemory { // EpochManager aggregates epoch-related functionalities such as adding/removing -// client epoch queues, registering/performing actions, and updating the epoch counters. -class EpochManager : public IEpochActionManager -{ -public: - using TheEpochQueue = EpochQueue< - boost::shared_lock_guard, - std::lock_guard>; +// client epoch queues, registering/performing actions, and updating the epoch +// counters. +class EpochManager : public IEpochActionManager { + public: + using TheEpochQueue = + EpochQueue, + std::lock_guard>; - using TheEpochRefManager = EpochRefManager; + using TheEpochRefManager = EpochRefManager; - EpochManager( - const EpochManagerConfig& config, - ServerPerfData& perfData) - : m_perfData{ perfData } - , m_config{ config } - , m_currentEpochCounter{ 0U } - , m_epochQueue{ - m_currentEpochCounter, - m_config.m_epochQueueSize } - , m_epochRefManager{ m_epochQueue } - , m_epochCounterManager{ m_epochQueue } - , m_epochActionManager{ config.m_numActionQueues } - , m_processingThread{ - m_config.m_epochProcessingInterval, - [this] - { - this->Remove(); - this->Add(); - }} - {} + EpochManager(const EpochManagerConfig& config, ServerPerfData& perfData) + : m_perfData{perfData}, + m_config{config}, + m_currentEpochCounter{0U}, + m_epochQueue{m_currentEpochCounter, m_config.m_epochQueueSize}, + m_epochRefManager{m_epochQueue}, + m_epochCounterManager{m_epochQueue}, + m_epochActionManager{config.m_numActionQueues}, + m_processingThread{m_config.m_epochProcessingInterval, [this] { + this->Remove(); + this->Add(); + }} {} - TheEpochRefManager& GetEpochRefManager() - { - return m_epochRefManager; - } + TheEpochRefManager& GetEpochRefManager() { return m_epochRefManager; } - void RegisterAction(Action&& action) override - { - m_epochActionManager.RegisterAction(m_currentEpochCounter, std::move(action)); - m_perfData.Increment(ServerPerfCounter::PendingActionsCount); - } + void RegisterAction(Action&& action) override { + m_epochActionManager.RegisterAction(m_currentEpochCounter, + std::move(action)); + m_perfData.Increment(ServerPerfCounter::PendingActionsCount); + } - EpochManager(const EpochManager&) = delete; - EpochManager& operator=(const EpochManager&) = delete; + EpochManager(const EpochManager&) = delete; + EpochManager& operator=(const EpochManager&) = delete; -private: - using TheEpochCounterManager = 
EpochCounterManager; + private: + using TheEpochCounterManager = EpochCounterManager; - using ProcessingThread = Utils::RunningThread>; + using ProcessingThread = Utils::RunningThread>; - // Enqueues a new epoch whose counter value is last counter + 1. - // This is called from the server side. - void Add() - { - // Incrementing the global epoch counter before incrementing per-connection - // epoch counter is safe (not so the other way around). If the server process is - // registering an action at the m_currentEpochCounter in RegisterAction(), - // it is happening in the "future," and this means that if the client is referencing - // the memory to be deleted in the "future," it will be safe. - ++m_currentEpochCounter; + // Enqueues a new epoch whose counter value is last counter + 1. + // This is called from the server side. + void Add() { + // Incrementing the global epoch counter before incrementing per-connection + // epoch counter is safe (not so the other way around). If the server + // process is registering an action at the m_currentEpochCounter in + // RegisterAction(), it is happening in the "future," and this means that if + // the client is referencing the memory to be deleted in the "future," it + // will be safe. + ++m_currentEpochCounter; - m_epochCounterManager.AddNewEpoch(); - } + m_epochCounterManager.AddNewEpoch(); + } - // Dequeues any epochs whose ref counter is 0, meaning there is no reference at that time. - void Remove() - { - const auto oldestEpochCounter = m_epochCounterManager.RemoveUnreferenceEpochCounters(); + // Dequeues any epochs whose ref counter is 0, meaning there is no reference + // at that time. + void Remove() { + const auto oldestEpochCounter = + m_epochCounterManager.RemoveUnreferenceEpochCounters(); - const auto numActionsPerformed = m_epochActionManager.PerformActions(oldestEpochCounter); + const auto numActionsPerformed = + m_epochActionManager.PerformActions(oldestEpochCounter); - m_perfData.Subtract(ServerPerfCounter::PendingActionsCount, numActionsPerformed); - m_perfData.Set(ServerPerfCounter::LastPerformedActionsCount, numActionsPerformed); - m_perfData.Set(ServerPerfCounter::OldestEpochCounterInQueue, oldestEpochCounter); - m_perfData.Set(ServerPerfCounter::LatestEpochCounterInQueue, m_currentEpochCounter); - } + m_perfData.Subtract(ServerPerfCounter::PendingActionsCount, + numActionsPerformed); + m_perfData.Set(ServerPerfCounter::LastPerformedActionsCount, + numActionsPerformed); + m_perfData.Set(ServerPerfCounter::OldestEpochCounterInQueue, + oldestEpochCounter); + m_perfData.Set(ServerPerfCounter::LatestEpochCounterInQueue, + m_currentEpochCounter); + } - // Reference to the performance data. - ServerPerfData& m_perfData; + // Reference to the performance data. + ServerPerfData& m_perfData; - // Configuration related to epoch manager. - EpochManagerConfig m_config; + // Configuration related to epoch manager. + EpochManagerConfig m_config; - // The global current epoch counter. + // The global current epoch counter. #if defined(_MSC_VER) - std::atomic_uint64_t m_currentEpochCounter; + std::atomic_uint64_t m_currentEpochCounter; #else - std::atomic m_currentEpochCounter; + std::atomic m_currentEpochCounter; #endif - // Epoch queue. - TheEpochQueue m_epochQueue; + // Epoch queue. + TheEpochQueue m_epochQueue; - // Handles adding/decrementing ref counts. - TheEpochRefManager m_epochRefManager; + // Handles adding/decrementing ref counts. 
+ TheEpochRefManager m_epochRefManager; - // Handles adding new epoch and finding the epoch counts that have zero ref counts. - TheEpochCounterManager m_epochCounterManager; + // Handles adding new epoch and finding the epoch counts that have zero ref + // counts. + TheEpochCounterManager m_epochCounterManager; - // Handles registering/performing actions. - EpochActionManager m_epochActionManager; + // Handles registering/performing actions. + EpochActionManager m_epochActionManager; - // Thread responsible for updating the current epoch counter, - // removing the unreferenced epoch counter, etc. - // Should be the last member so that it gets destroyed first. - ProcessingThread m_processingThread; + // Thread responsible for updating the current epoch counter, + // removing the unreferenced epoch counter, etc. + // Should be the last member so that it gets destroyed first. + ProcessingThread m_processingThread; }; -} // namespace LocalMemory -} // namespace L4 +} // namespace LocalMemory +} // namespace L4 diff --git a/inc/L4/LocalMemory/HashTableManager.h b/inc/L4/LocalMemory/HashTableManager.h index 7e9f967..048aa0a 100644 --- a/inc/L4/LocalMemory/HashTableManager.h +++ b/inc/L4/LocalMemory/HashTableManager.h @@ -3,104 +3,98 @@ #include #include #include -#include "LocalMemory/Memory.h" #include "Epoch/IEpochActionManager.h" +#include "HashTable/Cache/HashTable.h" #include "HashTable/Config.h" #include "HashTable/ReadWrite/HashTable.h" #include "HashTable/ReadWrite/Serializer.h" -#include "HashTable/Cache/HashTable.h" +#include "LocalMemory/Memory.h" #include "Utils/Containers.h" #include "Utils/Exception.h" -namespace L4 -{ -namespace LocalMemory -{ +namespace L4 { +namespace LocalMemory { -class HashTableManager -{ -public: - template - std::size_t Add( - const HashTableConfig& config, - IEpochActionManager& epochActionManager, - Allocator allocator) - { - if (m_hashTableNameToIndex.find(config.m_name) != m_hashTableNameToIndex.end()) - { - throw RuntimeException("Same hash table name already exists."); - } +class HashTableManager { + public: + template + std::size_t Add(const HashTableConfig& config, + IEpochActionManager& epochActionManager, + Allocator allocator) { + if (m_hashTableNameToIndex.find(config.m_name) != + m_hashTableNameToIndex.end()) { + throw RuntimeException("Same hash table name already exists."); + } - const auto& cacheConfig = config.m_cache; - const auto& serializerConfig = config.m_serializer; + const auto& cacheConfig = config.m_cache; + const auto& serializerConfig = config.m_serializer; - if (cacheConfig && serializerConfig) - { - throw RuntimeException( - "Constructing cache hash table via serializer is not supported."); - } + if (cacheConfig && serializerConfig) { + throw RuntimeException( + "Constructing cache hash table via serializer is not supported."); + } - using namespace HashTable; + using namespace HashTable; - using InternalHashTable = typename ReadWrite::WritableHashTable::HashTable; - using Memory = typename LocalMemory::Memory; + using InternalHashTable = + typename ReadWrite::WritableHashTable::HashTable; + using Memory = typename LocalMemory::Memory; - Memory memory{ allocator }; + Memory memory{allocator}; - std::shared_ptr internalHashTable = (serializerConfig && serializerConfig->m_stream != nullptr) - ? ReadWrite::Deserializer( - serializerConfig->m_properties.get_value_or(HashTableConfig::Serializer::Properties())). 
- Deserialize( - memory, - *(serializerConfig->m_stream)) + std::shared_ptr internalHashTable = + (serializerConfig && serializerConfig->m_stream != nullptr) + ? ReadWrite::Deserializer( + serializerConfig->m_properties.get_value_or( + HashTableConfig::Serializer::Properties())) + .Deserialize(memory, *(serializerConfig->m_stream)) : memory.template MakeUnique( - typename InternalHashTable::Setting{ - config.m_setting.m_numBuckets, - (std::max)(config.m_setting.m_numBucketsPerMutex.get_value_or(1U), 1U), - config.m_setting.m_fixedKeySize.get_value_or(0U), - config.m_setting.m_fixedValueSize.get_value_or(0U) }, - memory.GetAllocator()); + typename InternalHashTable::Setting{ + config.m_setting.m_numBuckets, + (std::max)( + config.m_setting.m_numBucketsPerMutex.get_value_or( + 1U), + 1U), + config.m_setting.m_fixedKeySize.get_value_or(0U), + config.m_setting.m_fixedValueSize.get_value_or(0U)}, + memory.GetAllocator()); - auto hashTable = - cacheConfig - ? std::make_unique>( - *internalHashTable, - epochActionManager, - cacheConfig->m_maxCacheSizeInBytes, - cacheConfig->m_recordTimeToLive, - cacheConfig->m_forceTimeBasedEviction) - : std::make_unique>( - *internalHashTable, - epochActionManager); + auto hashTable = + cacheConfig ? std::make_unique>( + *internalHashTable, epochActionManager, + cacheConfig->m_maxCacheSizeInBytes, + cacheConfig->m_recordTimeToLive, + cacheConfig->m_forceTimeBasedEviction) + : std::make_unique>( + *internalHashTable, epochActionManager); - m_internalHashTables.emplace_back(std::move(internalHashTable)); - m_hashTables.emplace_back(std::move(hashTable)); + m_internalHashTables.emplace_back(std::move(internalHashTable)); + m_hashTables.emplace_back(std::move(hashTable)); - const auto newIndex = m_hashTables.size() - 1; + const auto newIndex = m_hashTables.size() - 1; - m_hashTableNameToIndex.emplace(config.m_name, newIndex); + m_hashTableNameToIndex.emplace(config.m_name, newIndex); - return newIndex; - } + return newIndex; + } - IWritableHashTable& GetHashTable(const char* name) - { - assert(m_hashTableNameToIndex.find(name) != m_hashTableNameToIndex.cend()); - return GetHashTable(m_hashTableNameToIndex.find(name)->second); - } + IWritableHashTable& GetHashTable(const char* name) { + assert(m_hashTableNameToIndex.find(name) != m_hashTableNameToIndex.cend()); + return GetHashTable(m_hashTableNameToIndex.find(name)->second); + } - IWritableHashTable& GetHashTable(std::size_t index) - { - assert(index < m_hashTables.size()); - return *m_hashTables[index]; - } + IWritableHashTable& GetHashTable(std::size_t index) { + assert(index < m_hashTables.size()); + return *m_hashTables[index]; + } -private: - Utils::StdStringKeyMap m_hashTableNameToIndex; + private: + Utils::StdStringKeyMap m_hashTableNameToIndex; - std::vector m_internalHashTables; - std::vector> m_hashTables; + std::vector m_internalHashTables; + std::vector> m_hashTables; }; -} // namespace LocalMemory -} // namespace L4 +} // namespace LocalMemory +} // namespace L4 diff --git a/inc/L4/LocalMemory/HashTableService.h b/inc/L4/LocalMemory/HashTableService.h index ea02087..c026188 100644 --- a/inc/L4/LocalMemory/HashTableService.h +++ b/inc/L4/LocalMemory/HashTableService.h @@ -5,42 +5,35 @@ #include "HashTable/Config.h" #include "Log/PerfCounter.h" -namespace L4 -{ -namespace LocalMemory -{ +namespace L4 { +namespace LocalMemory { -class HashTableService -{ -public: - explicit HashTableService( - const EpochManagerConfig& epochManagerConfig = EpochManagerConfig()) - : m_epochManager{ epochManagerConfig, 
m_serverPerfData } - {} +class HashTableService { + public: + explicit HashTableService( + const EpochManagerConfig& epochManagerConfig = EpochManagerConfig()) + : m_epochManager{epochManagerConfig, m_serverPerfData} {} - template > - std::size_t AddHashTable( - const HashTableConfig& config, - Allocator allocator = Allocator()) - { - return m_hashTableManager.Add(config, m_epochManager, allocator); - } + template > + std::size_t AddHashTable(const HashTableConfig& config, + Allocator allocator = Allocator()) { + return m_hashTableManager.Add(config, m_epochManager, allocator); + } - Context GetContext() - { - return Context(m_hashTableManager, m_epochManager.GetEpochRefManager()); - } + Context GetContext() { + return Context(m_hashTableManager, m_epochManager.GetEpochRefManager()); + } -private: - ServerPerfData m_serverPerfData; + private: + ServerPerfData m_serverPerfData; - HashTableManager m_hashTableManager; + HashTableManager m_hashTableManager; - // Make sure HashTableManager is destroyed before EpochManager b/c - // it is possible that EpochManager could be processing Epoch Actions - // on hash tables. - EpochManager m_epochManager; + // Make sure HashTableManager is destroyed before EpochManager b/c + // it is possible that EpochManager could be processing Epoch Actions + // on hash tables. + EpochManager m_epochManager; }; -} // namespace LocalMemory -} // namespace L4 \ No newline at end of file +} // namespace LocalMemory +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/LocalMemory/Memory.h b/inc/L4/LocalMemory/Memory.h index b68e684..14be2c5 100644 --- a/inc/L4/LocalMemory/Memory.h +++ b/inc/L4/LocalMemory/Memory.h @@ -1,50 +1,40 @@ #pragma once -namespace L4 -{ -namespace LocalMemory -{ +namespace L4 { +namespace LocalMemory { // Simple local memory model that stores the given allocator object. template -class Memory -{ -public: - using Allocator = Alloc; +class Memory { + public: + using Allocator = Alloc; - template - using UniquePtr = std::unique_ptr; + template + using UniquePtr = std::unique_ptr; - template - using Deleter = typename std::default_delete; + template + using Deleter = typename std::default_delete; - explicit Memory(Allocator allocator = Allocator()) - : m_allocator{ allocator } - {} + explicit Memory(Allocator allocator = Allocator()) : m_allocator{allocator} {} - template - auto MakeUnique(Args&&... args) - { - return std::make_unique(std::forward(args)...); - } + template + auto MakeUnique(Args&&... args) { + return std::make_unique(std::forward(args)...); + } - Allocator GetAllocator() - { - return Allocator(m_allocator); - } + Allocator GetAllocator() { return Allocator(m_allocator); } - template - auto GetDeleter() - { - return Deleter(); - } + template + auto GetDeleter() { + return Deleter(); + } - Memory(const Memory&) = delete; - Memory& operator=(const Memory&) = delete; + Memory(const Memory&) = delete; + Memory& operator=(const Memory&) = delete; -private: - Allocator m_allocator; + private: + Allocator m_allocator; }; -} // namespace LocalMemory -} // namespace L4 \ No newline at end of file +} // namespace LocalMemory +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Log/IPerfLogger.h b/inc/L4/Log/IPerfLogger.h index a668d8b..f079c3b 100644 --- a/inc/L4/Log/IPerfLogger.h +++ b/inc/L4/Log/IPerfLogger.h @@ -3,36 +3,29 @@ #include #include "PerfCounter.h" - -namespace L4 -{ - +namespace L4 { // IPerfLogger interface. 
-struct IPerfLogger -{ - struct IData; +struct IPerfLogger { + struct IData; - virtual ~IPerfLogger() = default; + virtual ~IPerfLogger() = default; - virtual void Log(const IData& data) = 0; + virtual void Log(const IData& data) = 0; }; -// IPerfLogger::IData interface that provides access to ServerPerfData and the aggregated HashTablePerfData. -// Note that the user of IPerfLogger only needs to implement IPerfLogger since IPerfLogger::IData is -// implemented internally. -struct IPerfLogger::IData -{ - using HashTablesPerfData = std::map< - std::string, - std::reference_wrapper>; +// IPerfLogger::IData interface that provides access to ServerPerfData and the +// aggregated HashTablePerfData. Note that the user of IPerfLogger only needs to +// implement IPerfLogger since IPerfLogger::IData is implemented internally. +struct IPerfLogger::IData { + using HashTablesPerfData = + std::map>; - virtual ~IData() = default; + virtual ~IData() = default; - virtual const ServerPerfData& GetServerPerfData() const = 0; + virtual const ServerPerfData& GetServerPerfData() const = 0; - virtual const HashTablesPerfData& GetHashTablesPerfData() const = 0; + virtual const HashTablesPerfData& GetHashTablesPerfData() const = 0; }; - -} // namespace L4 \ No newline at end of file +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Log/PerfCounter.h b/inc/L4/Log/PerfCounter.h index 9a5979a..f797818 100644 --- a/inc/L4/Log/PerfCounter.h +++ b/inc/L4/Log/PerfCounter.h @@ -1,223 +1,191 @@ #pragma once -#include #include #include -#include #include +#include +#include -namespace L4 -{ +namespace L4 { -enum class ServerPerfCounter : std::uint16_t -{ - // Connection Manager - ClientConnectionsCount = 0U, +enum class ServerPerfCounter : std::uint16_t { + // Connection Manager + ClientConnectionsCount = 0U, - // EpochManager - OldestEpochCounterInQueue, - LatestEpochCounterInQueue, - PendingActionsCount, - LastPerformedActionsCount, + // EpochManager + OldestEpochCounterInQueue, + LatestEpochCounterInQueue, + PendingActionsCount, + LastPerformedActionsCount, - Count + Count }; -const std::array< - const char*, - static_cast(ServerPerfCounter::Count)> c_serverPerfCounterNames = -{ - // Connection Manager - "ClientConnectionsCount", +const std::array(ServerPerfCounter::Count)> + c_serverPerfCounterNames = { + // Connection Manager + "ClientConnectionsCount", - // EpochManager - "OldestEpochCounterInQueue", - "LatestEpochCounterInQueue", - "PendingActionsCount", - "LastPerformedActionsCount" -}; - -enum class HashTablePerfCounter : std::uint16_t -{ - RecordsCount = 0U, - BucketsCount, - TotalKeySize, - TotalValueSize, - TotalIndexSize, - ChainingEntriesCount, - - // Max/Min counters are always increasing. In other words, we don't keep track - // of the next max record size, when the max record is deleted. - MinKeySize, - MaxKeySize, - MinValueSize, - MaxValueSize, - MaxBucketChainLength, - - RecordsCountLoadedFromSerializer, - RecordsCountSavedFromSerializer, - - // CacheHashTable specific counters. 
- CacheHitCount, - CacheMissCount, - EvictedRecordsCount, - - Count -}; - -const std::array< - const char*, - static_cast(HashTablePerfCounter::Count)> c_hashTablePerfCounterNames = -{ - "RecordsCount", - "BucketsCount", - "TotalKeySize", - "TotalValueSize", - "TotalIndexSize", - "ChainingEntriesCount", - "MinKeySize", - "MaxKeySize", - "MinValueSize", - "MaxValueSize", - "MaxBucketChainLength", - "RecordsCountLoadedFromSerializer", - "RecordsCountSavedFromSerializer", - "CacheHitCount", - "CacheMissCount", - "EvictedRecordsCount" + // EpochManager + "OldestEpochCounterInQueue", "LatestEpochCounterInQueue", + "PendingActionsCount", "LastPerformedActionsCount"}; + +enum class HashTablePerfCounter : std::uint16_t { + RecordsCount = 0U, + BucketsCount, + TotalKeySize, + TotalValueSize, + TotalIndexSize, + ChainingEntriesCount, + + // Max/Min counters are always increasing. In other words, we don't keep track + // of the next max record size, when the max record is deleted. + MinKeySize, + MaxKeySize, + MinValueSize, + MaxValueSize, + MaxBucketChainLength, + + RecordsCountLoadedFromSerializer, + RecordsCountSavedFromSerializer, + + // CacheHashTable specific counters. + CacheHitCount, + CacheMissCount, + EvictedRecordsCount, + + Count }; +const std::array(HashTablePerfCounter::Count)> + c_hashTablePerfCounterNames = {"RecordsCount", + "BucketsCount", + "TotalKeySize", + "TotalValueSize", + "TotalIndexSize", + "ChainingEntriesCount", + "MinKeySize", + "MaxKeySize", + "MinValueSize", + "MaxValueSize", + "MaxBucketChainLength", + "RecordsCountLoadedFromSerializer", + "RecordsCountSavedFromSerializer", + "CacheHitCount", + "CacheMissCount", + "EvictedRecordsCount"}; template -class PerfCounters -{ -public: - typedef std::int64_t TValue; - typedef std::atomic TCounter; +class PerfCounters { + public: + typedef std::int64_t TValue; + typedef std::atomic TCounter; - PerfCounters() - { - std::for_each( - std::begin(m_counters), - std::end(m_counters), - [] (TCounter& counter) - { - counter = 0; - }); + PerfCounters() { + std::for_each(std::begin(m_counters), std::end(m_counters), + [](TCounter& counter) { counter = 0; }); + } + + // Note that since the ordering doesn't matter when the counter is updated, + // memory_order_relaxed is used for all perf counter updates. More from + // http://en.cppreference.com/w/cpp/atomic/memory_order: Typical use for + // relaxed memory ordering is updating counters, such as the reference + // counters of std::shared_ptr, since this only requires atomicity, but not + // ordering or synchronization. + TValue Get(TCounterEnum counterEnum) const { + return m_counters[static_cast(counterEnum)].load( + std::memory_order_relaxed); + } + + void Set(TCounterEnum counterEnum, TValue value) { + m_counters[static_cast(counterEnum)].store( + value, std::memory_order_relaxed); + } + + void Increment(TCounterEnum counterEnum) { + m_counters[static_cast(counterEnum)].fetch_add( + 1, std::memory_order_relaxed); + } + + void Decrement(TCounterEnum counterEnum) { + m_counters[static_cast(counterEnum)].fetch_sub( + 1, std::memory_order_relaxed); + } + + void Add(TCounterEnum counterEnum, TValue value) { + if (value != 0) { + m_counters[static_cast(counterEnum)].fetch_add( + value, std::memory_order_relaxed); } + } - // Note that since the ordering doesn't matter when the counter is updated, memory_order_relaxed - // is used for all perf counter updates. 
- // More from http://en.cppreference.com/w/cpp/atomic/memory_order: - // Typical use for relaxed memory ordering is updating counters, such as the reference counters - // of std::shared_ptr, since this only requires atomicity, but not ordering or synchronization. - TValue Get(TCounterEnum counterEnum) const - { - return m_counters[static_cast(counterEnum)].load(std::memory_order_relaxed); + void Subtract(TCounterEnum counterEnum, TValue value) { + if (value != 0) { + m_counters[static_cast(counterEnum)].fetch_sub( + value, std::memory_order_relaxed); } + } - void Set(TCounterEnum counterEnum, TValue value) - { - m_counters[static_cast(counterEnum)].store(value, std::memory_order_relaxed); - } + void Max(TCounterEnum counterEnum, TValue value) { + auto& counter = m_counters[static_cast(counterEnum)]; - void Increment(TCounterEnum counterEnum) - { - m_counters[static_cast(counterEnum)].fetch_add(1, std::memory_order_relaxed); - } + TValue startValue = counter.load(std::memory_order_acquire); - void Decrement(TCounterEnum counterEnum) - { - m_counters[static_cast(counterEnum)].fetch_sub(1, std::memory_order_relaxed); - } + do { + // "load()" from counter is needed only once since the value of Max is + // monotonically increasing. If startValue is changed by other threads, + // compare_exchange_strong will return false and startValue will be + // written to the latest value, thus returning to this code path. + if (startValue > value) { + return; + } + } while (!counter.compare_exchange_strong(startValue, value, + std::memory_order_release, + std::memory_order_acquire)); + } - void Add(TCounterEnum counterEnum, TValue value) - { - if (value != 0) - { - m_counters[static_cast(counterEnum)].fetch_add(value, std::memory_order_relaxed); - } - } + void Min(TCounterEnum counterEnum, TValue value) { + auto& counter = m_counters[static_cast(counterEnum)]; - void Subtract(TCounterEnum counterEnum, TValue value) - { - if (value != 0) - { - m_counters[static_cast(counterEnum)].fetch_sub(value, std::memory_order_relaxed); - } - } + TValue startValue = counter.load(std::memory_order_acquire); + do { + // Check the comment in Max() and Min() is monotonically decreasing. + if (startValue < value) { + return; + } + } while (!counter.compare_exchange_strong(startValue, value, + std::memory_order_release, + std::memory_order_acquire)); + } - void Max(TCounterEnum counterEnum, TValue value) - { - auto& counter = m_counters[static_cast(counterEnum)]; - - TValue startValue = counter.load(std::memory_order_acquire); - - do - { - // "load()" from counter is needed only once since the value of Max is - // monotonically increasing. If startValue is changed by other threads, - // compare_exchange_strong will return false and startValue will be - // written to the latest value, thus returning to this code path. - if (startValue > value) - { - return; - } - } - while (!counter.compare_exchange_strong( - startValue, - value, - std::memory_order_release, - std::memory_order_acquire)); - } - - void Min(TCounterEnum counterEnum, TValue value) - { - auto& counter = m_counters[static_cast(counterEnum)]; - - TValue startValue = counter.load(std::memory_order_acquire); - do - { - // Check the comment in Max() and Min() is monotonically decreasing. 
- if (startValue < value) - { - return; - } - } - while (!counter.compare_exchange_strong( - startValue, - value, - std::memory_order_release, - std::memory_order_acquire)); - } - -private: + private: #if defined(_MSC_VER) - __declspec(align(8)) TCounter m_counters[TCounterEnum::Count]; + __declspec(align(8)) TCounter m_counters[TCounterEnum::Count]; #else #if defined(__GNUC__) - TCounter m_counters[static_cast(TCounterEnum::Count)] - __attribute__((aligned(8))); + TCounter m_counters[static_cast(TCounterEnum::Count)] + __attribute__((aligned(8))); #endif #endif }; typedef PerfCounters ServerPerfData; -struct HashTablePerfData : public PerfCounters -{ - HashTablePerfData() - { - // Initialize any min counters to the max value. - const auto maxValue = (std::numeric_limits::max)(); +struct HashTablePerfData : public PerfCounters { + HashTablePerfData() { + // Initialize any min counters to the max value. + const auto maxValue = + (std::numeric_limits::max)(); - Set(HashTablePerfCounter::MinValueSize, maxValue); - Set(HashTablePerfCounter::MinKeySize, maxValue); + Set(HashTablePerfCounter::MinValueSize, maxValue); + Set(HashTablePerfCounter::MinKeySize, maxValue); - // MaxBucketChainLength starts with 1 since bucket already - // contains the entry which stores the data. - Set(HashTablePerfCounter::MaxBucketChainLength, 1); - } + // MaxBucketChainLength starts with 1 since bucket already + // contains the entry which stores the data. + Set(HashTablePerfCounter::MaxBucketChainLength, 1); + } }; -} // namespace L4 +} // namespace L4 diff --git a/inc/L4/Log/PerfLogger.h b/inc/L4/Log/PerfLogger.h index b8d2c2f..87a5523 100644 --- a/inc/L4/Log/PerfLogger.h +++ b/inc/L4/Log/PerfLogger.h @@ -2,55 +2,48 @@ #include "IPerfLogger.h" -namespace L4 -{ - +namespace L4 { struct PerfLoggerManagerConfig; +// PerfData class, which holds the ServerPerfData and HashTablePerfData for each +// hash table. Note that PerfData owns the ServerPerfData but has only the const +// references to HashTablePerfData, which is owned by the HashTable. -// PerfData class, which holds the ServerPerfData and HashTablePerfData for each hash table. -// Note that PerfData owns the ServerPerfData but has only the const references to HashTablePerfData, -// which is owned by the HashTable. +class PerfData : public IPerfLogger::IData { + public: + PerfData() = default; -class PerfData : public IPerfLogger::IData -{ -public: - PerfData() = default; + ServerPerfData& GetServerPerfData(); - ServerPerfData& GetServerPerfData(); + const ServerPerfData& GetServerPerfData() const override; - const ServerPerfData& GetServerPerfData() const override; + const HashTablesPerfData& GetHashTablesPerfData() const override; - const HashTablesPerfData& GetHashTablesPerfData() const override; + void AddHashTablePerfData(const char* hashTableName, + const HashTablePerfData& perfData); - void AddHashTablePerfData(const char* hashTableName, const HashTablePerfData& perfData); + PerfData(const PerfData&) = delete; + PerfData& operator=(const PerfData&) = delete; - PerfData(const PerfData&) = delete; - PerfData& operator=(const PerfData&) = delete; - -private: - ServerPerfData m_serverPerfData; - HashTablesPerfData m_hashTablesPerfData; + private: + ServerPerfData m_serverPerfData; + HashTablesPerfData m_hashTablesPerfData; }; - // PerfData inline implementations. 
-inline ServerPerfData& PerfData::GetServerPerfData() -{ - return m_serverPerfData; +inline ServerPerfData& PerfData::GetServerPerfData() { + return m_serverPerfData; } -inline const ServerPerfData& PerfData::GetServerPerfData() const -{ - return m_serverPerfData; +inline const ServerPerfData& PerfData::GetServerPerfData() const { + return m_serverPerfData; } -inline const PerfData::HashTablesPerfData& PerfData::GetHashTablesPerfData() const -{ - return m_hashTablesPerfData; +inline const PerfData::HashTablesPerfData& PerfData::GetHashTablesPerfData() + const { + return m_hashTablesPerfData; } - -} // namespace L4 \ No newline at end of file +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Serialization/SerializerHelper.h b/inc/L4/Serialization/SerializerHelper.h index eff510b..75b2c1b 100644 --- a/inc/L4/Serialization/SerializerHelper.h +++ b/inc/L4/Serialization/SerializerHelper.h @@ -3,62 +3,48 @@ #include #include -namespace L4 -{ +namespace L4 { // SerializerHelper provides help functions to write to IStreamWriter. -class SerializerHelper -{ -public: - SerializerHelper(std::ostream& stream) - : m_stream{ stream } - {} +class SerializerHelper { + public: + SerializerHelper(std::ostream& stream) : m_stream{stream} {} - SerializerHelper(const SerializerHelper&) = delete; - SerializerHelper& operator=(const SerializerHelper&) = delete; + SerializerHelper(const SerializerHelper&) = delete; + SerializerHelper& operator=(const SerializerHelper&) = delete; - template - void Serialize(const T& obj) - { - m_stream.write(reinterpret_cast(&obj), sizeof(obj)); - } + template + void Serialize(const T& obj) { + m_stream.write(reinterpret_cast(&obj), sizeof(obj)); + } - void Serialize(const void* data, std::uint32_t dataSize) - { - m_stream.write(static_cast(data), dataSize); - } + void Serialize(const void* data, std::uint32_t dataSize) { + m_stream.write(static_cast(data), dataSize); + } -private: - std::ostream& m_stream; + private: + std::ostream& m_stream; }; - // DeserializerHelper provides help functions to read from IStreamReader. -class DeserializerHelper -{ -public: - DeserializerHelper(std::istream& stream) - : m_stream{ stream } - {} +class DeserializerHelper { + public: + DeserializerHelper(std::istream& stream) : m_stream{stream} {} - DeserializerHelper(const DeserializerHelper&) = delete; - DeserializerHelper& operator=(const DeserializerHelper&) = delete; + DeserializerHelper(const DeserializerHelper&) = delete; + DeserializerHelper& operator=(const DeserializerHelper&) = delete; - template - void Deserialize(T& obj) - { - m_stream.read(reinterpret_cast(&obj), sizeof(obj)); - } + template + void Deserialize(T& obj) { + m_stream.read(reinterpret_cast(&obj), sizeof(obj)); + } - void Deserialize(void* data, std::uint32_t dataSize) - { - m_stream.read(static_cast(data), dataSize); - } + void Deserialize(void* data, std::uint32_t dataSize) { + m_stream.read(static_cast(data), dataSize); + } -private: - std::istream& m_stream; + private: + std::istream& m_stream; }; - -} // namespace L4 - +} // namespace L4 diff --git a/inc/L4/Utils/AtomicOffsetPtr.h b/inc/L4/Utils/AtomicOffsetPtr.h index f990f58..6f22517 100644 --- a/inc/L4/Utils/AtomicOffsetPtr.h +++ b/inc/L4/Utils/AtomicOffsetPtr.h @@ -1,60 +1,56 @@ #pragma once #include -#include -#include #include +#include +#include -namespace L4 -{ -namespace Utils -{ - +namespace L4 { +namespace Utils { // AtomicOffsetPtr provides a way to atomically update the offset pointer. 
-// The current boost::interprocess::offset_ptr cannot be used with std::atomic<> because -// the class is not trivially copyable. AtomicOffsetPtr borrows the same concept to calculate -// the pointer address based on the offset (boost::interprocess::ipcdetail::offset_ptr_to* functions -// are reused). -// Note that ->, *, copy/assignment operators are not implemented intentionally so that -// the user (inside this library) is aware of what he is intended to do without accidentally -// incurring any performance hits. +// The current boost::interprocess::offset_ptr cannot be used with std::atomic<> +// because the class is not trivially copyable. AtomicOffsetPtr borrows the same +// concept to calculate the pointer address based on the offset +// (boost::interprocess::ipcdetail::offset_ptr_to* functions are reused). Note +// that ->, *, copy/assignment operators are not implemented intentionally so +// that the user (inside this library) is aware of what he is intended to do +// without accidentally incurring any performance hits. template -class AtomicOffsetPtr -{ -public: - AtomicOffsetPtr() - : m_offset(1) - {} +class AtomicOffsetPtr { + public: + AtomicOffsetPtr() : m_offset(1) {} - AtomicOffsetPtr(const AtomicOffsetPtr&) = delete; - AtomicOffsetPtr& operator=(const AtomicOffsetPtr&) = delete; + AtomicOffsetPtr(const AtomicOffsetPtr&) = delete; + AtomicOffsetPtr& operator=(const AtomicOffsetPtr&) = delete; - T* Load(std::memory_order memoryOrder = std::memory_order_seq_cst) const - { - return static_cast( - boost::interprocess::ipcdetail::offset_ptr_to_raw_pointer( - this, - m_offset.load(memoryOrder))); - } + T* Load(std::memory_order memoryOrder = std::memory_order_seq_cst) const { + return static_cast( + boost::interprocess::ipcdetail::offset_ptr_to_raw_pointer( + this, m_offset.load(memoryOrder))); + } - void Store(T* ptr, std::memory_order memoryOrder = std::memory_order_seq_cst) - { + void Store(T* ptr, + std::memory_order memoryOrder = std::memory_order_seq_cst) { #if defined(_MSC_VER) - m_offset.store(boost::interprocess::ipcdetail::offset_ptr_to_offset(ptr, this), memoryOrder); + m_offset.store( + boost::interprocess::ipcdetail::offset_ptr_to_offset(ptr, this), + memoryOrder); #else - m_offset.store(boost::interprocess::ipcdetail::offset_ptr_to_offset(ptr, this), memoryOrder); + m_offset.store( + boost::interprocess::ipcdetail::offset_ptr_to_offset( + ptr, this), + memoryOrder); #endif - } + } -private: + private: #if defined(_MSC_VER) - std::atomic_uint64_t m_offset; + std::atomic_uint64_t m_offset; #else - std::atomic m_offset; + std::atomic m_offset; #endif }; - -} // namespace Utils -} // namespace L4 +} // namespace Utils +} // namespace L4 diff --git a/inc/L4/Utils/Clock.h b/inc/L4/Utils/Clock.h index 4d83cb4..5fdb4ef 100644 --- a/inc/L4/Utils/Clock.h +++ b/inc/L4/Utils/Clock.h @@ -2,23 +2,16 @@ #include +namespace L4 { +namespace Utils { -namespace L4 -{ -namespace Utils -{ - - -class EpochClock -{ -public: - std::chrono::seconds GetCurrentEpochTime() const - { - return std::chrono::duration_cast( - std::chrono::high_resolution_clock::now().time_since_epoch()); - } +class EpochClock { + public: + std::chrono::seconds GetCurrentEpochTime() const { + return std::chrono::duration_cast( + std::chrono::high_resolution_clock::now().time_since_epoch()); + } }; - -} // namespace Utils -} // namespace L4 +} // namespace Utils +} // namespace L4 diff --git a/inc/L4/Utils/ComparerHasher.h b/inc/L4/Utils/ComparerHasher.h index 202c53e..142a4d0 100644 --- 
a/inc/L4/Utils/ComparerHasher.h +++ b/inc/L4/Utils/ComparerHasher.h @@ -1,72 +1,62 @@ #pragma once +#include #include #include #include -#include #if defined(__GNUC__) #define _stricmp strcasecmp #endif -namespace L4 -{ -namespace Utils -{ +namespace L4 { +namespace Utils { - -// CaseInsensitiveStdStringComparer is a STL-compatible case-insensitive ANSI std::string comparer. -struct CaseInsensitiveStdStringComparer -{ - bool operator()(const std::string& str1, const std::string& str2) const - { - return _stricmp(str1.c_str(), str2.c_str()) == 0; - } +// CaseInsensitiveStdStringComparer is a STL-compatible case-insensitive ANSI +// std::string comparer. +struct CaseInsensitiveStdStringComparer { + bool operator()(const std::string& str1, const std::string& str2) const { + return _stricmp(str1.c_str(), str2.c_str()) == 0; + } }; -// CaseInsensitiveStringComparer is a STL-compatible case-insensitive ANSI string comparer. -struct CaseInsensitiveStringComparer -{ - bool operator()(const char* const str1, const char* const str2) const - { - return _stricmp(str1, str2) == 0; - } +// CaseInsensitiveStringComparer is a STL-compatible case-insensitive ANSI +// string comparer. +struct CaseInsensitiveStringComparer { + bool operator()(const char* const str1, const char* const str2) const { + return _stricmp(str1, str2) == 0; + } }; -// CaseInsensitiveStringHasher is a STL-compatible case-insensitive ANSI std::string hasher. -struct CaseInsensitiveStdStringHasher -{ - std::size_t operator()(const std::string& str) const - { - std::size_t seed = 0; +// CaseInsensitiveStringHasher is a STL-compatible case-insensitive ANSI +// std::string hasher. +struct CaseInsensitiveStdStringHasher { + std::size_t operator()(const std::string& str) const { + std::size_t seed = 0; - for (auto c : str) - { - boost::hash_combine(seed, std::toupper(c)); - } - - return seed; + for (auto c : str) { + boost::hash_combine(seed, std::toupper(c)); } + + return seed; + } }; -// CaseInsensitiveStringHasher is a STL-compatible case-insensitive ANSI string hasher. -struct CaseInsensitiveStringHasher -{ - std::size_t operator()(const char* str) const - { - assert(str != nullptr); +// CaseInsensitiveStringHasher is a STL-compatible case-insensitive ANSI string +// hasher. +struct CaseInsensitiveStringHasher { + std::size_t operator()(const char* str) const { + assert(str != nullptr); - std::size_t seed = 0; + std::size_t seed = 0; - while (*str) - { - boost::hash_combine(seed, std::toupper(*str++)); - } - - return seed; + while (*str) { + boost::hash_combine(seed, std::toupper(*str++)); } + + return seed; + } }; - -} // namespace Utils -} // namespace L4 +} // namespace Utils +} // namespace L4 diff --git a/inc/L4/Utils/Containers.h b/inc/L4/Utils/Containers.h index c1a2eb1..010a179 100644 --- a/inc/L4/Utils/Containers.h +++ b/inc/L4/Utils/Containers.h @@ -1,45 +1,37 @@ #pragma once +#include #include #include #include -#include #include "Utils/ComparerHasher.h" +namespace L4 { +namespace Utils { -namespace L4 -{ -namespace Utils -{ - - -// StdStringKeyMap is an unordered_map where the key is std::string. It is slower than -// StringKeyMap above, but it owns the memory of the string, so it's easier to use. +// StdStringKeyMap is an unordered_map where the key is std::string. It is +// slower than StringKeyMap above, but it owns the memory of the string, so it's +// easier to use. 
 template <typename TValue>
-using StdStringKeyMap = std::unordered_map<
-    std::string,
-    TValue,
-    Utils::CaseInsensitiveStdStringHasher,
-    Utils::CaseInsensitiveStdStringComparer>;
+using StdStringKeyMap =
+    std::unordered_map<std::string,
+                       TValue,
+                       Utils::CaseInsensitiveStdStringHasher,
+                       Utils::CaseInsensitiveStdStringComparer>;
 
 // StringKeyMap is an unordered_map where the key is const char*.
 // The memory of the key is not owned by StringKeyMap,
 // but it is faster (than StdStringKeyMap below) for look up.
 template <typename TValue>
-using StringKeyMap = std::unordered_map<
-    const char*,
-    TValue,
-    Utils::CaseInsensitiveStringHasher,
-    Utils::CaseInsensitiveStringComparer>;
+using StringKeyMap = std::unordered_map<const char*,
+                                        TValue,
+                                        Utils::CaseInsensitiveStringHasher,
+                                        Utils::CaseInsensitiveStringComparer>;
 
 // IntegerKeyMap using boost::hash and std::equal_to comparer and hasher.
 template <typename TKey, typename TValue>
-using IntegerKeyMap = std::unordered_map<
-    TKey,
-    TValue,
-    boost::hash<TKey>,
-    std::equal_to<TKey>>;
+using IntegerKeyMap =
+    std::unordered_map<TKey, TValue, boost::hash<TKey>, std::equal_to<TKey>>;
 
-
-} // namespace Utils
-} // namespace L4
\ No newline at end of file
+} // namespace Utils
+} // namespace L4
\ No newline at end of file
diff --git a/inc/L4/Utils/Exception.h b/inc/L4/Utils/Exception.h
index 4597d0d..bb6c8e0 100644
--- a/inc/L4/Utils/Exception.h
+++ b/inc/L4/Utils/Exception.h
@@ -1,22 +1,18 @@
 #pragma once
 
-#include
 #include
+#include
 
-namespace L4
-{
+namespace L4 {
 
 // RuntimeException class used across L4 library.
-class RuntimeException : public std::runtime_error
-{
-public:
-    explicit RuntimeException(const std::string& message)
-        : std::runtime_error(message.c_str())
-    {}
+class RuntimeException : public std::runtime_error {
+ public:
+  explicit RuntimeException(const std::string& message)
+      : std::runtime_error(message.c_str()) {}
 
-    explicit RuntimeException(const char* message)
-        : std::runtime_error(message)
-    {}
+  explicit RuntimeException(const char* message)
+      : std::runtime_error(message) {}
 };
 
-} // namespace L4
+} // namespace L4
diff --git a/inc/L4/Utils/Lock.h b/inc/L4/Utils/Lock.h
index 27b9f0e..cf0b3ee 100644
--- a/inc/L4/Utils/Lock.h
+++ b/inc/L4/Utils/Lock.h
@@ -12,150 +12,96 @@
 #endif
 #endif
 
-
-namespace L4
-{
-namespace Utils
-{
+namespace L4 {
+namespace Utils {
 
 #if defined(_MSC_VER)
 
 // Represents a RAII wrapper for Win32 CRITICAL_SECTION.
-class CriticalSection : protected ::CRITICAL_SECTION
-{
-public:
-    // Constructs and initializes the critical section.
-    CriticalSection()
-    {
-        ::InitializeCriticalSection(this);
-    }
+class CriticalSection : protected ::CRITICAL_SECTION {
+ public:
+  // Constructs and initializes the critical section.
+  CriticalSection() { ::InitializeCriticalSection(this); }
 
-    CriticalSection(const CriticalSection& other) = delete;
-    CriticalSection& operator=(const CriticalSection& other) = delete;
+  CriticalSection(const CriticalSection& other) = delete;
+  CriticalSection& operator=(const CriticalSection& other) = delete;
 
-    // Destructs the critical section.
-    ~CriticalSection()
-    {
-        ::DeleteCriticalSection(this);
-    }
+  // Destructs the critical section.
+  ~CriticalSection() { ::DeleteCriticalSection(this); }
 
-    // Waits for ownership of the critical section.
-    void lock()
-    {
-        ::EnterCriticalSection(this);
-    }
+  // Waits for ownership of the critical section.
+  void lock() { ::EnterCriticalSection(this); }
 
-    // Releases ownership of the critical section.
-    void unlock()
-    {
-        ::LeaveCriticalSection(this);
-    }
+  // Releases ownership of the critical section.
+  void unlock() { ::LeaveCriticalSection(this); }
 };
 
 // Represents a RAII wrapper for Win32 SRW lock.
-class ReaderWriterLockSlim
-{
-public:
-    // Constructs and initializes an SRW lock.
- ReaderWriterLockSlim() - { - ::InitializeSRWLock(&m_lock); - } +class ReaderWriterLockSlim { + public: + // Constructs and initializes an SRW lock. + ReaderWriterLockSlim() { ::InitializeSRWLock(&m_lock); } - ReaderWriterLockSlim(const ReaderWriterLockSlim& other) = delete; - ReaderWriterLockSlim& operator=(const ReaderWriterLockSlim& other) = delete; + ReaderWriterLockSlim(const ReaderWriterLockSlim& other) = delete; + ReaderWriterLockSlim& operator=(const ReaderWriterLockSlim& other) = delete; - // Acquires an SRW lock in shared mode. - void lock_shared() - { - ::AcquireSRWLockShared(&m_lock); - } + // Acquires an SRW lock in shared mode. + void lock_shared() { ::AcquireSRWLockShared(&m_lock); } - // Acquires an SRW lock in exclusive mode. - void lock() - { - ::AcquireSRWLockExclusive(&m_lock); - } + // Acquires an SRW lock in exclusive mode. + void lock() { ::AcquireSRWLockExclusive(&m_lock); } - // Releases an SRW lock that was opened in shared mode. - void unlock_shared() - { - ::ReleaseSRWLockShared(&m_lock); - } + // Releases an SRW lock that was opened in shared mode. + void unlock_shared() { ::ReleaseSRWLockShared(&m_lock); } - // Releases an SRW lock that was opened in exclusive mode. - void unlock() - { - ::ReleaseSRWLockExclusive(&m_lock); - } + // Releases an SRW lock that was opened in exclusive mode. + void unlock() { ::ReleaseSRWLockExclusive(&m_lock); } -private: - // Stores the Win32 SRW lock. - ::SRWLOCK m_lock; + private: + // Stores the Win32 SRW lock. + ::SRWLOCK m_lock; }; #else #if defined(__GNUC__) -class CriticalSection -{ -public: - CriticalSection() - : m_mutex{} - {} +class CriticalSection { + public: + CriticalSection() : m_mutex{} {} - CriticalSection(const CriticalSection& other) = delete; - CriticalSection& operator=(const CriticalSection& other) = delete; + CriticalSection(const CriticalSection& other) = delete; + CriticalSection& operator=(const CriticalSection& other) = delete; - ~CriticalSection() = default; + ~CriticalSection() = default; - void lock() - { - pthread_mutex_lock(&m_mutex); - } + void lock() { pthread_mutex_lock(&m_mutex); } - void unlock() - { - pthread_mutex_unlock(&m_mutex); - } + void unlock() { pthread_mutex_unlock(&m_mutex); } -private: - pthread_mutex_t m_mutex; + private: + pthread_mutex_t m_mutex; }; -class ReaderWriterLockSlim -{ -public: - ReaderWriterLockSlim() = default; - ReaderWriterLockSlim(const ReaderWriterLockSlim& other) = delete; - ReaderWriterLockSlim& operator=(const ReaderWriterLockSlim& other) = delete; +class ReaderWriterLockSlim { + public: + ReaderWriterLockSlim() = default; + ReaderWriterLockSlim(const ReaderWriterLockSlim& other) = delete; + ReaderWriterLockSlim& operator=(const ReaderWriterLockSlim& other) = delete; - void lock_shared() - { - pthread_rwlock_rdlock(&m_lock); - } + void lock_shared() { pthread_rwlock_rdlock(&m_lock); } - void lock() - { - pthread_rwlock_wrlock(&m_lock); - } + void lock() { pthread_rwlock_wrlock(&m_lock); } - void unlock_shared() - { - pthread_rwlock_unlock(&m_lock); - } + void unlock_shared() { pthread_rwlock_unlock(&m_lock); } - void unlock() - { - unlock_shared(); - } + void unlock() { unlock_shared(); } -private: - pthread_rwlock_t m_lock = PTHREAD_RWLOCK_INITIALIZER; + private: + pthread_rwlock_t m_lock = PTHREAD_RWLOCK_INITIALIZER; }; #endif #endif -} // namespace Utils -} // namespace L4 +} // namespace Utils +} // namespace L4 diff --git a/inc/L4/Utils/Math.h b/inc/L4/Utils/Math.h index 3fbdea5..65076bb 100644 --- a/inc/L4/Utils/Math.h +++ b/inc/L4/Utils/Math.h @@ 
-1,79 +1,64 @@
 #pragma once
 
-#include
-#include
 #include
+#include
+#include
 
-
-namespace L4
-{
-namespace Utils
-{
-namespace Math
-{
-
+namespace L4 {
+namespace Utils {
+namespace Math {
 
 // Rounds up the number to the nearest multiple of base.
-inline std::uint64_t RoundUp(std::uint64_t number, std::uint64_t base)
-{
-    return base ? (((number + base - 1) / base) * base) : number;
+inline std::uint64_t RoundUp(std::uint64_t number, std::uint64_t base) {
+  return base ? (((number + base - 1) / base) * base) : number;
 }
 
 // Rounds down the number to the nearest multiple of base.
-inline std::uint64_t RoundDown(std::uint64_t number, std::uint64_t base)
-{
-    return base ? ((number / base) * base) : number;
+inline std::uint64_t RoundDown(std::uint64_t number, std::uint64_t base) {
+  return base ? ((number / base) * base) : number;
 }
 
 // Returns true if the given number is a power of 2.
-inline bool IsPowerOfTwo(std::uint64_t number)
-{
-    return number && ((number & (number - 1)) == 0);
+inline bool IsPowerOfTwo(std::uint64_t number) {
+  return number && ((number & (number - 1)) == 0);
 }
 
 // Returns the next highest power of two from the given value.
 // http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2.
-inline std::uint32_t NextHighestPowerOfTwo(std::uint32_t val)
-{
-    --val;
-    val |= val >> 1;
-    val |= val >> 2;
-    val |= val >> 4;
-    val |= val >> 8;
-    val |= val >> 16;
-    return ++val;
+inline std::uint32_t NextHighestPowerOfTwo(std::uint32_t val) {
+  --val;
+  val |= val >> 1;
+  val |= val >> 2;
+  val |= val >> 4;
+  val |= val >> 8;
+  val |= val >> 16;
+  return ++val;
 }
 
-
 // Provides utility functions doing pointer related arithmetics.
-namespace PointerArithmetic
-{
-
+namespace PointerArithmetic {
 
 // Returns a new pointer after adding an offset.
 template <typename T>
-inline T* Add(T* ptr, std::size_t offset)
-{
-    return reinterpret_cast<T*>(reinterpret_cast(ptr) + offset);
+inline T* Add(T* ptr, std::size_t offset) {
+  return reinterpret_cast<T*>(reinterpret_cast(ptr) + offset);
 }
 
 // Returns a new pointer after subtracting an offset.
 template <typename T>
-inline T* Subtract(T* ptr, std::size_t offset)
-{
-    return reinterpret_cast<T*>(reinterpret_cast(ptr) - offset);
+inline T* Subtract(T* ptr, std::size_t offset) {
+  return reinterpret_cast<T*>(reinterpret_cast(ptr) - offset);
 }
 
-// Returns the absolute value of difference in the number of bytes between two pointers.
-inline std::size_t Distance(const void* lhs, const void* rhs)
-{
-    return std::abs(reinterpret_cast(lhs) - reinterpret_cast(rhs));
+// Returns the absolute value of difference in the number of bytes between two
+// pointers.
+inline std::size_t Distance(const void* lhs, const void* rhs) {
+  return std::abs(reinterpret_cast(lhs) -
+                  reinterpret_cast(rhs));
 }
 
+} // namespace PointerArithmetic
 
-} // namespace PointerArithmetic
-
-
-} // namespace Math
-} // namespace Utils
-} // namespace L4
\ No newline at end of file
+} // namespace Math
+} // namespace Utils
+} // namespace L4
\ No newline at end of file
diff --git a/inc/L4/Utils/Properties.h b/inc/L4/Utils/Properties.h
index d8e3c09..f65ee25 100644
--- a/inc/L4/Utils/Properties.h
+++ b/inc/L4/Utils/Properties.h
@@ -4,53 +4,41 @@
 
 #include
 
-
-namespace L4
-{
-namespace Utils
-{
-
+namespace L4 {
+namespace Utils {
 
 // Properties class represents a string to string map (case insensitive).
 // It can be used where the configurations should be generic.
-class Properties : public StdStringKeyMap<std::string>
-{
-public:
-    using Base = Utils::StdStringKeyMap<std::string>;
-    using Value = Base::value_type;
+class Properties : public StdStringKeyMap<std::string> {
+ public:
+  using Base = Utils::StdStringKeyMap<std::string>;
+  using Value = Base::value_type;
 
-    Properties() = default;
+  Properties() = default;
 
-    // Expose a constructor with initializer_list for convenience.
-    Properties(std::initializer_list<Value> values)
-        : Base(values)
-    {
+  // Expose a constructor with initializer_list for convenience.
+  Properties(std::initializer_list<Value> values) : Base(values) {}
+
+  // Returns true if the given key exists and the value associated with
+  // the key can be converted to the TValue type. If the conversion fails, the
+  // value of the given val is guaranteed to remain the same.
+  template <typename TValue>
+  bool TryGet(const std::string& key, TValue& val) const {
+    const auto it = find(key);
+    if (it == end()) {
+      return false;
     }
 
-    // Returns true if the given key exists and the value associated with
-    // the key can be converted to the TValue type. If the conversion fails, the value
-    // of the given val is guaranteed to remain the same.
-    template <typename TValue>
-    bool TryGet(const std::string& key, TValue& val) const
-    {
-        const auto it = find(key);
-        if (it == end())
-        {
-            return false;
-        }
-
-        TValue tmp;
-        if (!boost::conversion::try_lexical_convert(it->second, tmp))
-        {
-            return false;
-        }
-
-        val = tmp;
-
-        return true;
+    TValue tmp;
+    if (!boost::conversion::try_lexical_convert(it->second, tmp)) {
+      return false;
     }
+
+    val = tmp;
+
+    return true;
+  }
 };
 
-
-} // namespace Utils
-} // namespace L4
\ No newline at end of file
+} // namespace Utils
+} // namespace L4
\ No newline at end of file
diff --git a/inc/L4/Utils/RunningThread.h b/inc/L4/Utils/RunningThread.h
index cf7c88b..3a3a5d3 100644
--- a/inc/L4/Utils/RunningThread.h
+++ b/inc/L4/Utils/RunningThread.h
@@ -1,79 +1,60 @@
 #pragma once
 
+#include
 #include
 #include
 #include
-#include
-
-
-namespace L4
-{
-namespace Utils
-{
+namespace L4 {
+namespace Utils {
 
 // NoOp is a function object that doesn't do anything.
-struct NoOp
-{
-    void operator()(...) {}
+struct NoOp {
+  void operator()(...) {}
 };
 
-// RunningThread wraps around std::thread and repeatedly runs a given function after yielding
-// for the given interval. Note that the destructor waits for the thread to stop.
+// RunningThread wraps around std::thread and repeatedly runs a given function
+// after yielding for the given interval. Note that the destructor waits for the
+// thread to stop.
template -class RunningThread -{ -public: - RunningThread( - std::chrono::milliseconds interval, - CoreFunc coreFunc, - PrepFunc prepFunc = PrepFunc()) - : m_isRunning(), - m_thread( - &RunningThread::Start, - this, - interval, - coreFunc, - prepFunc) - { +class RunningThread { + public: + RunningThread(std::chrono::milliseconds interval, + CoreFunc coreFunc, + PrepFunc prepFunc = PrepFunc()) + : m_isRunning(), + m_thread(&RunningThread::Start, this, interval, coreFunc, prepFunc) {} + + ~RunningThread() { + m_isRunning.store(false); + + if (m_thread.joinable()) { + m_thread.join(); } + } - ~RunningThread() - { - m_isRunning.store(false); + RunningThread(const RunningThread&) = delete; + RunningThread& operator=(const RunningThread&) = delete; - if (m_thread.joinable()) - { - m_thread.join(); - } + private: + void Start(std::chrono::milliseconds interval, + CoreFunc coreFunc, + PrepFunc prepFunc) { + m_isRunning.store(true); + + prepFunc(); + + while (m_isRunning.load()) { + coreFunc(); + + std::this_thread::sleep_for(interval); } + } - RunningThread(const RunningThread&) = delete; - RunningThread& operator=(const RunningThread&) = delete; + std::atomic_bool m_isRunning; -private: - void Start( - std::chrono::milliseconds interval, - CoreFunc coreFunc, - PrepFunc prepFunc) - { - m_isRunning.store(true); - - prepFunc(); - - while (m_isRunning.load()) - { - coreFunc(); - - std::this_thread::sleep_for(interval); - } - } - - std::atomic_bool m_isRunning; - - std::thread m_thread; + std::thread m_thread; }; - -} // namespace Utils -} // namespace L4 +} // namespace Utils +} // namespace L4 diff --git a/inc/L4/Utils/Windows.h b/inc/L4/Utils/Windows.h index 1ee6dd0..d459e40 100644 --- a/inc/L4/Utils/Windows.h +++ b/inc/L4/Utils/Windows.h @@ -2,38 +2,38 @@ // Allow macro redefinition. 
#pragma warning(push) -#pragma warning(disable:4005) +#pragma warning(disable : 4005) // Explicitly excluding API groups //#define NOGDICAPMASKS // - CC_*, LC_*, PC_*, CP_*, TC_*, RC_ -#define NOVIRTUALKEYCODES // - VK_* +#define NOVIRTUALKEYCODES // - VK_* //#define NOWINMESSAGES // - WM_*, EM_*, LB_*, CB_* -#define NOWINSTYLES // - WS_*, CS_*, ES_*, LBS_*, SBS_*, CBS_* -#define NOSYSMETRICS // - SM_* -#define NOMENUS // - MF_* -#define NOICONS // - IDI_* -#define NOKEYSTATES // - MK_* -#define NOSYSCOMMANDS // - SC_* -#define NORASTEROPS // - Binary and Tertiary raster ops -#define NOSHOWWINDOW // - SW_* -#define OEMRESOURCE // - OEM Resource values -#define NOATOM // - Atom Manager routines -#define NOCLIPBOARD // - Clipboard routines -#define NOCOLOR // - Screen colors +#define NOWINSTYLES // - WS_*, CS_*, ES_*, LBS_*, SBS_*, CBS_* +#define NOSYSMETRICS // - SM_* +#define NOMENUS // - MF_* +#define NOICONS // - IDI_* +#define NOKEYSTATES // - MK_* +#define NOSYSCOMMANDS // - SC_* +#define NORASTEROPS // - Binary and Tertiary raster ops +#define NOSHOWWINDOW // - SW_* +#define OEMRESOURCE // - OEM Resource values +#define NOATOM // - Atom Manager routines +#define NOCLIPBOARD // - Clipboard routines +#define NOCOLOR // - Screen colors //#define NOCTLMGR // - Control and Dialog routines -#define NODRAWTEXT // - DrawText() and DT_* -#define NOGDI // - All GDI defines and routines -#define NOKERNEL // - All KERNEL defines and routines -#define NONLS // - All NLS (natural language interfaces) defines and routines -#define NOMB // - MB_* and MessageBox() -#define NOMEMMGR // - GMEM_*, LMEM_*, GHND, LHND, associated routines -#define NOMETAFILE // - typedef METAFILEPICT -#define NOMINMAX // - Macros min(a,b) and max(a,b) +#define NODRAWTEXT // - DrawText() and DT_* +#define NOGDI // - All GDI defines and routines +#define NOKERNEL // - All KERNEL defines and routines +#define NONLS // - All NLS (natural language interfaces) defines and routines +#define NOMB // - MB_* and MessageBox() +#define NOMEMMGR // - GMEM_*, LMEM_*, GHND, LHND, associated routines +#define NOMETAFILE // - typedef METAFILEPICT +#define NOMINMAX // - Macros min(a,b) and max(a,b) //#define NOMSG // - typedef MSG and associated routines -#define NOOPENFILE // - OpenFile(), OemToAnsi, AnsiToOem, and OF_* -#define NOSCROLL // - SB_* and scrolling routines -#define NOSERVICE // - All Service Controller routines, SERVICE_ equates, etc. -#define NOSOUND // - Sound driver routines +#define NOOPENFILE // - OpenFile(), OemToAnsi, AnsiToOem, and OF_* +#define NOSCROLL // - SB_* and scrolling routines +#define NOSERVICE // - All Service Controller routines, SERVICE_ equates, etc. +#define NOSOUND // - Sound driver routines #define NOTEXTMETRIC // - typedef TEXTMETRIC and associated routines #define NOWH // - SetWindowsHook and WH_* #define NOWINOFFSETS // - GWL_*, GCL_*, associated routines @@ -44,14 +44,15 @@ #define NODEFERWINDOWPOS // - DeferWindowPos routines #define NOMCX // - Modem Configuration Extensions -// Enabling STRICT redefines certain data types so that the compiler does not permit assignment from one type to another without an explicit cast. +// Enabling STRICT redefines certain data types so that the compiler does not +// permit assignment from one type to another without an explicit cast. #define STRICT -// Define WIN32_LEAN_AND_MEAN to exclude APIs such as Cryptography, DDE, RPC, Shell, and Windows Sockets. 
-// Cryptography is needed due to +// Define WIN32_LEAN_AND_MEAN to exclude APIs such as Cryptography, DDE, RPC, +// Shell, and Windows Sockets. Cryptography is needed due to +// //#define WIN32_LEAN_AND_MEAN #pragma warning(pop) - #include diff --git a/inc/L4/detail/ToRawPointer.h b/inc/L4/detail/ToRawPointer.h index f8b6a37..73f3aaf 100644 --- a/inc/L4/detail/ToRawPointer.h +++ b/inc/L4/detail/ToRawPointer.h @@ -2,14 +2,10 @@ #include -namespace L4 -{ -namespace Detail -{ - +namespace L4 { +namespace Detail { using boost::interprocess::ipcdetail::to_raw_pointer; - -} // namespace Detail -} // namespace L4 \ No newline at end of file +} // namespace Detail +} // namespace L4 \ No newline at end of file diff --git a/src/EpochActionManager.cpp b/src/EpochActionManager.cpp index 4b9fc5b..dd29717 100644 --- a/src/EpochActionManager.cpp +++ b/src/EpochActionManager.cpp @@ -5,83 +5,73 @@ #include #include -namespace L4 -{ +namespace L4 { // EpochActionManager class implementation. EpochActionManager::EpochActionManager(std::uint8_t numActionQueues) - : m_epochToActionsList{} - , m_counter{} -{ - // Calculate numActionQueues as the next highest power of two. - std::uint16_t newNumActionQueues = numActionQueues; - if (numActionQueues == 0U) - { - newNumActionQueues = static_cast(std::thread::hardware_concurrency()); - } - newNumActionQueues = static_cast(Utils::Math::NextHighestPowerOfTwo(newNumActionQueues)); + : m_epochToActionsList{}, m_counter{} { + // Calculate numActionQueues as the next highest power of two. + std::uint16_t newNumActionQueues = numActionQueues; + if (numActionQueues == 0U) { + newNumActionQueues = + static_cast(std::thread::hardware_concurrency()); + } + newNumActionQueues = static_cast( + Utils::Math::NextHighestPowerOfTwo(newNumActionQueues)); - assert(newNumActionQueues != 0U && Utils::Math::IsPowerOfTwo(newNumActionQueues)); + assert(newNumActionQueues != 0U && + Utils::Math::IsPowerOfTwo(newNumActionQueues)); - // Initialize m_epochToActionsList. - m_epochToActionsList.resize(newNumActionQueues); - for (auto& epochToActions : m_epochToActionsList) - { - std::get<0>(epochToActions) = std::make_unique(); - } + // Initialize m_epochToActionsList. + m_epochToActionsList.resize(newNumActionQueues); + for (auto& epochToActions : m_epochToActionsList) { + std::get<0>(epochToActions) = std::make_unique(); + } } +void EpochActionManager::RegisterAction(std::uint64_t epochCounter, + IEpochActionManager::Action&& action) { + std::uint32_t index = ++m_counter & (m_epochToActionsList.size() - 1); + auto& epochToActions = m_epochToActionsList[index]; -void EpochActionManager::RegisterAction(std::uint64_t epochCounter, IEpochActionManager::Action&& action) -{ - std::uint32_t index = ++m_counter & (m_epochToActionsList.size() - 1); - auto& epochToActions = m_epochToActionsList[index]; - - Lock lock(*std::get<0>(epochToActions)); - std::get<1>(epochToActions)[epochCounter].emplace_back(std::move(action)); + Lock lock(*std::get<0>(epochToActions)); + std::get<1>(epochToActions)[epochCounter].emplace_back(std::move(action)); } +std::uint64_t EpochActionManager::PerformActions(std::uint64_t epochCounter) { + // Actions will be moved here and performed without a lock. + Actions actionsToPerform; -std::uint64_t EpochActionManager::PerformActions(std::uint64_t epochCounter) -{ - // Actions will be moved here and performed without a lock. 
- Actions actionsToPerform; + for (auto& epochToActionsWithLock : m_epochToActionsList) { + Lock lock(*std::get<0>(epochToActionsWithLock)); - for (auto& epochToActionsWithLock : m_epochToActionsList) - { - Lock lock(*std::get<0>(epochToActionsWithLock)); + // lower_bound() so that it is deleted up to but not including epochCounter. + auto& epochToActions = std::get<1>(epochToActionsWithLock); + const auto endIt = epochToActions.lower_bound(epochCounter); - // lower_bound() so that it is deleted up to but not including epochCounter. - auto& epochToActions = std::get<1>(epochToActionsWithLock); - const auto endIt = epochToActions.lower_bound(epochCounter); + auto it = epochToActions.begin(); - auto it = epochToActions.begin(); + while (it != endIt) { + actionsToPerform.insert(actionsToPerform.end(), + std::make_move_iterator(it->second.begin()), + std::make_move_iterator(it->second.end())); - while (it != endIt) - { - actionsToPerform.insert( - actionsToPerform.end(), - std::make_move_iterator(it->second.begin()), - std::make_move_iterator(it->second.end())); - - // The following post increment is intentional to avoid iterator invalidation issue. - epochToActions.erase(it++); - } + // The following post increment is intentional to avoid iterator + // invalidation issue. + epochToActions.erase(it++); } + } - ApplyActions(actionsToPerform); + ApplyActions(actionsToPerform); - return actionsToPerform.size(); + return actionsToPerform.size(); } - -void EpochActionManager::ApplyActions(Actions& actions) -{ - for (auto& action : actions) - { - action(); - } +void EpochActionManager::ApplyActions(Actions& actions) { + for (auto& action : actions) { + action(); + } } -} // namespace L4 +} // namespace L4 diff --git a/src/Interprocess/Connection/ConnectionMonitor.cpp b/src/Interprocess/Connection/ConnectionMonitor.cpp index cf150fe..46f0e2b 100644 --- a/src/Interprocess/Connection/ConnectionMonitor.cpp +++ b/src/Interprocess/Connection/ConnectionMonitor.cpp @@ -1,175 +1,149 @@ #include "Interprocess/Connection/ConnectionMonitor.h" +#include #include "Interprocess/Connection/EndPointInfoUtils.h" #include "Utils/Exception.h" #include "Utils/Windows.h" -#include -namespace L4 -{ -namespace Interprocess -{ -namespace Connection -{ +namespace L4 { +namespace Interprocess { +namespace Connection { // ConnectionMonitor class implementation. ConnectionMonitor::ConnectionMonitor() - : m_localEndPoint{ EndPointInfoFactory().Create() } - , m_localEvent{ - ::CreateEvent( - NULL, - TRUE, // Manual reset in order to notify all end points registered. - FALSE, - StringConverter()(m_localEndPoint).c_str()) } -{} + : m_localEndPoint{EndPointInfoFactory().Create()}, + m_localEvent{::CreateEvent( + NULL, + TRUE, // Manual reset in order to notify all end points registered. + FALSE, + StringConverter()(m_localEndPoint).c_str())} {} - -ConnectionMonitor::~ConnectionMonitor() -{ - // Notify the remote endpoints. - ::SetEvent(static_cast(m_localEvent)); +ConnectionMonitor::~ConnectionMonitor() { + // Notify the remote endpoints. 
+ ::SetEvent(static_cast(m_localEvent)); } - -const EndPointInfo& ConnectionMonitor::GetLocalEndPointInfo() const -{ - return m_localEndPoint; +const EndPointInfo& ConnectionMonitor::GetLocalEndPointInfo() const { + return m_localEndPoint; } +std::size_t ConnectionMonitor::GetRemoteConnectionsCount() const { + UnRegister(); -std::size_t ConnectionMonitor::GetRemoteConnectionsCount() const -{ - UnRegister(); - - std::lock_guard lock(m_mutexOnRemoteMonitors); - return m_remoteMonitors.size(); + std::lock_guard lock(m_mutexOnRemoteMonitors); + return m_remoteMonitors.size(); } +void ConnectionMonitor::Register(const EndPointInfo& remoteEndPoint, + Callback callback) { + UnRegister(); -void ConnectionMonitor::Register(const EndPointInfo& remoteEndPoint, Callback callback) -{ - UnRegister(); + // The following is needed to prevent the case where the callback is trying + // to call UnRegister() when the ConnectionMonitor is already destroyed. + std::weak_ptr thisWeakPtr = this->shared_from_this(); - // The following is needed to prevent the case where the callback is trying - // to call UnRegister() when the ConnectionMonitor is already destroyed. - std::weak_ptr thisWeakPtr = this->shared_from_this(); + // The following ensures that only one callback is triggered from one endpoint + // even if we are waiting for two handles (process and event). + auto isCalled = std::make_shared(false); - // The following ensures that only one callback is triggered from one endpoint - // even if we are waiting for two handles (process and event). - auto isCalled = std::make_shared(false); + std::lock_guard lock(m_mutexOnRemoteMonitors); - std::lock_guard lock(m_mutexOnRemoteMonitors); + // Note that the following call may throw since opening handles may fail, but + // it is exception safe (std::map::emplace has a strong guarantee on it). + if (!m_remoteMonitors + .emplace(remoteEndPoint, + std::make_unique( + remoteEndPoint, + [thisWeakPtr, callback, + isCalled](const auto& remoteEndPoint) { + if (isCalled->exchange(true)) { + return; + } - // Note that the following call may throw since opening handles may fail, but - // it is exception safe (std::map::emplace has a strong guarantee on it). - if (!m_remoteMonitors.emplace( - remoteEndPoint, - std::make_unique( - remoteEndPoint, - [thisWeakPtr, callback, isCalled](const auto& remoteEndPoint) - { - if (isCalled->exchange(true)) - { - return; - } - - callback(remoteEndPoint); - auto connectionMonitor = thisWeakPtr.lock(); - if (connectionMonitor != nullptr) - { - // Cannot call UnRegister() because it will self-destruct. - // Instead, call the UnRegister(const EndPointInfo&) and queue up the end point - // that will be removed from m_remoteEvents at a later time. - connectionMonitor->UnRegister(remoteEndPoint); - } - })).second) - { - throw RuntimeException("Duplicate end point found."); - } + callback(remoteEndPoint); + auto connectionMonitor = thisWeakPtr.lock(); + if (connectionMonitor != nullptr) { + // Cannot call UnRegister() because it will + // self-destruct. Instead, call the UnRegister(const + // EndPointInfo&) and queue up the end point that + // will be removed from m_remoteEvents at a later + // time. 
+ connectionMonitor->UnRegister(remoteEndPoint); + } + })) + .second) { + throw RuntimeException("Duplicate end point found."); + } } +void ConnectionMonitor::UnRegister(const EndPointInfo& remoteEndPoint) { + std::lock_guard lock(m_mutexOnUnregisteredEndPoints); + m_unregisteredEndPoints.emplace_back(remoteEndPoint); +} -void ConnectionMonitor::UnRegister(const EndPointInfo& remoteEndPoint) -{ +void ConnectionMonitor::UnRegister() const { + std::vector unregisteredEndPoints; + { + // It is possible that the erase() in the following block can + // wait for the callback to finish (::WaitForThreadpoolWaitCallbacks). + // Since the callback calls the UnRegister(const EndPointinfo&), it can + // deadlock if this function holds the lock while calling the erase(). Thus, + // copy the m_unregisteredEndPoints and release the lock before calling + // erase() below. std::lock_guard lock(m_mutexOnUnregisteredEndPoints); - m_unregisteredEndPoints.emplace_back(remoteEndPoint); + unregisteredEndPoints.swap(m_unregisteredEndPoints); + } + + std::lock_guard lock(m_mutexOnRemoteMonitors); + for (const auto& endPoint : unregisteredEndPoints) { + m_remoteMonitors.erase(endPoint); + } } - -void ConnectionMonitor::UnRegister() const -{ - std::vector unregisteredEndPoints; - { - // It is possible that the erase() in the following block can - // wait for the callback to finish (::WaitForThreadpoolWaitCallbacks). - // Since the callback calls the UnRegister(const EndPointinfo&), it can deadlock - // if this function holds the lock while calling the erase(). - // Thus, copy the m_unregisteredEndPoints and release the lock before calling erase() below. - std::lock_guard lock(m_mutexOnUnregisteredEndPoints); - unregisteredEndPoints.swap(m_unregisteredEndPoints); - } - - std::lock_guard lock(m_mutexOnRemoteMonitors); - for (const auto& endPoint : unregisteredEndPoints) - { - m_remoteMonitors.erase(endPoint); - } -} - - // ConnectionMonitor::HandleMonitor::HandleMonitor class implementation. ConnectionMonitor::HandleMonitor::HandleMonitor( const EndPointInfo& remoteEndPoint, Callback callback) - : m_eventWaiter{ - std::make_unique( - Utils::Handle{ ::OpenEvent(SYNCHRONIZE, FALSE, StringConverter()(remoteEndPoint).c_str()) }, - [callback, endPoint = remoteEndPoint] { callback(endPoint); }) } - , m_processWaiter{ - std::make_unique( - Utils::Handle{ ::OpenProcess(SYNCHRONIZE, FALSE, remoteEndPoint.m_pid) }, - [callback, endPoint = remoteEndPoint] { callback(endPoint); }) } -{} - + : m_eventWaiter{std::make_unique( + Utils::Handle{::OpenEvent(SYNCHRONIZE, + FALSE, + StringConverter()(remoteEndPoint).c_str())}, + [callback, endPoint = remoteEndPoint] { callback(endPoint); })}, + m_processWaiter{std::make_unique( + Utils::Handle{ + ::OpenProcess(SYNCHRONIZE, FALSE, remoteEndPoint.m_pid)}, + [callback, endPoint = remoteEndPoint] { callback(endPoint); })} {} // ConnectionMonitor::HandleMonitor::Waiter class implementation. 
-ConnectionMonitor::HandleMonitor::Waiter::Waiter(Utils::Handle handle, Callback callback) - : m_handle{ std::move(handle) } - , m_callback{ callback } - , m_wait{ - ::CreateThreadpoolWait(OnEvent, this, NULL), - ::CloseThreadpoolWait } -{ - ::SetThreadpoolWait(m_wait.get(), static_cast(m_handle), NULL); +ConnectionMonitor::HandleMonitor::Waiter::Waiter(Utils::Handle handle, + Callback callback) + : m_handle{std::move(handle)}, + m_callback{callback}, + m_wait{::CreateThreadpoolWait(OnEvent, this, NULL), + ::CloseThreadpoolWait} { + ::SetThreadpoolWait(m_wait.get(), static_cast(m_handle), NULL); } +ConnectionMonitor::HandleMonitor::Waiter::~Waiter() { + ::SetThreadpoolWait(m_wait.get(), NULL, NULL); -ConnectionMonitor::HandleMonitor::Waiter::~Waiter() -{ - ::SetThreadpoolWait(m_wait.get(), NULL, NULL); - - ::WaitForThreadpoolWaitCallbacks(m_wait.get(), TRUE); + ::WaitForThreadpoolWaitCallbacks(m_wait.get(), TRUE); } - VOID CALLBACK ConnectionMonitor::HandleMonitor::Waiter::OnEvent( PTP_CALLBACK_INSTANCE /*instance*/, PVOID context, PTP_WAIT /*wait*/, - TP_WAIT_RESULT waitResult) -{ - if (waitResult == WAIT_OBJECT_0) - { - static_cast(context)->m_callback(); - } - else - { - throw std::runtime_error{ "Unexpected wait result is received." }; - } + TP_WAIT_RESULT waitResult) { + if (waitResult == WAIT_OBJECT_0) { + static_cast(context)->m_callback(); + } else { + throw std::runtime_error{"Unexpected wait result is received."}; + } } -} // namespace Connection -} // namespace Interprocess -} // namespace L4 +} // namespace Connection +} // namespace Interprocess +} // namespace L4 diff --git a/src/Interprocess/Connection/EndPointInfoUtils.cpp b/src/Interprocess/Connection/EndPointInfoUtils.cpp index 79f2d49..74b558d 100644 --- a/src/Interprocess/Connection/EndPointInfoUtils.cpp +++ b/src/Interprocess/Connection/EndPointInfoUtils.cpp @@ -1,35 +1,26 @@ #include "Interprocess/Connection/EndPointInfoUtils.h" -#include "Utils/Windows.h" #include #include +#include "Utils/Windows.h" -namespace L4 -{ -namespace Interprocess -{ -namespace Connection -{ +namespace L4 { +namespace Interprocess { +namespace Connection { // EndPointInfoFactory class implementation. -EndPointInfo EndPointInfoFactory::Create() const -{ - return EndPointInfo{ GetCurrentProcessId(), boost::uuids::random_generator()() }; +EndPointInfo EndPointInfoFactory::Create() const { + return EndPointInfo{GetCurrentProcessId(), + boost::uuids::random_generator()()}; } - // StringConverter class implementation. -std::string StringConverter::operator()(const EndPointInfo& endPoint) const -{ - return "[pid:" - + std::to_string(endPoint.m_pid) - + "," - + "uuid:" - + boost::uuids::to_string(endPoint.m_uuid) - + "]"; +std::string StringConverter::operator()(const EndPointInfo& endPoint) const { + return "[pid:" + std::to_string(endPoint.m_pid) + "," + + "uuid:" + boost::uuids::to_string(endPoint.m_uuid) + "]"; } -} // namespace Connection -} // namespace Interprocess -} // namespace L4 +} // namespace Connection +} // namespace Interprocess +} // namespace L4 diff --git a/src/Interprocess/Utils/Handle.cpp b/src/Interprocess/Utils/Handle.cpp index 53cde38..bc233c8 100644 --- a/src/Interprocess/Utils/Handle.cpp +++ b/src/Interprocess/Utils/Handle.cpp @@ -1,48 +1,35 @@ #include "Interprocess/Utils/Handle.h" -#include "Utils/Exception.h" #include +#include "Utils/Exception.h" -namespace L4 -{ -namespace Interprocess -{ -namespace Utils -{ +namespace L4 { +namespace Interprocess { +namespace Utils { // Handle class implementation. 
Handle::Handle(HANDLE handle, bool verifyHandle) - : m_handle{ Verify(handle, verifyHandle), ::CloseHandle } -{} + : m_handle{Verify(handle, verifyHandle), ::CloseHandle} {} +Handle::Handle(Handle&& other) : m_handle{std::move(other.m_handle)} {} -Handle::Handle(Handle&& other) - : m_handle{ std::move(other.m_handle) } -{} - - -Handle::operator HANDLE() const -{ - return m_handle.get(); +Handle::operator HANDLE() const { + return m_handle.get(); } - -HANDLE Handle::Verify(HANDLE handle, bool verifyHandle) const -{ - if (handle == NULL || handle == INVALID_HANDLE_VALUE || verifyHandle) - { - auto error = ::GetLastError(); - if (error != ERROR_SUCCESS) - { - boost::format err("Invalid handle: %1%."); - err % error; - throw RuntimeException(err.str()); - } +HANDLE Handle::Verify(HANDLE handle, bool verifyHandle) const { + if (handle == NULL || handle == INVALID_HANDLE_VALUE || verifyHandle) { + auto error = ::GetLastError(); + if (error != ERROR_SUCCESS) { + boost::format err("Invalid handle: %1%."); + err % error; + throw RuntimeException(err.str()); } + } - return handle; + return handle; } -} // namespace Utils -} // namespace Interprocess -} // namespace L4 +} // namespace Utils +} // namespace Interprocess +} // namespace L4 diff --git a/src/PerfLogger.cpp b/src/PerfLogger.cpp index 47c9f55..8fbe305 100644 --- a/src/PerfLogger.cpp +++ b/src/PerfLogger.cpp @@ -1,25 +1,21 @@ #include "Log/PerfLogger.h" -#include "Utils/Exception.h" #include +#include "Utils/Exception.h" -namespace L4 -{ +namespace L4 { // PerfData class implementation. -void PerfData::AddHashTablePerfData(const char* hashTableName, const HashTablePerfData& perfData) -{ - auto result = m_hashTablesPerfData.insert( - std::make_pair( - hashTableName, - HashTablesPerfData::mapped_type(perfData))); +void PerfData::AddHashTablePerfData(const char* hashTableName, + const HashTablePerfData& perfData) { + auto result = m_hashTablesPerfData.insert( + std::make_pair(hashTableName, HashTablesPerfData::mapped_type(perfData))); - if (!result.second) - { - boost::format err("Duplicate hash table name found: '%1%'."); - err % hashTableName; - throw RuntimeException(err.str()); - } + if (!result.second) { + boost::format err("Duplicate hash table name found: '%1%'."); + err % hashTableName; + throw RuntimeException(err.str()); + } } -} // namespace L4 \ No newline at end of file +} // namespace L4 \ No newline at end of file