diff --git a/Benchmark/Benchmark.vcxproj b/Benchmark/Benchmark.vcxproj new file mode 100644 index 0000000..ff51c07 --- /dev/null +++ b/Benchmark/Benchmark.vcxproj @@ -0,0 +1,71 @@ + + + + + Debug + x64 + + + Release + x64 + + + + {B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33} + + + + Application + + + true + v140 + + + false + v140 + + + + L4.Benchmark + + + + Console + true + + + MachineX64 + + + $(SolutionDir)inc;$(SolutionDir)inc/L4;%(AdditionalIncludeDirectories) + _SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + MaxSpeed + AnySuitable + true + + + + + + + + {b7846115-88f1-470b-a625-9de0c29229bb} + + + + + + + + + + This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. + + + + + + + + \ No newline at end of file diff --git a/Benchmark/Benchmark.vcxproj.filters b/Benchmark/Benchmark.vcxproj.filters new file mode 100644 index 0000000..32a8145 --- /dev/null +++ b/Benchmark/Benchmark.vcxproj.filters @@ -0,0 +1,25 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;hm;inl;inc;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + + + Source Files + + + + + + \ No newline at end of file diff --git a/Benchmark/main.cpp b/Benchmark/main.cpp new file mode 100644 index 0000000..560c1aa --- /dev/null +++ b/Benchmark/main.cpp @@ -0,0 +1,710 @@ +#include "L4/LocalMemory/HashTableService.h" +#include "L4/Log/PerfCounter.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +class Timer +{ +public: + Timer() + : m_start{ std::chrono::high_resolution_clock::now() } + {} + + void Reset() + { + m_start = std::chrono::high_resolution_clock::now(); + } + + std::chrono::microseconds GetElapsedTime() + { + return std::chrono::duration_cast( + std::chrono::high_resolution_clock::now() - m_start); + } + +private: + std::chrono::time_point m_start; +}; + + +class SynchronizedTimer +{ +public: + SynchronizedTimer() = default; + + void Start() + { + if (m_isStarted) + { + return; + } + m_isStarted = true; + m_startCount = std::chrono::high_resolution_clock::now().time_since_epoch().count(); + + } + + void End() + { + m_endCount = std::chrono::high_resolution_clock::now().time_since_epoch().count(); + } + + std::chrono::microseconds GetElapsedTime() + { + std::chrono::nanoseconds start{ m_startCount }; + std::chrono::nanoseconds end{ m_endCount }; + + return std::chrono::duration_cast(end - start); + } + +private: + std::atomic_bool m_isStarted = false; + std::atomic_uint64_t m_startCount; + std::atomic_uint64_t m_endCount; +}; + + +struct PerThreadInfoForWriteTest +{ + std::thread m_thread; + std::size_t m_dataSetSize = 0; + std::chrono::microseconds m_totalTime; +}; + + +struct PerThreadInfoForReadTest +{ + std::thread m_thread; + std::size_t m_dataSetSize = 0; + std::chrono::microseconds m_totalTime; +}; + + +struct CommandLineOptions +{ + static constexpr std::size_t c_defaultDataSetSize = 1000000; + static constexpr std::uint32_t c_defaultNumBuckets = 1000000; + static constexpr std::uint16_t c_defaultKeySize = 16; + static constexpr std::uint32_t c_defaultValueSize = 100; + static constexpr bool c_defaultRandomizeValueSize = false; + static constexpr std::uint32_t c_defaultNumIterationsPerGetContext = 1; + static constexpr 
std::uint16_t c_defaultNumThreads = 1; + static constexpr std::uint32_t c_defaultEpochProcessingIntervalInMilli = 10; + static constexpr std::uint16_t c_defaultNumActionsQueue = 1; + static constexpr std::uint32_t c_defaultRecordTimeToLiveInSeconds = 300; + static constexpr std::uint64_t c_defaultCacheSizeInBytes = 1024 * 1024 * 1024; + static constexpr bool c_defaultForceTimeBasedEviction = false; + + std::string m_module; + std::size_t m_dataSetSize = 0; + std::uint32_t m_numBuckets = 0; + std::uint16_t m_keySize = 0; + std::uint32_t m_valueSize = 0; + bool m_randomizeValueSize = false; + std::uint32_t m_numIterationsPerGetContext = 0; + std::uint16_t m_numThreads = 0; + std::uint32_t m_epochProcessingIntervalInMilli; + std::uint8_t m_numActionsQueue = 0; + + // The followings are specific for cache hash tables. + std::uint32_t m_recordTimeToLiveInSeconds = 0U; + std::uint64_t m_cacheSizeInBytes = 0U; + bool m_forceTimeBasedEviction = false; + + bool IsCachingModule() const + { + static const std::string c_cachingModulePrefix{ "cache" }; + return m_module.substr(0, c_cachingModulePrefix.size()) == c_cachingModulePrefix; + } +}; + + +class DataGenerator +{ +public: + DataGenerator( + std::size_t dataSetSize, + std::uint16_t keySize, + std::uint32_t valueSize, + bool randomizeValueSize, + bool isDebugMode = false) + : m_dataSetSize{ dataSetSize } + , m_keySize{ keySize } + { + if (isDebugMode) + { + std::cout << "Generating data set with size = " << dataSetSize << std::endl; + } + + Timer timer; + + // Populate keys. + m_keys.resize(m_dataSetSize); + m_keysBuffer.resize(m_dataSetSize); + for (std::size_t i = 0; i < m_dataSetSize; ++i) + { + m_keysBuffer[i].resize(keySize); + std::generate(m_keysBuffer[i].begin(), m_keysBuffer[i].end(), std::rand); + std::snprintf(reinterpret_cast(m_keysBuffer[i].data()), keySize, "%llu", i); + m_keys[i].m_data = m_keysBuffer[i].data(); + m_keys[i].m_size = m_keySize; + } + + // Populate values buffer. Assumes srand() is already called. + std::generate(m_valuesBuffer.begin(), m_valuesBuffer.end(), std::rand); + + // Populate values. + m_values.resize(m_dataSetSize); + std::size_t currentIndex = 0; + for (std::size_t i = 0; i < m_dataSetSize; ++i) + { + m_values[i].m_data = &m_valuesBuffer[currentIndex % c_valuesBufferSize]; + m_values[i].m_size = randomizeValueSize ? 
rand() % valueSize : valueSize; + currentIndex += valueSize; + } + + if (isDebugMode) + { + std::cout << "Finished generating data in " + << timer.GetElapsedTime().count() << " microseconds" << std::endl; + } + } + + L4::IReadOnlyHashTable::Key GetKey(std::size_t index) const + { + return m_keys[index % m_dataSetSize]; + } + + L4::IReadOnlyHashTable::Value GetValue(std::size_t index) const + { + return m_values[index % m_dataSetSize]; + } + +private: + std::size_t m_dataSetSize; + std::uint16_t m_keySize; + + std::vector> m_keysBuffer; + std::vector m_keys; + std::vector m_values; + + static const std::size_t c_valuesBufferSize = 64 * 1024; + std::array m_valuesBuffer; +}; + + +void PrintHardwareInfo() +{ + SYSTEM_INFO sysInfo; + GetSystemInfo(&sysInfo); + + printf("\n"); + printf("Hardware information: \n"); + printf("-------------------------------------\n"); + printf("%22s | %10u |\n", "OEM ID", sysInfo.dwOemId); + printf("%22s | %10u |\n", "Number of processors", sysInfo.dwNumberOfProcessors); + printf("%22s | %10u |\n", "Page size", sysInfo.dwPageSize); + printf("%22s | %10u |\n", "Processor type", sysInfo.dwProcessorType); + printf("-------------------------------------\n"); + printf("\n"); +} + + +void PrintOptions(const CommandLineOptions& options) +{ + printf("------------------------------------------------------\n"); + + printf("%39s | %10llu |\n", "Data set size", options.m_dataSetSize); + printf("%39s | %10lu |\n", "Number of hash table buckets", options.m_numBuckets); + printf("%39s | %10lu |\n", "Key size", options.m_keySize); + printf("%39s | %10lu |\n", "Value type", options.m_valueSize); + printf("%39s | %10lu |\n", "Number of iterations per GetContext()", options.m_numIterationsPerGetContext); + printf("%39s | %10lu |\n", "Epoch processing interval (ms)", options.m_epochProcessingIntervalInMilli); + printf("%39s | %10lu |\n", "Number of actions queue", options.m_numActionsQueue); + + if (options.IsCachingModule()) + { + printf("%39s | %10lu |\n", "Record time to live (s)", options.m_recordTimeToLiveInSeconds); + printf("%39s | %10llu |\n", "Cache size in bytes", options.m_cacheSizeInBytes); + printf("%39s | %10lu |\n", "Force time-based eviction", options.m_forceTimeBasedEviction); + } + + printf("------------------------------------------------------\n\n"); +} + + +void PrintHashTableCounters(const L4::HashTablePerfData& perfData) +{ + printf("HashTableCounter:\n"); + printf("----------------------------------------------------\n"); + for (auto i = 0; i < static_cast(L4::HashTablePerfCounter::Count); ++i) + { + printf("%35s | %12llu |\n", + L4::c_hashTablePerfCounterNames[i], + perfData.Get(static_cast(i))); + } + printf("----------------------------------------------------\n\n"); +} + + +L4::HashTableConfig CreateHashTableConfig(const CommandLineOptions& options) +{ + return L4::HashTableConfig( + "Table1", + L4::HashTableConfig::Setting{ options.m_numBuckets }, + options.IsCachingModule() + ? 
boost::optional{ + L4::HashTableConfig::Cache{ + options.m_cacheSizeInBytes, + std::chrono::seconds{ options.m_recordTimeToLiveInSeconds }, + options.m_forceTimeBasedEviction }} + : boost::none); +} + + +L4::EpochManagerConfig CreateEpochManagerConfig(const CommandLineOptions& options) +{ + return L4::EpochManagerConfig( + 10000U, + std::chrono::milliseconds(options.m_epochProcessingIntervalInMilli), + options.m_numActionsQueue); +} + + +void ReadPerfTest(const CommandLineOptions& options) +{ + printf("Performing read-perf which reads all the records inserted:\n"); + + PrintOptions(options); + + auto dataGenerator = std::make_unique( + options.m_dataSetSize, + options.m_keySize, + options.m_valueSize, + options.m_randomizeValueSize); + + L4::LocalMemory::HashTableService service(CreateEpochManagerConfig(options)); + const auto hashTableIndex = service.AddHashTable(CreateHashTableConfig(options)); + + // Insert data set. + auto context = service.GetContext(); + auto& hashTable = context[hashTableIndex]; + + std::vector randomIndices(options.m_dataSetSize); + for (std::uint32_t i = 0U; i < options.m_dataSetSize; ++i) + { + randomIndices[i] = i; + } + if (options.m_numThreads > 0) + { + // Randomize index only if multiple threads are running + // not to skew the results. + std::random_shuffle(randomIndices.begin(), randomIndices.end()); + } + + for (int i = 0; i < options.m_dataSetSize; ++i) + { + auto key = dataGenerator->GetKey(randomIndices[i]); + auto val = dataGenerator->GetValue(randomIndices[i]); + + hashTable.Add(key, val); + } + + std::vector allInfo; + allInfo.resize(options.m_numThreads); + + SynchronizedTimer overallTimer; + std::mutex mutex; + std::condition_variable cv; + const auto isCachingModule = options.IsCachingModule(); + bool isReady = false; + + const std::size_t dataSetSizePerThread = options.m_dataSetSize / options.m_numThreads; + for (std::uint16_t i = 0; i < options.m_numThreads; ++i) + { + auto& info = allInfo[i]; + + std::size_t startIndex = i * dataSetSizePerThread; + info.m_dataSetSize = (i + 1 == options.m_numThreads) + ? options.m_dataSetSize - startIndex + : dataSetSizePerThread; + + info.m_thread = std::thread([=, &service, &dataGenerator, &info, &mutex, &cv, &isReady, &overallTimer] + { + { + std::unique_lock lock(mutex); + cv.wait(lock, [&] { return isReady == true; }); + } + + overallTimer.Start(); + + Timer totalTimer; + Timer getTimer; + + std::size_t iteration = 0; + bool isDone = false; + + while (!isDone) + { + auto context = service.GetContext(); + auto& hashTable = context[hashTableIndex]; + + for (std::uint32_t j = 0; !isDone && j < options.m_numIterationsPerGetContext; ++j) + { + auto key = dataGenerator->GetKey(startIndex + iteration); + L4::IReadOnlyHashTable::Value val; + + if (!hashTable.Get(key, val) && !isCachingModule) + { + throw std::runtime_error("Look up failure is not allowed in this test."); + } + + isDone = (++iteration == info.m_dataSetSize); + } + } + + overallTimer.End(); + + info.m_totalTime = totalTimer.GetElapsedTime(); + }); + } + + { + std::unique_lock lock(mutex); + isReady = true; + } + + // Now, start the benchmarking for all threads. 
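+    // Every worker thread is blocked in cv.wait() until isReady is observed as
+    // true, so notify_all() releases them nearly simultaneously. The shared
+    // SynchronizedTimer latches its start time from whichever thread gets
+    // through Start() first (m_isStarted acts as a one-shot flag).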
+ cv.notify_all(); + + for (auto& info : allInfo) + { + info.m_thread.join(); + } + + PrintHashTableCounters(service.GetContext()[hashTableIndex].GetPerfData()); + + printf("Result:\n"); + printf(" | Total | |\n"); + printf(" | micros/op | microseconds | DataSetSize |\n"); + printf(" -----------------------------------------------------------\n"); + + for (std::size_t i = 0; i < allInfo.size(); ++i) + { + const auto& info = allInfo[i]; + + printf(" Thread #%llu | %11.3f | %14llu | %13llu |\n", + (i + 1), + static_cast(info.m_totalTime.count()) / info.m_dataSetSize, + info.m_totalTime.count(), + info.m_dataSetSize); + } + printf(" -----------------------------------------------------------\n"); + + printf(" Overall | %11.3f | %14llu | %13llu |\n", + static_cast(overallTimer.GetElapsedTime().count()) / options.m_dataSetSize, + overallTimer.GetElapsedTime().count(), + options.m_dataSetSize); +} + + +void WritePerfTest(const CommandLineOptions& options) +{ + if (options.m_module == "overwrite-perf") + { + printf("Performing overwrite-perf (writing data with unique keys, then overwrite data with same keys):\n"); + } + else + { + printf("Performing write-perf (writing data with unique keys):\n"); + } + + PrintOptions(options); + + auto dataGenerator = std::make_unique( + options.m_dataSetSize, + options.m_keySize, + options.m_valueSize, + options.m_randomizeValueSize); + + L4::LocalMemory::HashTableService service(CreateEpochManagerConfig(options)); + const auto hashTableIndex = service.AddHashTable(CreateHashTableConfig(options)); + + if (options.m_module == "overwrite-perf") + { + std::vector randomIndices(options.m_dataSetSize); + for (std::uint32_t i = 0U; i < options.m_dataSetSize; ++i) + { + randomIndices[i] = i; + } + if (options.m_numThreads > 0) + { + // Randomize index only if multiple threads are running + // not to skew the results. + std::random_shuffle(randomIndices.begin(), randomIndices.end()); + } + + auto context = service.GetContext(); + auto& hashTable = context[hashTableIndex]; + + for (int i = 0; i < options.m_dataSetSize; ++i) + { + const auto index = randomIndices[i]; + auto key = dataGenerator->GetKey(index); + auto val = dataGenerator->GetValue(index); + + hashTable.Add(key, val); + } + } + + std::vector allInfo; + allInfo.resize(options.m_numThreads); + + SynchronizedTimer overallTimer; + std::mutex mutex; + std::condition_variable cv; + bool isReady = false; + + const std::size_t dataSetSizePerThread = options.m_dataSetSize / options.m_numThreads; + for (std::uint16_t i = 0; i < options.m_numThreads; ++i) + { + auto& info = allInfo[i]; + + std::size_t startIndex = i * dataSetSizePerThread; + info.m_dataSetSize = (i + 1 == options.m_numThreads) + ? 
options.m_dataSetSize - startIndex + : dataSetSizePerThread; + + info.m_thread = std::thread([=, &service, &dataGenerator, &info, &mutex, &cv, &isReady, &overallTimer] + { + { + std::unique_lock lock(mutex); + cv.wait(lock, [&] { return isReady == true; }); + } + + overallTimer.Start(); + + Timer totalTimer; + Timer addTimer; + + std::size_t iteration = 0; + bool isDone = false; + + while (!isDone) + { + auto context = service.GetContext(); + auto& hashTable = context[hashTableIndex]; + + for (std::uint32_t j = 0; !isDone && j < options.m_numIterationsPerGetContext; ++j) + { + const auto index = startIndex + iteration; + auto key = dataGenerator->GetKey(index); + auto val = dataGenerator->GetValue(index); + + hashTable.Add(key, val); + + isDone = (++iteration == info.m_dataSetSize); + } + } + + info.m_totalTime = totalTimer.GetElapsedTime(); + overallTimer.End(); + }); + } + + { + std::unique_lock lock(mutex); + isReady = true; + } + + // Now, start the benchmarking for all threads. + cv.notify_all(); + + for (auto& info : allInfo) + { + info.m_thread.join(); + } + + PrintHashTableCounters(service.GetContext()[hashTableIndex].GetPerfData()); + + printf("Result:\n"); + printf(" | Total | |\n"); + printf(" | micros/op | microseconds | DataSetSize |\n"); + printf(" -----------------------------------------------------------\n"); + + for (std::size_t i = 0; i < allInfo.size(); ++i) + { + const auto& info = allInfo[i]; + + printf(" Thread #%llu | %11.3f | %14llu | %13llu |\n", + (i + 1), + static_cast(info.m_totalTime.count()) / info.m_dataSetSize, + info.m_totalTime.count(), + info.m_dataSetSize); + } + printf(" -----------------------------------------------------------\n"); + + printf(" Overall | %11.3f | %14llu | %13llu |\n", + static_cast(overallTimer.GetElapsedTime().count()) / options.m_dataSetSize, + overallTimer.GetElapsedTime().count(), + options.m_dataSetSize); + + if (options.m_numThreads == 1) + { + auto& perfData = service.GetContext()[hashTableIndex].GetPerfData(); + std::uint64_t totalBytes = perfData.Get(L4::HashTablePerfCounter::TotalKeySize) + + perfData.Get(L4::HashTablePerfCounter::TotalValueSize); + + auto& info = allInfo[0]; + + double opsPerSec = static_cast(info.m_dataSetSize) / info.m_totalTime.count() * 1000000.0; + double MBPerSec = static_cast(totalBytes) / info.m_totalTime.count(); + printf(" %10.3f ops/sec %10.3f MB/sec\n", opsPerSec, MBPerSec); + } +} + + +CommandLineOptions Parse(int argc, char** argv) +{ + namespace po = boost::program_options; + + po::options_description general("General options"); + general.add_options() + ("help", "produce a help message") + ("help-module", po::value(), + "produce a help for the following modules:\n" + " write-perf\n" + " overwrite-perf\n" + " read-perf\n" + " cache-read-perf\n" + " cache-write-perf\n") + ("module", po::value(), + "Runs the given module"); + + po::options_description benchmarkOptions("Benchmark options."); + benchmarkOptions.add_options() + ("dataSetSize", po::value()->default_value(CommandLineOptions::c_defaultDataSetSize), "data set size") + ("numBuckets", po::value()->default_value(CommandLineOptions::c_defaultNumBuckets), "number of buckets") + ("keySize", po::value()->default_value(CommandLineOptions::c_defaultKeySize), "key size in bytes") + ("valueSize", po::value()->default_value(CommandLineOptions::c_defaultValueSize), "value size in bytes") + ("randomizeValueSize", "randomize value size") + ("numIterationsPerGetContext", 
po::value()->default_value(CommandLineOptions::c_defaultNumIterationsPerGetContext), "number of iterations per GetContext()") + ("numThreads", po::value()->default_value(CommandLineOptions::c_defaultNumThreads), "number of threads to create") + ("epochProcessingInterval", po::value()->default_value(CommandLineOptions::c_defaultEpochProcessingIntervalInMilli), "epoch processing interval (ms)") + ("numActionsQueue", po::value()->default_value(CommandLineOptions::c_defaultNumActionsQueue), "number of actions queue") + ("recordTimeToLive", po::value()->default_value(CommandLineOptions::c_defaultRecordTimeToLiveInSeconds), "record time to live (s)") + ("cacheSize", po::value()->default_value(CommandLineOptions::c_defaultCacheSizeInBytes), "cache size in bytes") + ("forceTimeBasedEviction", po::value()->default_value(CommandLineOptions::c_defaultForceTimeBasedEviction), "force time based eviction"); + + po::options_description all("Allowed options"); + all.add(general).add(benchmarkOptions); + + po::variables_map vm; + po::store(po::parse_command_line(argc, argv, all), vm); + po::notify(vm); + + CommandLineOptions options; + + if (vm.count("help")) + { + std::cout << all; + } + else if (vm.count("module")) + { + options.m_module = vm["module"].as(); + + if (vm.count("dataSetSize")) + { + options.m_dataSetSize = vm["dataSetSize"].as(); + } + if (vm.count("numBuckets")) + { + options.m_numBuckets = vm["numBuckets"].as(); + } + if (vm.count("keySize")) + { + options.m_keySize = vm["keySize"].as(); + } + if (vm.count("valueSize")) + { + options.m_valueSize = vm["valueSize"].as(); + } + if (vm.count("randomizeValueSize")) + { + options.m_randomizeValueSize = true; + } + if (vm.count("numIterationsPerGetContext")) + { + options.m_numIterationsPerGetContext = vm["numIterationsPerGetContext"].as(); + } + if (vm.count("numThreads")) + { + options.m_numThreads = vm["numThreads"].as(); + } + if (vm.count("epochProcessingInterval")) + { + options.m_epochProcessingIntervalInMilli = vm["epochProcessingInterval"].as(); + } + if (vm.count("numActionsQueue")) + { + options.m_numActionsQueue = vm["numActionsQueue"].as(); + } + if (vm.count("recordTimeToLive")) + { + options.m_recordTimeToLiveInSeconds = vm["recordTimeToLive"].as(); + } + if (vm.count("cacheSize")) + { + options.m_cacheSizeInBytes = vm["cacheSize"].as(); + } + if (vm.count("forceTimeBasedEviction")) + { + options.m_forceTimeBasedEviction = vm["forceTimeBasedEviction"].as(); + } + } + else + { + std::cout << all; + } + + return options; +} + + +int main(int argc, char** argv) +{ + auto options = Parse(argc, argv); + + if (options.m_module.empty()) + { + return 0; + } + + std::srand(static_cast(time(NULL))); + + PrintHardwareInfo(); + + if (options.m_module == "write-perf" + || options.m_module == "overwrite-perf" + || options.m_module == "cache-write-perf") + { + WritePerfTest(options); + } + else if (options.m_module == "read-perf" + || options.m_module == "cache-read-perf") + { + ReadPerfTest(options); + } + else + { + std::cout << "Unknown module: " << options.m_module << std::endl; + } + + return 0; +} + diff --git a/Benchmark/packages.config b/Benchmark/packages.config new file mode 100644 index 0000000..6f4997b --- /dev/null +++ b/Benchmark/packages.config @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/Examples/Examples.vcxproj b/Examples/Examples.vcxproj new file mode 100644 index 0000000..373f483 --- /dev/null +++ b/Examples/Examples.vcxproj @@ -0,0 +1,84 @@ + + + + + Debug + x64 + + + Release + x64 + + + + 
{9672B9F5-84A6-4063-972C-A4DC23200B42} + Examples + + + + Application + true + v140 + + + Application + false + v140 + + + + + + + + + true + + + false + + + + $(SolutionDir)inc\L4;%(AdditionalIncludeDirectories) + _SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + MaxSpeed + AnySuitable + true + + + + + Console + true + + + + + + + + {b7846115-88f1-470b-a625-9de0c29229bb} + + + + + + + + + + + + + + + + + + + This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. + + + + + \ No newline at end of file diff --git a/Examples/Examples.vcxproj.filters b/Examples/Examples.vcxproj.filters new file mode 100644 index 0000000..643b3c5 --- /dev/null +++ b/Examples/Examples.vcxproj.filters @@ -0,0 +1,39 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;hm;inl;inc;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + + + Source Files + + + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + + + + \ No newline at end of file diff --git a/Examples/main.cpp b/Examples/main.cpp new file mode 100644 index 0000000..369fa4b --- /dev/null +++ b/Examples/main.cpp @@ -0,0 +1,98 @@ +#include "Log/IPerfLogger.h" +#include "LocalMemory/HashTableService.h" +#include +#include +#include +#include +#include +#include + + +using namespace L4; + +class ConsolePerfLogger : public IPerfLogger +{ + virtual void Log(const IData& perfData) override + { + for (auto i = 0; i < static_cast(ServerPerfCounter::Count); ++i) + { + std::cout << c_serverPerfCounterNames[i] << ": " + << perfData.GetServerPerfData().Get(static_cast(i)) << std::endl; + } + + const auto& hashTablesPerfData = perfData.GetHashTablesPerfData(); + + for (const auto& entry : hashTablesPerfData) + { + std::cout << "Hash table '" << entry.first << "'" << std::endl; + + for (auto j = 0; j < static_cast(HashTablePerfCounter::Count); ++j) + { + std::cout << c_hashTablePerfCounterNames[j] << ": " + << entry.second.get().Get(static_cast(j)) << std::endl; + } + } + + std::cout << std::endl; + } +}; + + + +int main(int argc, char** argv) +{ + (void)argc; + (void)argv; + + LocalMemory::HashTableService service; + + auto index = service.AddHashTable( + HashTableConfig("Table1", HashTableConfig::Setting{ 1000000 })); + + static constexpr int keySize = 100; + static constexpr int valSize = 2000; + + char bufKey[keySize]; + char bufVal[valSize]; + + IWritableHashTable::Key key; + key.m_data = reinterpret_cast(bufKey); + IWritableHashTable::Value val; + val.m_data = reinterpret_cast(bufVal); + + std::ifstream file; + file.open(argv[1], std::ifstream::in); + std::cout << "Opening " << argv[1] << std::endl; + static const int BufferLength = 4096; + + char buffer[BufferLength]; + + auto totalTime = 0U; + int numLines = 0; + while (file.getline(buffer, BufferLength)) + { + auto context = service.GetContext(); + + auto& hashTable = context[index]; + + char* nextToken = nullptr; + const char* keyStr = strtok_s(buffer, "\t", &nextToken); + const char* valStr = strtok_s(nullptr, "\t", &nextToken); + + key.m_data = reinterpret_cast(keyStr); + key.m_size = static_cast(strlen(keyStr)); + + val.m_data = reinterpret_cast(valStr); + val.m_size = static_cast(strlen(valStr)); + + hashTable.Add(key, val); + 
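+        // Note: totalTime is printed after this loop but is never accumulated;
+        // for the "Total Add() time" output to be meaningful, each Add() above
+        // would need to be timed (e.g. with std::chrono deltas) and the elapsed
+        // amount summed into totalTime here.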
+ ++numLines; + } + + std::cout<< "Total Add() time" << totalTime << std::endl; + std::cout << "Added " << numLines << " lines." << std::endl; + + return 0; +} + diff --git a/Examples/packages.config b/Examples/packages.config new file mode 100644 index 0000000..1e79042 --- /dev/null +++ b/Examples/packages.config @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff --git a/L4.sln b/L4.sln new file mode 100644 index 0000000..8424867 --- /dev/null +++ b/L4.sln @@ -0,0 +1,40 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.25420.1 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "L4", "Build\L4.vcxproj", "{B7846115-88F1-470B-A625-9DE0C29229BB}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Unittests", "Unittests\Unittests.vcxproj", "{8122529E-61CB-430B-A089-B12E63FC361B}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Examples", "Examples\Examples.vcxproj", "{9672B9F5-84A6-4063-972C-A4DC23200B42}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Benchmark", "Benchmark\Benchmark.vcxproj", "{B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Release|x64 = Release|x64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {B7846115-88F1-470B-A625-9DE0C29229BB}.Debug|x64.ActiveCfg = Debug|x64 + {B7846115-88F1-470B-A625-9DE0C29229BB}.Debug|x64.Build.0 = Debug|x64 + {B7846115-88F1-470B-A625-9DE0C29229BB}.Release|x64.ActiveCfg = Release|x64 + {B7846115-88F1-470B-A625-9DE0C29229BB}.Release|x64.Build.0 = Release|x64 + {8122529E-61CB-430B-A089-B12E63FC361B}.Debug|x64.ActiveCfg = Debug|x64 + {8122529E-61CB-430B-A089-B12E63FC361B}.Debug|x64.Build.0 = Debug|x64 + {8122529E-61CB-430B-A089-B12E63FC361B}.Release|x64.ActiveCfg = Release|x64 + {8122529E-61CB-430B-A089-B12E63FC361B}.Release|x64.Build.0 = Release|x64 + {9672B9F5-84A6-4063-972C-A4DC23200B42}.Debug|x64.ActiveCfg = Debug|x64 + {9672B9F5-84A6-4063-972C-A4DC23200B42}.Debug|x64.Build.0 = Debug|x64 + {9672B9F5-84A6-4063-972C-A4DC23200B42}.Release|x64.ActiveCfg = Release|x64 + {9672B9F5-84A6-4063-972C-A4DC23200B42}.Release|x64.Build.0 = Release|x64 + {B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}.Debug|x64.ActiveCfg = Debug|x64 + {B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}.Debug|x64.Build.0 = Debug|x64 + {B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}.Release|x64.ActiveCfg = Release|x64 + {B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}.Release|x64.Build.0 = Release|x64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/Unittests/CacheHashTableTest.cpp b/Unittests/CacheHashTableTest.cpp new file mode 100644 index 0000000..5c6bb1d --- /dev/null +++ b/Unittests/CacheHashTableTest.cpp @@ -0,0 +1,615 @@ +#include "stdafx.h" +#include "Utils.h" +#include "Mocks.h" +#include "CheckedAllocator.h" +#include "L4/HashTable/Common/Record.h" +#include "L4/HashTable/Cache/Metadata.h" +#include "L4/HashTable/Cache/HashTable.h" + +#include +#include + +namespace L4 +{ +namespace UnitTests +{ + +using namespace HashTable::Cache; +using namespace std::chrono; + +class MockClock +{ +public: + MockClock() = default; + + seconds GetCurrentEpochTime() const + { + return s_currentEpochTime; + } + + static void SetEpochTime(seconds time) + { + s_currentEpochTime = time; + } + + static void IncrementEpochTime(seconds increment) 
+ { + s_currentEpochTime += increment; + } + +private: + static seconds s_currentEpochTime; +}; + +seconds MockClock::s_currentEpochTime{ 0U }; + + +class CacheHashTableTestFixture +{ +public: + using Allocator = CheckedAllocator<>; + using CacheHashTable = WritableHashTable; + using ReadOnlyCacheHashTable = ReadOnlyHashTable; + using HashTable = CacheHashTable::HashTable; + + CacheHashTableTestFixture() + : m_allocator{} + , m_hashTable { HashTable::Setting{ 100U }, m_allocator } + , m_epochManager{} + { + MockClock::SetEpochTime(seconds{ 0U }); + } + + CacheHashTableTestFixture(const CacheHashTableTestFixture&) = delete; + CacheHashTableTestFixture& operator=(const CacheHashTableTestFixture&) = delete; + +protected: + template + bool Get(TCacheHashTable& hashTable, const std::string& key, IReadOnlyHashTable::Value& value) + { + return hashTable.Get( + Utils::ConvertFromString(key.c_str()), + value); + } + + void Add(CacheHashTable& hashTable, const std::string& key, const std::string& value) + { + hashTable.Add( + Utils::ConvertFromString(key.c_str()), + Utils::ConvertFromString(value.c_str())); + } + + void Remove(CacheHashTable& hashTable, const std::string& key) + { + hashTable.Remove(Utils::ConvertFromString(key.c_str())); + } + + template + bool CheckRecord(TCacheHashTable& hashTable, const std::string& key, const std::string& expectedValue) + { + IReadOnlyHashTable::Value value; + return Get(hashTable, key, value) && AreTheSame(value, expectedValue); + } + + bool AreTheSame(const IReadOnlyHashTable::Value& actual, const std::string& expected) + { + return (actual.m_size == expected.size()) + && !memcmp(actual.m_data, expected.c_str(), actual.m_size); + } + + template + bool Exist(const Blob& actual, const std::vector& expectedSet) + { + const std::string actualStr( + reinterpret_cast(actual.m_data), + actual.m_size); + + return std::find(expectedSet.cbegin(), expectedSet.cend(), actualStr) != expectedSet.cend(); + } + + Allocator m_allocator; + HashTable m_hashTable; + MockEpochManager m_epochManager; + MockClock m_clock; +}; + + +BOOST_AUTO_TEST_SUITE(CacheHashTableTests) + + +BOOST_AUTO_TEST_CASE(MetadataTest) +{ + std::vector buffer(20); + + // The following will test with 1..8 byte alignments. + for (std::uint16_t i = 0U; i < 8U; ++i) + { + std::uint32_t* metadataBuffer = reinterpret_cast(buffer.data() + i); + seconds currentEpochTime{ 0x7FABCDEF }; + + Metadata metadata{ metadataBuffer, currentEpochTime }; + + BOOST_CHECK(currentEpochTime == metadata.GetEpochTime()); + + // 10 seconds have elapsed. + currentEpochTime += seconds{ 10U }; + + // Check the expiration based on the time to live value. + BOOST_CHECK(!metadata.IsExpired(currentEpochTime, seconds{ 15 })); + BOOST_CHECK(!metadata.IsExpired(currentEpochTime, seconds{ 10 })); + BOOST_CHECK(metadata.IsExpired(currentEpochTime, seconds{ 5U })); + + // Test access state. + BOOST_CHECK(!metadata.IsAccessed()); + + metadata.UpdateAccessStatus(true); + BOOST_CHECK(metadata.IsAccessed()); + + metadata.UpdateAccessStatus(false); + BOOST_CHECK(!metadata.IsAccessed()); + } +} + + +BOOST_FIXTURE_TEST_CASE(ExpirationTest, CacheHashTableTestFixture) +{ + // Don't care about evict in this test case, so make the cache size big. 
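+    // Per MetadataTest above, expiration uses a strict comparison: a record is
+    // expired only when its age exceeds the TTL, so a record whose age equals
+    // the TTL is still served.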
+    constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF;
+    constexpr seconds c_recordTimeToLive{ 20U };
+
+    CacheHashTable hashTable(
+        m_hashTable,
+        m_epochManager,
+        c_maxCacheSizeInBytes,
+        c_recordTimeToLive,
+        false);
+
+    const std::vector<std::pair<std::string, std::string>> c_keyValuePairs =
+    {
+        { "key1", "value1" },
+        { "key2", "value2" },
+        { "key3", "value3" },
+        { "key4", "value4" },
+        { "key5", "value5" }
+    };
+
+    // Add 5 records at a different epoch time (10 seconds increment).
+    for (const auto& pair : c_keyValuePairs)
+    {
+        MockClock::IncrementEpochTime(seconds{ 10 });
+        Add(hashTable, pair.first, pair.second);
+
+        // Make sure the records can be retrieved right away. The record has not
+        // expired since the clock hasn't moved yet.
+        BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second));
+    }
+
+    const auto& perfData = hashTable.GetPerfData();
+    Utils::ValidateCounters(
+        perfData,
+        {
+            { HashTablePerfCounter::CacheHitCount, 5 }
+        });
+
+    // Now we have the following data set:
+    // | Key  | Value  | Creation time |
+    // | key1 | value1 | 10            |
+    // | key2 | value2 | 20            |
+    // | key3 | value3 | 30            |
+    // | key4 | value4 | 40            |
+    // | key5 | value5 | 50            |
+    // And the current clock is at 50.
+
+    // Do lookups and check expired records.
+    for (const auto& pair : c_keyValuePairs)
+    {
+        IReadOnlyHashTable::Value value;
+        // Our time-to-live value is 20, so the key1 and key2 records should be expired.
+        if (pair.first == "key1" || pair.first == "key2")
+        {
+            BOOST_CHECK(!Get(hashTable, pair.first, value));
+        }
+        else
+        {
+            BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second));
+        }
+    }
+
+    Utils::ValidateCounters(
+        perfData,
+        {
+            { HashTablePerfCounter::CacheHitCount, 8 },
+            { HashTablePerfCounter::CacheMissCount, 2 }
+        });
+
+    MockClock::IncrementEpochTime(seconds{ 100 });
+
+    // All the records should be expired now.
+    for (const auto& pair : c_keyValuePairs)
+    {
+        IReadOnlyHashTable::Value value;
+        BOOST_CHECK(!Get(hashTable, pair.first, value));
+    }
+
+    Utils::ValidateCounters(
+        perfData,
+        {
+            { HashTablePerfCounter::CacheHitCount, 8 },
+            { HashTablePerfCounter::CacheMissCount, 7 }
+        });
+}
+
+
+BOOST_FIXTURE_TEST_CASE(CacheHashTableIteratorTest, CacheHashTableTestFixture)
+{
+    // Don't care about eviction in this test case, so make the cache size big.
+    constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF;
+    constexpr seconds c_recordTimeToLive{ 20U };
+
+    CacheHashTable hashTable(
+        m_hashTable,
+        m_epochManager,
+        c_maxCacheSizeInBytes,
+        c_recordTimeToLive,
+        false);
+
+    const std::vector<std::string> c_keys = { "key1", "key2", "key3", "key4", "key5" };
+    const std::vector<std::string> c_vals = { "val1", "val2", "val3", "val4", "val5" };
+
+    // Add 5 records at a different epoch time (3 seconds increment).
+    for (std::size_t i = 0; i < c_keys.size(); ++i)
+    {
+        MockClock::IncrementEpochTime(seconds{ 3 });
+        Add(hashTable, c_keys[i], c_vals[i]);
+    }
+
+    // Now we have the following data set:
+    // | Key  | Value | Creation time |
+    // | key1 | val1  | 3             |
+    // | key2 | val2  | 6             |
+    // | key3 | val3  | 9             |
+    // | key4 | val4  | 12            |
+    // | key5 | val5  | 15            |
+    // And the current clock is at 15.
+
+    auto iterator = hashTable.GetIterator();
+    std::uint16_t numRecords = 0;
+    while (iterator->MoveNext())
+    {
+        ++numRecords;
+        BOOST_CHECK(Exist(iterator->GetKey(), c_keys));
+        BOOST_CHECK(Exist(iterator->GetValue(), c_vals));
+    }
+
+    BOOST_CHECK_EQUAL(numRecords, 5);
+
+    // The clock becomes 30 and key1, key2 and key3 should expire.
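+    // (Ages at clock 30 are 27, 24, 21, 18 and 15 seconds for key1..key5; with
+    // the TTL of 20, only key4 and key5 survive, so 2 records should remain.)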
+ MockClock::IncrementEpochTime(seconds{ 15 }); + + iterator = hashTable.GetIterator(); + numRecords = 0; + while (iterator->MoveNext()) + { + ++numRecords; + BOOST_CHECK( + Exist( + iterator->GetKey(), + std::vector{ c_keys.cbegin() + 2, c_keys.cend() })); + BOOST_CHECK( + Exist( + iterator->GetValue(), + std::vector{ c_vals.cbegin() + 2, c_vals.cend() })); + } + + BOOST_CHECK_EQUAL(numRecords, 2); + + // The clock becomes 40 and all records should be expired now. + MockClock::IncrementEpochTime(seconds{ 10 }); + + iterator = hashTable.GetIterator(); + while (iterator->MoveNext()) + { + BOOST_CHECK(false); + } +} + + +BOOST_FIXTURE_TEST_CASE(TimeBasedEvictionTest, CacheHashTableTestFixture) +{ + // We only care about time-based eviction in this test, so make the cache size big. + constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF; + constexpr seconds c_recordTimeToLive{ 10U }; + + // Hash table with one bucket makes testing the time-based eviction easy. + HashTable internalHashTable{ HashTable::Setting{ 1 }, m_allocator }; + CacheHashTable hashTable( + internalHashTable, + m_epochManager, + c_maxCacheSizeInBytes, + c_recordTimeToLive, + true); + + const std::vector> c_keyValuePairs = + { + { "key1", "value1" }, + { "key2", "value2" }, + { "key3", "value3" }, + { "key4", "value4" }, + { "key5", "value5" } + }; + + for (const auto& pair : c_keyValuePairs) + { + Add(hashTable, pair.first, pair.second); + BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second)); + } + + const auto& perfData = hashTable.GetPerfData(); + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::CacheHitCount, 5 }, + { HashTablePerfCounter::RecordsCount, 5 }, + { HashTablePerfCounter::EvictedRecordsCount, 0 }, + }); + + MockClock::IncrementEpochTime(seconds{ 20 }); + + // All the records should be expired now. + for (const auto& pair : c_keyValuePairs) + { + IReadOnlyHashTable::Value value; + BOOST_CHECK(!Get(hashTable, pair.first, value)); + } + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::CacheHitCount, 5 }, + { HashTablePerfCounter::CacheMissCount, 5 }, + { HashTablePerfCounter::RecordsCount, 5 }, + { HashTablePerfCounter::EvictedRecordsCount, 0 }, + }); + + // Now try to add one record and all the expired records should be evicted. + const auto& keyValuePair = c_keyValuePairs[0]; + Add(hashTable, keyValuePair.first, keyValuePair.second); + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, 1 }, + { HashTablePerfCounter::EvictedRecordsCount, 5 }, + }); +} + + +BOOST_FIXTURE_TEST_CASE(EvcitAllRecordsTest, CacheHashTableTestFixture) +{ + const auto& perfData = m_hashTable.m_perfData; + const auto initialTotalIndexSize = perfData.Get(HashTablePerfCounter::TotalIndexSize); + const std::uint64_t c_maxCacheSizeInBytes = 500 + initialTotalIndexSize; + constexpr seconds c_recordTimeToLive{ 5 }; + + CacheHashTable hashTable{ + m_hashTable, + m_epochManager, + c_maxCacheSizeInBytes, + c_recordTimeToLive, + false }; + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::EvictedRecordsCount, 0 }, + }); + + const std::vector> c_keyValuePairs = + { + { "key1", "value1" }, + { "key2", "value2" }, + { "key3", "value3" }, + { "key4", "value4" }, + { "key5", "value5" } + }; + + for (const auto& pair : c_keyValuePairs) + { + Add(hashTable, pair.first, pair.second); + } + + using L4::HashTable::RecordSerializer; + + // Variable key/value sizes. 
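+    // With both fixed sizes passed as 0, the serializer stores a size field per
+    // key and per value, so the per-record overhead should equal
+    // sizeof(Record::Key::size_type) + sizeof(Record::Value::size_type),
+    // matching the recordOverhead computation in HashTableRecordTest.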
+ const auto recordOverhead = RecordSerializer{ 0U, 0U }.CalculateRecordOverhead(); + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, c_keyValuePairs.size() }, + { HashTablePerfCounter::TotalIndexSize, initialTotalIndexSize + (c_keyValuePairs.size() * recordOverhead) }, + { HashTablePerfCounter::EvictedRecordsCount, 0 }, + }); + + // Make sure all data records added are present and update the access status for each + // record in order to test that accessed records are deleted when it's under memory constraint. + for (const auto& pair : c_keyValuePairs) + { + BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second)); + } + + // Now insert a record that will force all the records to be evicted due to size. + std::string bigRecordKeyStr(10, 'k'); + std::string bigRecordValStr(500, 'v'); + + Add(hashTable, bigRecordKeyStr, bigRecordValStr); + + // Make sure all the previously inserted records are evicted. + for (const auto& pair : c_keyValuePairs) + { + IReadOnlyHashTable::Value value; + BOOST_CHECK(!Get(hashTable, pair.first, value)); + } + + // Make sure the big record is inserted. + BOOST_CHECK(CheckRecord(hashTable, bigRecordKeyStr, bigRecordValStr)); + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, 1 }, + { HashTablePerfCounter::TotalIndexSize, initialTotalIndexSize + (1 * recordOverhead) }, + { HashTablePerfCounter::EvictedRecordsCount, c_keyValuePairs.size() }, + }); +} + + +BOOST_FIXTURE_TEST_CASE(EvcitRecordsBasedOnAccessStatusTest, CacheHashTableTestFixture) +{ + const std::uint64_t c_maxCacheSizeInBytes + = 2000 + m_hashTable.m_perfData.Get(HashTablePerfCounter::TotalIndexSize); + const seconds c_recordTimeToLive{ 5 }; + + CacheHashTable hashTable( + m_hashTable, + m_epochManager, + c_maxCacheSizeInBytes, + c_recordTimeToLive, + false); + + constexpr std::uint32_t c_valueSize = 100; + const std::string c_valStr(c_valueSize, 'v'); + const auto& perfData = hashTable.GetPerfData(); + std::uint16_t key = 1; + + while ((static_cast(perfData.Get(HashTablePerfCounter::TotalIndexSize)) + + perfData.Get(HashTablePerfCounter::TotalKeySize) + + perfData.Get(HashTablePerfCounter::TotalValueSize) + + c_valueSize) + < c_maxCacheSizeInBytes) + { + std::stringstream ss; + ss << "key" << key; + Add(hashTable, ss.str(), c_valStr); + ++key; + } + + // Make sure no eviction happened. + BOOST_CHECK_EQUAL(m_epochManager.m_numRegisterActionsCalled, 0U); + + // Look up with the "key1" key to update the access state. + BOOST_CHECK(CheckRecord(hashTable, "key1", c_valStr)); + + // Now add a new key, which triggers an eviction, but deletes other records than the "key1" record. + Add(hashTable, "newkey", c_valStr); + + // Now, eviction should have happened. + BOOST_CHECK_GE(m_epochManager.m_numRegisterActionsCalled, 1U); + + // The "key1" record should not have been evicted. + BOOST_CHECK(CheckRecord(hashTable, "key1", c_valStr)); + + // Make sure the new key is actually added. + BOOST_CHECK(CheckRecord(hashTable, "newkey", c_valStr)); +} + + +// This is similar to the one in ReadWriteHashTableTest, but necessary since cache store adds the meta values. +BOOST_FIXTURE_TEST_CASE(FixedKeyValueHashTableTest, CacheHashTableTestFixture) +{ + // Fixed 4 byte keys and 6 byte values. 
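+    // (A size of 0 in a Setting presumably selects the variable-size path, so
+    // the three settings below cover fixed-key-only, fixed-value-only, and
+    // fully fixed layouts.)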
+ std::vector settings = + { + HashTable::Setting{ 100, 200, 4, 0 }, + HashTable::Setting{ 100, 200, 0, 6 }, + HashTable::Setting{ 100, 200, 4, 6 } + }; + + for (const auto& setting : settings) + { + // Don't care about evict in this test case, so make the cache size big. + constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF; + constexpr seconds c_recordTimeToLive{ 20U }; + + HashTable hashTable{ setting, m_allocator }; + CacheHashTable writableHashTable{ + hashTable, + m_epochManager, + c_maxCacheSizeInBytes, + c_recordTimeToLive, + false }; + + ReadOnlyCacheHashTable readOnlyHashTable{ hashTable, c_recordTimeToLive }; + + constexpr std::uint8_t c_numRecords = 10; + + // Add records. + for (std::uint8_t i = 0; i < c_numRecords; ++i) + { + Add(writableHashTable, "key" + std::to_string(i), "value" + std::to_string(i)); + } + + Utils::ValidateCounters( + writableHashTable.GetPerfData(), + { + { HashTablePerfCounter::RecordsCount, 10 }, + { HashTablePerfCounter::BucketsCount, 100 }, + { HashTablePerfCounter::TotalKeySize, 40 }, + { HashTablePerfCounter::TotalValueSize, 100 }, + { HashTablePerfCounter::MinKeySize, 4 }, + { HashTablePerfCounter::MaxKeySize, 4 }, + { HashTablePerfCounter::MinValueSize, 10 }, + { HashTablePerfCounter::MaxValueSize, 10 } + }); + + // Validate all the records added. + for (std::uint8_t i = 0; i < c_numRecords; ++i) + { + CheckRecord(readOnlyHashTable, "key" + std::to_string(i), "value" + std::to_string(i)); + } + + // Remove first half of the records. + for (std::uint8_t i = 0; i < c_numRecords / 2; ++i) + { + Remove(writableHashTable, "key" + std::to_string(i)); + } + + Utils::ValidateCounters( + writableHashTable.GetPerfData(), + { + { HashTablePerfCounter::RecordsCount, 5 }, + { HashTablePerfCounter::BucketsCount, 100 }, + { HashTablePerfCounter::TotalKeySize, 20 }, + { HashTablePerfCounter::TotalValueSize, 50 } + }); + + // Verify the records. + for (std::uint8_t i = 0; i < c_numRecords; ++i) + { + if (i < (c_numRecords / 2)) + { + IReadOnlyHashTable::Value value; + BOOST_CHECK(!Get(readOnlyHashTable, "key" + std::to_string(i), value)); + } + else + { + CheckRecord(readOnlyHashTable, "key" + std::to_string(i), "value" + std::to_string(i)); + } + } + + // Expire all the records. + MockClock::IncrementEpochTime(seconds{ 100 }); + + // Verify the records. 
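+        // Every record is now past the 20-second TTL, so each Get() should miss.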
+ for (std::uint8_t i = 0; i < c_numRecords; ++i) + { + IReadOnlyHashTable::Value value; + BOOST_CHECK(!Get(readOnlyHashTable, "key" + std::to_string(i), value)); + } + } +} + +BOOST_AUTO_TEST_SUITE_END() + +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/CheckedAllocator.h b/Unittests/CheckedAllocator.h new file mode 100644 index 0000000..7bb986f --- /dev/null +++ b/Unittests/CheckedAllocator.h @@ -0,0 +1,67 @@ +#pragma once + +#include +#include +#include + +namespace L4 +{ +namespace UnitTests +{ + +struct AllocationAddressHolder : public std::set +{ + ~AllocationAddressHolder() + { + BOOST_REQUIRE(empty()); + } +}; + +template +class CheckedAllocator : public std::allocator +{ +public: + using Base = std::allocator; + + template + struct rebind + { + typedef CheckedAllocator other; + }; + + CheckedAllocator() + : m_allocationAddresses{ std::make_shared() } + {} + + CheckedAllocator(const CheckedAllocator&) = default; + + template + CheckedAllocator(const CheckedAllocator& other) + : m_allocationAddresses{ other.m_allocationAddresses } + {} + + template + CheckedAllocator& operator=(const CheckedAllocator& other) + { + m_allocationAddresses = other.m_allocationAddresses; + return (*this); + } + + pointer allocate(std::size_t count, std::allocator::const_pointer hint = 0) + { + auto address = Base::allocate(count, hint); + BOOST_REQUIRE(m_allocationAddresses->insert(address).second); + return address; + } + + void deallocate(pointer ptr, std::size_t count) + { + BOOST_REQUIRE(m_allocationAddresses->erase(ptr) == 1); + Base::deallocate(ptr, count); + } + + std::shared_ptr m_allocationAddresses; +}; + +} // namespace UnitTests +} // namespace L4 \ No newline at end of file diff --git a/Unittests/EpochManagerTest.cpp b/Unittests/EpochManagerTest.cpp new file mode 100644 index 0000000..6803b90 --- /dev/null +++ b/Unittests/EpochManagerTest.cpp @@ -0,0 +1,187 @@ +#include "stdafx.h" +#include "Utils.h" +#include "L4/Epoch/EpochQueue.h" +#include "L4/Epoch/EpochActionManager.h" +#include "L4/LocalMemory/EpochManager.h" +#include "L4/Log/PerfCounter.h" +#include "L4/Utils/Lock.h" +#include + +namespace L4 +{ +namespace UnitTests +{ + +BOOST_AUTO_TEST_SUITE(EpochManagerTests) + +BOOST_AUTO_TEST_CASE(EpochRefManagerTest) +{ + std::uint64_t currentEpochCounter = 5U; + const std::uint32_t c_epochQueueSize = 100U; + + using EpochQueue = EpochQueue< + boost::shared_lock_guard, + std::lock_guard>; + + EpochQueue epochQueue(currentEpochCounter, c_epochQueueSize); + + // Initially the ref count at the current epoch counter should be 0. + BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 0U); + + EpochRefManager epochManager(epochQueue); + + BOOST_CHECK_EQUAL(epochManager.AddRef(), currentEpochCounter); + + // Validate that a reference count is incremented at the current epoch counter. + BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 1U); + + epochManager.RemoveRef(currentEpochCounter); + + // Validate that a reference count is back to 0. + BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 0U); + + // Decrementing a reference counter when it is already 0 will result in an exception. 
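+    // (CHECK_EXCEPTION_THROWN_WITH_MESSAGE is presumably a test helper from
+    // Utils.h that asserts both that the statement throws and that the
+    // exception message matches the given text.)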
+    CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
+        epochManager.RemoveRef(currentEpochCounter);,
+        "Reference counter is invalid.");
+}
+
+
+BOOST_AUTO_TEST_CASE(EpochCounterManagerTest)
+{
+    std::uint64_t currentEpochCounter = 0U;
+    const std::uint32_t c_epochQueueSize = 100U;
+
+    using EpochQueue = EpochQueue<
+        boost::shared_lock_guard,
+        std::lock_guard>;
+
+    EpochQueue epochQueue(currentEpochCounter, c_epochQueueSize);
+
+    EpochCounterManager epochCounterManager(epochQueue);
+
+    // If RemoveUnreferenceEpochCounters() is called when m_frontIndex and m_backIndex are
+    // the same, it will just return either value.
+    BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter);
+
+    // Add two epoch counts.
+    ++currentEpochCounter;
+    ++currentEpochCounter;
+    epochCounterManager.AddNewEpoch();
+    epochCounterManager.AddNewEpoch();
+
+    BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, 0U);
+    BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
+    BOOST_CHECK_EQUAL(epochQueue.m_refCounts[epochQueue.m_frontIndex], 0U);
+
+    // Since the reference count at m_frontIndex was zero, m_frontIndex will be
+    // advanced all the way to currentEpochCounter.
+    BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter);
+    BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter);
+    BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
+
+    EpochRefManager epochRefManager(epochQueue);
+
+    // Now add a reference at the currentEpochCounter.
+    const auto epochCounterReferenced = epochRefManager.AddRef();
+    BOOST_CHECK_EQUAL(epochCounterReferenced, currentEpochCounter);
+
+    // Calling RemoveUnreferenceEpochCounters() should just return currentEpochCounter
+    // since m_frontIndex and m_backIndex are the same (not affected by adding a reference yet).
+    BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter);
+    BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter);
+    BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
+
+    // Add one epoch count.
+    ++currentEpochCounter;
+    epochCounterManager.AddNewEpoch();
+
+    // Now RemoveUnreferenceEpochCounters() should return epochCounterReferenced because
+    // of the reference count.
+    BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), epochCounterReferenced);
+    BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, epochCounterReferenced);
+    BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
+
+    // Remove the reference.
+    epochRefManager.RemoveRef(epochCounterReferenced);
+
+    // Now RemoveUnreferenceEpochCounters() should return currentEpochCounter and m_frontIndex
+    // should be in sync with m_backIndex.
+    BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter);
+    BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter);
+    BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
+}
+
+
+BOOST_AUTO_TEST_CASE(EpochActionManagerTest)
+{
+    EpochActionManager actionManager(2U);
+
+    bool isAction1Called = false;
+    bool isAction2Called = false;
+
+    auto action1 = [&]() { isAction1Called = true; };
+    auto action2 = [&]() { isAction2Called = true; };
+
+    // Register action1 and action2 at epoch counts 5 and 6 respectively.
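+    // PerformActions(n) is expected to run the actions registered at epochs
+    // strictly below n, which is why action1 fires only once the counter
+    // reaches 6, and action2 only at 7.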
+ actionManager.RegisterAction(5U, action1); + actionManager.RegisterAction(6U, action2); + + BOOST_CHECK(!isAction1Called && !isAction2Called); + + actionManager.PerformActions(4); + BOOST_CHECK(!isAction1Called && !isAction2Called); + + actionManager.PerformActions(5); + BOOST_CHECK(!isAction1Called && !isAction2Called); + + actionManager.PerformActions(6); + BOOST_CHECK(isAction1Called && !isAction2Called); + + actionManager.PerformActions(7); + BOOST_CHECK(isAction1Called && isAction2Called); +} + + +BOOST_AUTO_TEST_CASE(EpochManagerTest) +{ + ServerPerfData perfData; + LocalMemory::EpochManager epochManager( + EpochManagerConfig(100000U, std::chrono::milliseconds(5U), 1U), + perfData); + + std::atomic isActionCalled = false; + auto action = [&]() { isActionCalled = true; }; + + auto epochCounterReferenced = epochManager.GetEpochRefManager().AddRef(); + + epochManager.RegisterAction(action); + + // Justification for using sleep_for in unit tests: + // - EpochManager already uses an internal thread which wakes up and perform a task + // in a given interval and when the class is destroyed, there is a mechanism for + // waiting for the thread anyway. It's more crucial to test the end to end scenario this way. + // - The overall execution time for this test is less than 50 milliseconds. + auto initialEpochCounter = perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue); + while (perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue) - initialEpochCounter < 2) + { + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + } + + BOOST_CHECK(!isActionCalled); + + epochManager.GetEpochRefManager().RemoveRef(epochCounterReferenced); + + initialEpochCounter = perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue); + while (perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue) - initialEpochCounter < 2) + { + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + } + + BOOST_CHECK(isActionCalled); +} + +BOOST_AUTO_TEST_SUITE_END() + +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/HashTableManagerTest.cpp b/Unittests/HashTableManagerTest.cpp new file mode 100644 index 0000000..35c126c --- /dev/null +++ b/Unittests/HashTableManagerTest.cpp @@ -0,0 +1,65 @@ +#include "stdafx.h" +#include "Utils.h" +#include "Mocks.h" +#include "L4/HashTable/Config.h" +#include "L4/HashTable/IHashTable.h" +#include "L4/LocalMemory/HashTableManager.h" + +namespace L4 +{ +namespace UnitTests +{ + +template +static void ValidateRecord( + const Store& store, + const char* expectedKeyStr, + const char* expectedValueStr) +{ + IReadOnlyHashTable::Value actualValue; + auto expectedValue = Utils::ConvertFromString(expectedValueStr); + BOOST_CHECK(store.Get(Utils::ConvertFromString(expectedKeyStr), actualValue)); + BOOST_CHECK(actualValue.m_size == expectedValue.m_size); + BOOST_CHECK(!memcmp(actualValue.m_data, expectedValue.m_data, expectedValue.m_size)); +} + +BOOST_AUTO_TEST_CASE(HashTableManagerTest) +{ + MockEpochManager epochManager; + PerfData perfData; + + LocalMemory::HashTableManager htManager; + const auto ht1Index = htManager.Add( + HashTableConfig("HashTable1", HashTableConfig::Setting(100U)), + epochManager, + std::allocator()); + const auto ht2Index = htManager.Add( + HashTableConfig("HashTable2", HashTableConfig::Setting(200U)), + epochManager, + std::allocator()); + + { + auto& hashTable1 = htManager.GetHashTable("HashTable1"); + hashTable1.Add( + Utils::ConvertFromString("HashTable1Key"), + Utils::ConvertFromString("HashTable1Value")); + + auto& hashTable2 = 
htManager.GetHashTable("HashTable2");
+        hashTable2.Add(
+            Utils::ConvertFromString("HashTable2Key"),
+            Utils::ConvertFromString("HashTable2Value"));
+    }
+
+    ValidateRecord(
+        htManager.GetHashTable(ht1Index),
+        "HashTable1Key",
+        "HashTable1Value");
+
+    ValidateRecord(
+        htManager.GetHashTable(ht2Index),
+        "HashTable2Key",
+        "HashTable2Value");
+}
+
+} // namespace UnitTests
+} // namespace L4
\ No newline at end of file
diff --git a/Unittests/HashTableRecordTest.cpp b/Unittests/HashTableRecordTest.cpp
new file mode 100644
index 0000000..3f678c1
--- /dev/null
+++ b/Unittests/HashTableRecordTest.cpp
@@ -0,0 +1,163 @@
+#include "stdafx.h"
+#include "L4/HashTable/Common/Record.h"
+#include "Utils.h"
+#include
+#include
+#include
+
+namespace L4
+{
+namespace UnitTests
+{
+
+using namespace HashTable;
+
+class HashTableRecordTestFixture
+{
+protected:
+    void Run(bool isFixedKey, bool isFixedValue, bool useMetaValue)
+    {
+        BOOST_TEST_MESSAGE(
+            "Running with isFixedKey=" << isFixedKey
+            << ", isFixedValue=" << isFixedValue
+            << ", useMetaValue=" << useMetaValue);
+
+        const std::string key = "TestKey";
+        const std::string value = "TestValue";
+        const std::string metaValue = "TestMetavalue";
+
+        const auto recordOverhead = (isFixedKey ? 0U : c_keyTypeSize) + (isFixedValue ? 0U : c_valueTypeSize);
+
+        Validate(
+            RecordSerializer{
+                isFixedKey ? static_cast(key.size()) : 0U,
+                isFixedValue ? static_cast(value.size()) : 0U,
+                useMetaValue ? static_cast(metaValue.size()) : 0U },
+            key,
+            value,
+            recordOverhead + key.size() + value.size() + (useMetaValue ? metaValue.size() : 0U),
+            recordOverhead,
+            useMetaValue ? boost::optional{ metaValue } : boost::none);
+    }
+
+private:
+    void Validate(
+        const RecordSerializer& serializer,
+        const std::string& keyStr,
+        const std::string& valueStr,
+        std::size_t expectedBufferSize,
+        std::size_t expectedRecordOverheadSize,
+        boost::optional metadataStr = boost::none)
+    {
+        BOOST_CHECK_EQUAL(serializer.CalculateRecordOverhead(), expectedRecordOverheadSize);
+
+        const auto key = Utils::ConvertFromString(keyStr.c_str());
+        const auto value = Utils::ConvertFromString(valueStr.c_str());
+
+        const auto bufferSize = serializer.CalculateBufferSize(key, value);
+
+        BOOST_REQUIRE_EQUAL(bufferSize, expectedBufferSize);
+        std::vector buffer(bufferSize);
+
+        RecordBuffer* recordBuffer = nullptr;
+
+        if (metadataStr)
+        {
+            auto metaValue = Utils::ConvertFromString(metadataStr->c_str());
+            recordBuffer = serializer.Serialize(key, value, metaValue, buffer.data(), bufferSize);
+        }
+        else
+        {
+            recordBuffer = serializer.Serialize(key, value, buffer.data(), bufferSize);
+        }
+
+        const auto record = serializer.Deserialize(*recordBuffer);
+
+        // Make sure the serialized data is in a different memory location.
+        BOOST_CHECK(record.m_key.m_data != key.m_data);
+        BOOST_CHECK(record.m_value.m_data != value.m_data);
+
+        BOOST_CHECK(record.m_key == key);
+        if (metadataStr)
+        {
+            const std::string newValueStr = *metadataStr + valueStr;
+            const auto newValue = Utils::ConvertFromString(newValueStr.c_str());
+            BOOST_CHECK(record.m_value == newValue);
+        }
+        else
+        {
+            BOOST_CHECK(record.m_value == value);
+        }
+    }
+
+    static constexpr std::size_t c_keyTypeSize = sizeof(Record::Key::size_type);
+    static constexpr std::size_t c_valueTypeSize = sizeof(Record::Value::size_type);
+};
+
+BOOST_FIXTURE_TEST_SUITE(HashTableRecordTests, HashTableRecordTestFixture)
+
+BOOST_AUTO_TEST_CASE(RunAll)
+{
+    // Run all permutations for Run(), which takes three booleans.
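+    // Bits of i map to the arguments: bit 2 -> isFixedKey, bit 1 -> isFixedValue,
+    // bit 0 -> useMetaValue, covering all 8 combinations.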
+    for (int i = 0; i < 8; ++i)
+    {
+        Run(
+            !!((i >> 2) & 1),
+            !!((i >> 1) & 1),
+            !!((i) & 1));
+    }
+}
+
+
+BOOST_AUTO_TEST_CASE(InvalidSizeTest)
+{
+    std::vector<std::uint8_t> buffer(100U);
+
+    RecordSerializer serializer{ 4, 5 };
+
+    const std::string keyStr = "1234";
+    const std::string invalidStr = "999999";
+    const std::string valueStr = "12345";
+
+    const auto key = Utils::ConvertFromString(keyStr.c_str());
+    const auto value = Utils::ConvertFromString(valueStr.c_str());
+
+    const auto invalidKey = Utils::ConvertFromString(invalidStr.c_str());
+    const auto invalidValue = Utils::ConvertFromString(invalidStr.c_str());
+
+    CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
+        serializer.Serialize(invalidKey, value, buffer.data(), buffer.size()),
+        "Invalid key or value sizes are given.");
+
+    CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
+        serializer.Serialize(key, invalidValue, buffer.data(), buffer.size()),
+        "Invalid key or value sizes are given.");
+
+    CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
+        serializer.Serialize(invalidKey, invalidValue, buffer.data(), buffer.size()),
+        "Invalid key or value sizes are given.");
+
+    // The normal case shouldn't throw an exception.
+    serializer.Serialize(key, value, buffer.data(), buffer.size());
+
+    RecordSerializer serializerWithMetaValue{ 4, 5, 2 };
+    std::uint16_t metadata = 0;
+
+    Record::Value metaValue{
+        reinterpret_cast<const std::uint8_t*>(&metadata),
+        sizeof(metadata) };
+
+    // The normal case shouldn't throw an exception.
+    serializerWithMetaValue.Serialize(key, value, metaValue, buffer.data(), buffer.size());
+
+    // A mismatching size is given.
+    metaValue.m_size = 1;
+    CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
+        serializerWithMetaValue.Serialize(key, value, metaValue, buffer.data(), buffer.size()),
+        "Invalid meta value size is given.");
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+} // namespace UnitTests
+} // namespace L4
\ No newline at end of file
diff --git a/Unittests/HashTableServiceTest.cpp b/Unittests/HashTableServiceTest.cpp
new file mode 100644
index 0000000..2d15008
--- /dev/null
+++ b/Unittests/HashTableServiceTest.cpp
@@ -0,0 +1,52 @@
+#include "stdafx.h"
+#include "Mocks.h"
+#include "Utils.h"
+#include "L4/LocalMemory/HashTableService.h"
+#include
+#include
+
+namespace L4
+{
+namespace UnitTests
+{
+
+BOOST_AUTO_TEST_CASE(HashTableServiceTest)
+{
+    std::vector<std::pair<std::string, std::string>> dataSet;
+    for (std::uint16_t i = 0U; i < 100; ++i)
+    {
+        dataSet.emplace_back("key" + std::to_string(i), "value" + std::to_string(i));
+    }
+
+    LocalMemory::HashTableService htService;
+    htService.AddHashTable(
+        HashTableConfig("Table1", HashTableConfig::Setting{ 100U }));
+    htService.AddHashTable(
+        HashTableConfig(
+            "Table2",
+            HashTableConfig::Setting{ 1000U },
+            HashTableConfig::Cache{ 1024, std::chrono::seconds{ 1U }, false }));
+
+    for (const auto& data : dataSet)
+    {
+        htService.GetContext()["Table1"].Add(
+            Utils::ConvertFromString(data.first.c_str()),
+            Utils::ConvertFromString(data.second.c_str()));
+    }
+
+    // Smoke tests for looking up the data.
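+    // A single context is taken out once and reused for all lookups below;
+    // presumably each GetContext() call pins the current epoch, so reusing
+    // one context also keeps the returned values stable for the whole loop.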
+    {
+        auto context = htService.GetContext();
+        for (const auto& data : dataSet)
+        {
+            IReadOnlyHashTable::Value val;
+            BOOST_CHECK(context["Table1"].Get(
+                Utils::ConvertFromString(data.first.c_str()),
+                val));
+            BOOST_CHECK(Utils::ConvertToString(val) == data.second);
+        }
+    }
+}
+
+} // namespace UnitTests
+} // namespace L4
\ No newline at end of file
diff --git a/Unittests/Mocks.h b/Unittests/Mocks.h
new file mode 100644
index 0000000..efd1fe7
--- /dev/null
+++ b/Unittests/Mocks.h
@@ -0,0 +1,164 @@
+#pragma once
+
+#include "stdafx.h"
+#include "L4/Epoch/IEpochActionManager.h"
+#include "L4/Log/PerfLogger.h"
+#include "L4/Serialization/IStream.h"
+
+namespace L4
+{
+namespace UnitTests
+{
+
+class MockPerfLogger : public IPerfLogger
+{
+    virtual void Log(const IData& data) override
+    {
+        (void)data;
+    }
+};
+
+struct MockEpochManager : public IEpochActionManager
+{
+    MockEpochManager()
+        : m_numRegisterActionsCalled(0)
+    {
+    }
+
+    virtual void RegisterAction(Action&& action) override
+    {
+        ++m_numRegisterActionsCalled;
+        action();
+    };
+
+    std::uint16_t m_numRegisterActionsCalled;
+};
+
+class StreamBase
+{
+public:
+    using StreamBuffer = std::vector<std::uint8_t>;
+
+protected:
+    StreamBase() = default;
+
+    void Begin()
+    {
+        m_isBeginCalled = !m_isBeginCalled;
+        if (!m_isBeginCalled)
+        {
+            BOOST_FAIL("Begin() is called multiple times.");
+        }
+    }
+
+    void End()
+    {
+        if (!m_isBeginCalled)
+        {
+            BOOST_FAIL("Begin() is not called yet.");
+        }
+
+        m_isEndCalled = !m_isEndCalled;
+        if (!m_isEndCalled)
+        {
+            BOOST_FAIL("End() is called multiple times.");
+        }
+    }
+
+    void Validate()
+    {
+        if (!m_isBeginCalled)
+        {
+            BOOST_FAIL("Begin() is not called yet.");
+        }
+
+        if (m_isEndCalled)
+        {
+            BOOST_FAIL("End() is already called.");
+        }
+    }
+
+    bool IsValid() const
+    {
+        return m_isBeginCalled && m_isEndCalled;
+    }
+
+    bool m_isBeginCalled = false;
+    bool m_isEndCalled = false;
+};
+
+
+class MockStreamWriter : public IStreamWriter, private StreamBase
+{
+public:
+    virtual void Begin() override
+    {
+        StreamBase::Begin();
+    }
+
+    virtual void End() override
+    {
+        StreamBase::End();
+    }
+
+    virtual void Write(const std::uint8_t buffer[], std::size_t bufferSize) override
+    {
+        StreamBase::Validate();
+        m_buffer.insert(m_buffer.end(), buffer, buffer + bufferSize);
+    }
+
+    bool IsValid() const
+    {
+        return StreamBase::IsValid();
+    }
+
+    const StreamBuffer& GetStreamBuffer() const
+    {
+        return m_buffer;
+    }
+
+private:
+    StreamBuffer m_buffer;
+};
+
+
+class MockStreamReader : public IStreamReader, private StreamBase
+{
+public:
+    explicit MockStreamReader(const StreamBuffer& buffer)
+        : m_buffer(buffer),
+        m_bufferIter(m_buffer.cbegin())
+    {
+    }
+
+    virtual void Begin() override
+    {
+        StreamBase::Begin();
+    }
+
+    virtual void End() override
+    {
+        // Make sure everything is read from the stream.
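+        // Requiring full consumption here catches both a deserializer that
+        // under-reads and a serializer that writes more than its reader
+        // expects, so any format drift between the two fails the test.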
+ BOOST_REQUIRE(m_bufferIter == m_buffer.end()); + StreamBase::End(); + } + + virtual void Read(std::uint8_t buffer[], std::size_t bufferSize) override + { + StreamBase::Validate(); + std::copy(m_bufferIter, m_bufferIter + bufferSize, buffer); + m_bufferIter += bufferSize; + } + + bool IsValid() const + { + return StreamBase::IsValid(); + } + +private: + StreamBuffer m_buffer; + StreamBuffer::const_iterator m_bufferIter; +}; + +} // namespace UnitTests +} // namespace L4 \ No newline at end of file diff --git a/Unittests/PerfInfoTest.cpp b/Unittests/PerfInfoTest.cpp new file mode 100644 index 0000000..afc3cbc --- /dev/null +++ b/Unittests/PerfInfoTest.cpp @@ -0,0 +1,104 @@ +#include "stdafx.h" +#include "L4/Log/PerfLogger.h" +#include + +namespace L4 +{ +namespace LocalMemory +{ + +void CheckMinCounters(const HashTablePerfData& htPerfData) +{ + const auto maxValue = (std::numeric_limits::max)(); + /// Check if the min counter values are correctly initialized to max value. + BOOST_CHECK_EQUAL(htPerfData.Get(HashTablePerfCounter::MinValueSize), maxValue); + BOOST_CHECK_EQUAL(htPerfData.Get(HashTablePerfCounter::MinKeySize), maxValue); +} + +BOOST_AUTO_TEST_CASE(PerfCountersTest) +{ + enum class TestCounter + { + Counter = 0, + Count + }; + + PerfCounters perfCounters; + + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 0); + + perfCounters.Set(TestCounter::Counter, 10); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10); + + perfCounters.Increment(TestCounter::Counter); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 11); + + perfCounters.Decrement(TestCounter::Counter); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10); + + perfCounters.Add(TestCounter::Counter, 5); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 15); + + perfCounters.Subtract(TestCounter::Counter, 10); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 5); + + perfCounters.Max(TestCounter::Counter, 10); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10); + + perfCounters.Max(TestCounter::Counter, 9); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10); + + perfCounters.Min(TestCounter::Counter, 1); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 1); + + perfCounters.Min(TestCounter::Counter, 10); + BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 1); +} + + +BOOST_AUTO_TEST_CASE(PerfDataTest) +{ + PerfData testPerfData; + + BOOST_CHECK(testPerfData.GetHashTablesPerfData().empty()); + + HashTablePerfData htPerfData1; + HashTablePerfData htPerfData2; + HashTablePerfData htPerfData3; + + CheckMinCounters(htPerfData1); + CheckMinCounters(htPerfData2); + CheckMinCounters(htPerfData3); + + testPerfData.AddHashTablePerfData("HT1", htPerfData1); + testPerfData.AddHashTablePerfData("HT2", htPerfData2); + testPerfData.AddHashTablePerfData("HT3", htPerfData3); + + /// Update counters and check if they are correctly updated. + htPerfData1.Set(HashTablePerfCounter::TotalKeySize, 10); + htPerfData2.Set(HashTablePerfCounter::TotalKeySize, 20); + htPerfData3.Set(HashTablePerfCounter::TotalKeySize, 30); + + // Check if the hash table perf data is correctly registered. 
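+    // Note: AddHashTablePerfData() presumably stores references (e.g. a
+    // std::reference_wrapper, given the .get() calls below) rather than
+    // copies, which is why the Set() calls made above after registration
+    // are visible through the map here.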
+ const auto& hashTablesPerfData = testPerfData.GetHashTablesPerfData(); + BOOST_CHECK_EQUAL(hashTablesPerfData.size(), 3U); + + { + auto htPerfDataIt = hashTablesPerfData.find("HT1"); + BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end()); + BOOST_CHECK_EQUAL(htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 10); + } + { + auto htPerfDataIt = hashTablesPerfData.find("HT2"); + BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end()); + BOOST_CHECK_EQUAL(htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 20); + } + { + auto htPerfDataIt = hashTablesPerfData.find("HT3"); + BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end()); + BOOST_CHECK_EQUAL(htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 30); + } +} + +} // namespace LocalMemory +} // namespace L4 \ No newline at end of file diff --git a/Unittests/ReadWriteHashTableSerializerTest.cpp b/Unittests/ReadWriteHashTableSerializerTest.cpp new file mode 100644 index 0000000..d5c787c --- /dev/null +++ b/Unittests/ReadWriteHashTableSerializerTest.cpp @@ -0,0 +1,220 @@ +#include "stdafx.h" +#include "Utils.h" +#include "Mocks.h" +#include "L4/HashTable/ReadWrite/HashTable.h" +#include "L4/HashTable/ReadWrite/Serializer.h" +#include "L4/Log/PerfCounter.h" +#include +#include + +namespace L4 +{ +namespace UnitTests +{ + +class LocalMemory +{ +public: + template + using Allocator = typename std::allocator; + + template + using Deleter = typename std::default_delete; + + template + using UniquePtr = std::unique_ptr; + + LocalMemory() = default; + + template + auto MakeUnique(Args&&... args) + { + return std::make_unique(std::forward(args)...); + } + + template + auto GetAllocator() + { + return Allocator(); + } + + template + auto GetDeleter() + { + return Deleter(); + } + + LocalMemory(const LocalMemory&) = delete; + LocalMemory& operator=(const LocalMemory&) = delete; +}; + +using namespace HashTable::ReadWrite; + +BOOST_AUTO_TEST_SUITE(HashTableSerializerTests) + +using KeyValuePair = std::pair; +using KeyValuePairs = std::vector; +using Memory = LocalMemory; +using Allocator = typename Memory:: template Allocator<>; +using HashTable = WritableHashTable::HashTable; + +template +void ValidateSerializer( + const Serializer& serializer, + const Deserializer& deserializer, + std::uint8_t serializerVersion, + const KeyValuePairs& keyValuePairs, + const Utils::ExpectedCounterValues& expectedCounterValuesAfterLoad, + const Utils::ExpectedCounterValues& expectedCounterValuesAfterSerialization, + const Utils::ExpectedCounterValues& expectedCounterValuesAfterDeserialization) +{ + LocalMemory memory; + MockEpochManager epochManager; + + auto hashTableHolder{ + memory.MakeUnique( + HashTable::Setting{ 5 }, memory.GetAllocator()) }; + BOOST_CHECK(hashTableHolder != nullptr); + + WritableHashTable writableHashTable(*hashTableHolder, epochManager); + + // Insert the given key/value pairs to the hash table. + for (const auto& pair : keyValuePairs) + { + auto key = Utils::ConvertFromString(pair.first.c_str()); + auto val = Utils::ConvertFromString(pair.second.c_str()); + + writableHashTable.Add(key, val); + } + + const auto& perfData = writableHashTable.GetPerfData(); + + Utils::ValidateCounters(perfData, expectedCounterValuesAfterLoad); + + // Now write the hash table to the stream. 
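+    // The MockStreamWriter (from Mocks.h) tracks the Begin()/End() bracketing,
+    // so the IsValid() checks around Serialize() verify that the serializer
+    // drives the stream protocol correctly in addition to writing the data.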
+ MockStreamWriter writer; + BOOST_CHECK(!writer.IsValid()); + serializer.Serialize(*hashTableHolder, writer); + BOOST_CHECK(writer.IsValid()); + Utils::ValidateCounters(perfData, expectedCounterValuesAfterSerialization); + + // Read in the hash table from the stream and validate it. + MockStreamReader reader(writer.GetStreamBuffer()); + + // version == 0 means that it's run through the HashTableSerializer, thus the following can be skipped. + if (serializerVersion != 0) + { + std::uint8_t actualSerializerVersion = 0; + reader.Begin(); + reader.Read(&actualSerializerVersion, sizeof(actualSerializerVersion)); + BOOST_CHECK(actualSerializerVersion == serializerVersion); + } + else + { + BOOST_REQUIRE(typeid(L4::HashTable::ReadWrite::Serializer) == typeid(Serializer)); + } + + BOOST_CHECK(!reader.IsValid()); + + auto newHashTableHolder = deserializer.Deserialize(memory, reader); + + BOOST_CHECK(reader.IsValid()); + BOOST_CHECK(newHashTableHolder != nullptr); + + WritableHashTable newWritableHashTable(*newHashTableHolder, epochManager); + + const auto& newPerfData = newWritableHashTable.GetPerfData(); + + Utils::ValidateCounters(newPerfData, expectedCounterValuesAfterDeserialization); + + // Make sure all the key/value pairs exist after deserialization. + for (const auto& pair : keyValuePairs) + { + auto key = Utils::ConvertFromString(pair.first.c_str()); + IReadOnlyHashTable::Value val; + BOOST_CHECK(newWritableHashTable.Get(key, val)); + BOOST_CHECK(Utils::ConvertToString(val) == pair.second); + } +} + + +BOOST_AUTO_TEST_CASE(CurrentSerializerTest) +{ + ValidateSerializer( + Current::Serializer{}, + Current::Deserializer{ L4::Utils::Properties{} }, + Current::c_version, + { + { "hello1", " world1" }, + { "hello2", " world2" }, + { "hello3", " world3" } + }, + { + { HashTablePerfCounter::RecordsCount, 3 }, + { HashTablePerfCounter::BucketsCount, 5 }, + { HashTablePerfCounter::TotalKeySize, 18 }, + { HashTablePerfCounter::TotalValueSize, 21 }, + { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 }, + { HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 } + }, + { + { HashTablePerfCounter::RecordsCount, 3 }, + { HashTablePerfCounter::BucketsCount, 5 }, + { HashTablePerfCounter::TotalKeySize, 18 }, + { HashTablePerfCounter::TotalValueSize, 21 }, + { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 }, + { HashTablePerfCounter::RecordsCountSavedFromSerializer, 3 } + }, + { + { HashTablePerfCounter::RecordsCount, 3 }, + { HashTablePerfCounter::BucketsCount, 5 }, + { HashTablePerfCounter::TotalKeySize, 18 }, + { HashTablePerfCounter::TotalValueSize, 21 }, + { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 3 }, + { HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 } + }); +} + + +BOOST_AUTO_TEST_CASE(HashTableSerializeTest) +{ + // This test case tests end to end scenario using the HashTableSerializer. 
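+    // Passing 0U as the serializer version tells ValidateSerializer() to skip
+    // the explicit version-byte check above; the wrapper Serializer is
+    // expected to handle the version byte internally.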
+ ValidateSerializer( + Serializer{}, + Deserializer{ L4::Utils::Properties{} }, + 0U, + { + { "hello1", " world1" }, + { "hello2", " world2" }, + { "hello3", " world3" } + }, + { + { HashTablePerfCounter::RecordsCount, 3 }, + { HashTablePerfCounter::BucketsCount, 5 }, + { HashTablePerfCounter::TotalKeySize, 18 }, + { HashTablePerfCounter::TotalValueSize, 21 }, + { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 }, + { HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 } + }, + { + { HashTablePerfCounter::RecordsCount, 3 }, + { HashTablePerfCounter::BucketsCount, 5 }, + { HashTablePerfCounter::TotalKeySize, 18 }, + { HashTablePerfCounter::TotalValueSize, 21 }, + { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 }, + { HashTablePerfCounter::RecordsCountSavedFromSerializer, 3 } + }, + { + { HashTablePerfCounter::RecordsCount, 3 }, + { HashTablePerfCounter::BucketsCount, 5 }, + { HashTablePerfCounter::TotalKeySize, 18 }, + { HashTablePerfCounter::TotalValueSize, 21 }, + { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 3 }, + { HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 } + }); +} + +BOOST_AUTO_TEST_SUITE_END() + +} // namespace UnitTests +} // namespace L4 \ No newline at end of file diff --git a/Unittests/ReadWriteHashTableTest.cpp b/Unittests/ReadWriteHashTableTest.cpp new file mode 100644 index 0000000..84636e7 --- /dev/null +++ b/Unittests/ReadWriteHashTableTest.cpp @@ -0,0 +1,676 @@ +#include "stdafx.h" +#include "Utils.h" +#include "Mocks.h" +#include "CheckedAllocator.h" +#include "L4/Log/PerfCounter.h" +#include "L4/HashTable/ReadWrite/HashTable.h" + +namespace L4 +{ +namespace UnitTests +{ + +using namespace HashTable::ReadWrite; + +class ReadWriteHashTableTestFixture +{ +protected: + using Allocator = CheckedAllocator<>; + using HashTable = WritableHashTable::HashTable; + + ReadWriteHashTableTestFixture() + : m_allocator{} + , m_epochManager{} + {} + + Allocator m_allocator; + MockEpochManager m_epochManager; +}; + + +BOOST_FIXTURE_TEST_SUITE(ReadWriteHashTableTests, ReadWriteHashTableTestFixture) + + +BOOST_AUTO_TEST_CASE(HashTableTest) +{ + HashTable hashTable{ HashTable::Setting{ 100, 5 }, m_allocator }; + WritableHashTable writableHashTable(hashTable, m_epochManager); + ReadOnlyHashTable readOnlyHashTable(hashTable); + + const auto& perfData = writableHashTable.GetPerfData(); + + { + // Check empty data. + + std::string keyStr = "hello"; + auto key = Utils::ConvertFromString(keyStr.c_str()); + + IReadOnlyHashTable::Value data; + BOOST_CHECK(!readOnlyHashTable.Get(key, data)); + + const auto c_counterMaxValue = (std::numeric_limits::max)(); + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, 0 }, + { HashTablePerfCounter::BucketsCount, 100 }, + { HashTablePerfCounter::ChainingEntriesCount, 0 }, + { HashTablePerfCounter::TotalKeySize, 0 }, + { HashTablePerfCounter::TotalValueSize, 0 }, + { HashTablePerfCounter::MinKeySize, c_counterMaxValue }, + { HashTablePerfCounter::MaxKeySize, 0 }, + { HashTablePerfCounter::MinValueSize, c_counterMaxValue }, + { HashTablePerfCounter::MaxValueSize, 0 } + }); + } + + + { + // First record added. 
+ std::string keyStr = "hello"; + std::string valStr = "world"; + + auto key = Utils::ConvertFromString(keyStr.c_str()); + auto val = Utils::ConvertFromString(valStr.c_str()); + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, 1 }, + { HashTablePerfCounter::BucketsCount, 100 }, + { HashTablePerfCounter::ChainingEntriesCount, 0 }, + { HashTablePerfCounter::TotalKeySize, 5 }, + { HashTablePerfCounter::TotalValueSize, 5 }, + { HashTablePerfCounter::MinKeySize, 5 }, + { HashTablePerfCounter::MaxKeySize, 5 }, + { HashTablePerfCounter::MinValueSize, 5 }, + { HashTablePerfCounter::MaxValueSize, 5 } + }); + } + + { + // Second record added. + std::string keyStr = "hello2"; + std::string valStr = "world2"; + + auto key = Utils::ConvertFromString(keyStr.c_str()); + auto val = Utils::ConvertFromString(valStr.c_str()); + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, 2 }, + { HashTablePerfCounter::BucketsCount, 100 }, + { HashTablePerfCounter::ChainingEntriesCount, 0 }, + { HashTablePerfCounter::TotalKeySize, 11 }, + { HashTablePerfCounter::TotalValueSize, 11 }, + { HashTablePerfCounter::MinKeySize, 5 }, + { HashTablePerfCounter::MaxKeySize, 6 }, + { HashTablePerfCounter::MinValueSize, 5 }, + { HashTablePerfCounter::MaxValueSize, 6 } + }); + } + + { + // Update the key with value bigger than the existing values. + std::string keyStr = "hello"; + std::string valStr = "world long string"; + + auto key = Utils::ConvertFromString(keyStr.c_str()); + auto val = Utils::ConvertFromString(valStr.c_str()); + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 1); + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, 2 }, + { HashTablePerfCounter::BucketsCount, 100 }, + { HashTablePerfCounter::ChainingEntriesCount, 0 }, + { HashTablePerfCounter::TotalKeySize, 11 }, + { HashTablePerfCounter::TotalValueSize, 23 }, + { HashTablePerfCounter::MinKeySize, 5 }, + { HashTablePerfCounter::MaxKeySize, 6 }, + { HashTablePerfCounter::MinValueSize, 5 }, + { HashTablePerfCounter::MaxValueSize, 17 } + }); + } + + { + // Update the key with value smaller than the existing values. 
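+    // As with the previous update, the record is not patched in place; a new
+    // buffer is written and the old one is expected to be handed to the epoch
+    // manager for deferred reclamation, which is why
+    // m_numRegisterActionsCalled is checked to reach 2 below.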
+ std::string keyStr = "hello2"; + std::string valStr = "wo"; + + auto key = Utils::ConvertFromString(keyStr.c_str()); + auto val = Utils::ConvertFromString(valStr.c_str()); + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 2); + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, 2 }, + { HashTablePerfCounter::BucketsCount, 100 }, + { HashTablePerfCounter::ChainingEntriesCount, 0 }, + { HashTablePerfCounter::TotalKeySize, 11 }, + { HashTablePerfCounter::TotalValueSize, 19 }, + { HashTablePerfCounter::MinKeySize, 5 }, + { HashTablePerfCounter::MaxKeySize, 6 }, + { HashTablePerfCounter::MinValueSize, 2 }, + { HashTablePerfCounter::MaxValueSize, 17 } + }); + } + + { + // Remove the first key. + std::string keyStr = "hello"; + std::string valStr = ""; + + auto key = Utils::ConvertFromString(keyStr.c_str()); + auto val = Utils::ConvertFromString(valStr.c_str()); + + BOOST_CHECK(writableHashTable.Remove(key)); + BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 3); + + // Note that the Remove() doesn't change Min/Max counters by design. + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, 1 }, + { HashTablePerfCounter::BucketsCount, 100 }, + { HashTablePerfCounter::ChainingEntriesCount, 0 }, + { HashTablePerfCounter::TotalKeySize, 6 }, + { HashTablePerfCounter::TotalValueSize, 2 }, + { HashTablePerfCounter::MinKeySize, 5 }, + { HashTablePerfCounter::MaxKeySize, 6 }, + { HashTablePerfCounter::MinValueSize, 2 }, + { HashTablePerfCounter::MaxValueSize, 17 } + }); + + // Remove the second key. + keyStr = "hello2"; + key = Utils::ConvertFromString(keyStr.c_str()); + + BOOST_CHECK(writableHashTable.Remove(key)); + BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 4); + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, 0 }, + { HashTablePerfCounter::BucketsCount, 100 }, + { HashTablePerfCounter::ChainingEntriesCount, 0 }, + { HashTablePerfCounter::TotalKeySize, 0 }, + { HashTablePerfCounter::TotalValueSize, 0 }, + { HashTablePerfCounter::MinKeySize, 5 }, + { HashTablePerfCounter::MaxKeySize, 6 }, + { HashTablePerfCounter::MinValueSize, 2 }, + { HashTablePerfCounter::MaxValueSize, 17 } + }); + + // Removing the key that doesn't exist. 
+ BOOST_CHECK(!writableHashTable.Remove(key)); + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, 0 }, + { HashTablePerfCounter::BucketsCount, 100 }, + { HashTablePerfCounter::ChainingEntriesCount, 0 }, + { HashTablePerfCounter::TotalKeySize, 0 }, + { HashTablePerfCounter::TotalValueSize, 0 }, + { HashTablePerfCounter::MinKeySize, 5 }, + { HashTablePerfCounter::MaxKeySize, 6 }, + { HashTablePerfCounter::MinValueSize, 2 }, + { HashTablePerfCounter::MaxValueSize, 17 } + }); + } +} + + +BOOST_AUTO_TEST_CASE(HashTableWithOneBucketTest) +{ + Allocator allocator; + HashTable hashTable{ HashTable::Setting{ 1 }, allocator }; + WritableHashTable writableHashTable(hashTable, m_epochManager); + ReadOnlyHashTable readOnlyHashTable(hashTable); + + const auto& perfData = writableHashTable.GetPerfData(); + + Utils::ValidateCounters(perfData, { { HashTablePerfCounter::ChainingEntriesCount, 0 } }); + + const auto initialTotalIndexSize = perfData.Get(HashTablePerfCounter::TotalIndexSize); + const std::size_t c_dataSetSize = HashTable::Entry::c_numDataPerEntry + 5U; + + std::size_t expectedTotalKeySize = 0U; + std::size_t expectedTotalValueSize = 0U; + + for (auto i = 0U; i < c_dataSetSize; ++i) + { + std::stringstream keyStream; + keyStream << "key" << i; + + std::stringstream valStream; + valStream << "value" << i; + + std::string keyStr = keyStream.str(); + std::string valStr = valStream.str(); + + auto key = Utils::ConvertFromString(keyStr.c_str()); + auto val = Utils::ConvertFromString(valStr.c_str()); + + expectedTotalKeySize += key.m_size; + expectedTotalValueSize += val.m_size; + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + } + + using L4::HashTable::RecordSerializer; + + // Variable key/value sizes. + const auto recordOverhead = RecordSerializer{ 0U, 0U }.CalculateRecordOverhead(); + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, c_dataSetSize }, + { HashTablePerfCounter::BucketsCount, 1 }, + { HashTablePerfCounter::MaxBucketChainLength, 2 }, + { HashTablePerfCounter::ChainingEntriesCount, 1 }, + { HashTablePerfCounter::TotalKeySize, expectedTotalKeySize }, + { HashTablePerfCounter::TotalValueSize, expectedTotalValueSize }, + { + HashTablePerfCounter::TotalIndexSize, + initialTotalIndexSize + sizeof(HashTable::Entry) + (c_dataSetSize * recordOverhead) + } + }); + + // Now replace with new values. 
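+    // Replacing every value should reuse the bucket's existing chained entry,
+    // so ChainingEntriesCount and TotalIndexSize are expected to stay at the
+    // values validated above; only TotalValueSize changes.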
+ expectedTotalValueSize = 0U; + + for (auto i = 0U; i < c_dataSetSize; ++i) + { + std::stringstream keyStream; + keyStream << "key" << i; + + std::stringstream valStream; + valStream << "val" << i; + + std::string keyStr = keyStream.str(); + std::string valStr = valStream.str(); + + auto key = Utils::ConvertFromString(keyStr.c_str()); + auto val = Utils::ConvertFromString(valStr.c_str()); + + expectedTotalValueSize += val.m_size; + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + } + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, c_dataSetSize }, + { HashTablePerfCounter::BucketsCount, 1 }, + { HashTablePerfCounter::MaxBucketChainLength, 2 }, + { HashTablePerfCounter::ChainingEntriesCount, 1 }, + { HashTablePerfCounter::TotalKeySize, expectedTotalKeySize }, + { HashTablePerfCounter::TotalValueSize, expectedTotalValueSize }, + { + HashTablePerfCounter::TotalIndexSize, + initialTotalIndexSize + sizeof(HashTable::Entry) + (c_dataSetSize * recordOverhead) + } + }); + + // Now remove all key-value. + for (auto i = 0U; i < c_dataSetSize; ++i) + { + std::stringstream keyStream; + keyStream << "key" << i; + + std::string keyStr = keyStream.str(); + auto key = Utils::ConvertFromString(keyStr.c_str()); + + BOOST_CHECK(writableHashTable.Remove(key)); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(!readOnlyHashTable.Get(key, value)); + } + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, 0 }, + { HashTablePerfCounter::BucketsCount, 1 }, + { HashTablePerfCounter::MaxBucketChainLength, 2 }, + { HashTablePerfCounter::ChainingEntriesCount, 1 }, + { HashTablePerfCounter::TotalKeySize, 0 }, + { HashTablePerfCounter::TotalValueSize, 0 }, + { + HashTablePerfCounter::TotalIndexSize, + initialTotalIndexSize + sizeof(HashTable::Entry) + } + }); + + // Try to add back to the same bucket (reusing existing entries) + expectedTotalKeySize = 0U; + expectedTotalValueSize = 0U; + + for (auto i = 0U; i < c_dataSetSize; ++i) + { + std::stringstream keyStream; + keyStream << "key" << i; + + std::stringstream valStream; + valStream << "value" << i; + + std::string keyStr = keyStream.str(); + std::string valStr = valStream.str(); + + auto key = Utils::ConvertFromString(keyStr.c_str()); + auto val = Utils::ConvertFromString(valStr.c_str()); + + expectedTotalKeySize += key.m_size; + expectedTotalValueSize += val.m_size; + + writableHashTable.Add(key, val); + + IReadOnlyHashTable::Value value; + BOOST_CHECK(readOnlyHashTable.Get(key, value)); + BOOST_CHECK(value.m_size == valStr.size()); + BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size())); + } + + Utils::ValidateCounters( + perfData, + { + { HashTablePerfCounter::RecordsCount, c_dataSetSize }, + { HashTablePerfCounter::BucketsCount, 1 }, + { HashTablePerfCounter::MaxBucketChainLength, 2 }, + { HashTablePerfCounter::ChainingEntriesCount, 1 }, + { HashTablePerfCounter::TotalKeySize, expectedTotalKeySize }, + { HashTablePerfCounter::TotalValueSize, expectedTotalValueSize }, + { + HashTablePerfCounter::TotalIndexSize, + initialTotalIndexSize + sizeof(HashTable::Entry) + (c_dataSetSize * recordOverhead) + } + }); +} + + +BOOST_AUTO_TEST_CASE(AddRemoveSameKeyTest) +{ + HashTable hashTable{ HashTable::Setting{ 100, 5 }, m_allocator }; + WritableHashTable writableHashTable(hashTable, m_epochManager); + 
ReadOnlyHashTable readOnlyHashTable(hashTable); + + // Add two key/value pairs. + auto key1 = Utils::ConvertFromString("key1"); + auto val1 = Utils::ConvertFromString("val1"); + writableHashTable.Add(key1, val1); + + IReadOnlyHashTable::Value valueRetrieved; + BOOST_CHECK(readOnlyHashTable.Get(key1, valueRetrieved)); + BOOST_CHECK(valueRetrieved.m_size == val1.m_size); + BOOST_CHECK(!memcmp(valueRetrieved.m_data, val1.m_data, val1.m_size)); + + auto key2 = Utils::ConvertFromString("key2"); + auto val2 = Utils::ConvertFromString("val2"); + writableHashTable.Add(key2, val2); + + BOOST_CHECK(readOnlyHashTable.Get(key2, valueRetrieved)); + BOOST_CHECK(valueRetrieved.m_size == val2.m_size); + BOOST_CHECK(!memcmp(valueRetrieved.m_data, val2.m_data, val2.m_size)); + + const auto& perfData = writableHashTable.GetPerfData(); + + // Now remove the first record with key = "key1", which is at the head of the chain. + BOOST_CHECK(writableHashTable.Remove(key1)); + BOOST_CHECK(!readOnlyHashTable.Get(key1, valueRetrieved)); + Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 1); + + // Now try update the record with key = "key2". This should correctly update the existing record + // instead of using the empty slot created by removing the record with key = "key1". + auto newVal2 = Utils::ConvertFromString("newVal2"); + writableHashTable.Add(key2, newVal2); + + BOOST_CHECK(readOnlyHashTable.Get(key2, valueRetrieved)); + BOOST_CHECK(valueRetrieved.m_size == newVal2.m_size); + BOOST_CHECK(!memcmp(valueRetrieved.m_data, newVal2.m_data, newVal2.m_size)); + Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 1); + + // Remove the record with key = "key2". + BOOST_CHECK(writableHashTable.Remove(key2)); + BOOST_CHECK(!writableHashTable.Remove(key2)); + Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 0); +} + + +BOOST_AUTO_TEST_CASE(FixedKeyValueHashTableTest) +{ + // Fixed 4 byte keys and 6 byte values. 
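+    // Judging by SettingAdapterTest, the Setting fields below are presumably
+    // { numBuckets, numBucketsPerMutex, fixedKeySize, fixedValueSize }, where
+    // zero means "variable size", so the three settings cover fixed-key-only,
+    // fixed-value-only, and fully fixed layouts.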
+ std::vector settings = + { + HashTable::Setting{ 100, 200, 4, 0 }, + HashTable::Setting{ 100, 200, 0, 6 }, + HashTable::Setting{ 100, 200, 4, 6 } + }; + + for (const auto& setting : settings) + { + HashTable hashTable{ setting, m_allocator }; + WritableHashTable writableHashTable(hashTable, m_epochManager); + ReadOnlyHashTable readOnlyHashTable(hashTable); + + constexpr std::uint8_t c_numRecords = 10; + + for (std::uint8_t i = 0; i < c_numRecords; ++i) + { + const std::string keyStr = "key" + std::to_string(i); + const std::string valueStr = "value" + std::to_string(i); + + writableHashTable.Add( + Utils::ConvertFromString(keyStr.c_str()), + Utils::ConvertFromString(valueStr.c_str())); + } + + Utils::ValidateCounters( + writableHashTable.GetPerfData(), + { + { HashTablePerfCounter::RecordsCount, 10 }, + { HashTablePerfCounter::BucketsCount, 100 }, + { HashTablePerfCounter::TotalKeySize, 40 }, + { HashTablePerfCounter::TotalValueSize, 60 }, + { HashTablePerfCounter::MinKeySize, 4 }, + { HashTablePerfCounter::MaxKeySize, 4 }, + { HashTablePerfCounter::MinValueSize, 6 }, + { HashTablePerfCounter::MaxValueSize, 6 } + }); + + for (std::uint8_t i = 0; i < c_numRecords; ++i) + { + const std::string keyStr = "key" + std::to_string(i); + const std::string valueStr = "value" + std::to_string(i); + const auto expectedValue = Utils::ConvertFromString(valueStr.c_str()); + + IReadOnlyHashTable::Value actualValue; + BOOST_CHECK(readOnlyHashTable.Get( + Utils::ConvertFromString(keyStr.c_str()), + actualValue)); + BOOST_CHECK(expectedValue == actualValue); + } + + for (std::uint8_t i = 0; i < c_numRecords; ++i) + { + const std::string keyStr = "key" + std::to_string(i); + writableHashTable.Remove( + Utils::ConvertFromString(keyStr.c_str())); + } + + Utils::ValidateCounters( + writableHashTable.GetPerfData(), + { + { HashTablePerfCounter::RecordsCount, 0 }, + { HashTablePerfCounter::BucketsCount, 100 }, + { HashTablePerfCounter::TotalKeySize, 0 }, + { HashTablePerfCounter::TotalValueSize, 0 } + }); + } +} + + +BOOST_AUTO_TEST_CASE(HashTableIteratorTest) +{ + Allocator allocator; + constexpr std::uint32_t c_numBuckets = 10; + HashTable hashTable{ HashTable::Setting{ c_numBuckets }, allocator }; + WritableHashTable writableHashTable(hashTable, m_epochManager); + + { + // Empty data set, thus iterator cannot move. + auto iter = writableHashTable.GetIterator(); + BOOST_CHECK(!iter->MoveNext()); + + CHECK_EXCEPTION_THROWN_WITH_MESSAGE( + iter->GetKey(), + "HashTableIterator is not correctly used."); + + CHECK_EXCEPTION_THROWN_WITH_MESSAGE( + iter->GetValue(), + "HashTableIterator is not correctly used."); + } + + using Buffer = std::vector; + using BufferMap = std::map; + + BufferMap keyValueMap; + + // The number of records should be such that it will create chained entries + // for at least one bucket. So it should be greater than HashTable::Entry::c_numDataPerEntry * number of buckets. 
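+    // For example, with c_numBuckets = 10 and c_numDataPerEntry slots per
+    // entry, (c_numDataPerEntry * 10) + 1 records guarantee by pigeonhole
+    // that at least one bucket overflows its first entry and must chain a
+    // second one.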
+ constexpr std::uint32_t c_numRecords = (HashTable::Entry::c_numDataPerEntry * c_numBuckets) + 1; + + for (auto i = 0U; i < c_numRecords; ++i) + { + std::stringstream keyStream; + keyStream << "key" << i; + + std::stringstream valStream; + valStream << "value" << i; + + std::string keyStr = keyStream.str(); + std::string valStr = valStream.str(); + + auto key = Utils::ConvertFromString(keyStr.c_str()); + auto val = Utils::ConvertFromString(valStr.c_str()); + + writableHashTable.Add(key, val); + + keyValueMap[Buffer(key.m_data, key.m_data + key.m_size)] = Buffer(val.m_data, val.m_data + val.m_size); + } + + BOOST_REQUIRE(writableHashTable.GetPerfData().Get(HashTablePerfCounter::MaxBucketChainLength) >= 2); + BOOST_CHECK_EQUAL(keyValueMap.size(), c_numRecords); + + { + BufferMap keyValueMapFromIterator; + + // Validate the data using the iterator. + auto iter = writableHashTable.GetIterator(); + for (auto i = 0U; i < c_numRecords; ++i) + { + BOOST_CHECK(iter->MoveNext()); + + const auto& key = iter->GetKey(); + const auto& val = iter->GetValue(); + + keyValueMapFromIterator[Buffer(key.m_data, key.m_data + key.m_size)] = Buffer(val.m_data, val.m_data + val.m_size); + } + BOOST_CHECK(!iter->MoveNext()); + BOOST_CHECK(keyValueMap == keyValueMapFromIterator); + + // Reset should move the iterator to the beginning. + iter->Reset(); + for (auto i = 0U; i < c_numRecords; ++i) + { + BOOST_CHECK(iter->MoveNext()); + } + BOOST_CHECK(!iter->MoveNext()); + } + + // Remove half of the key. + for (auto i = 0U; i < c_numRecords; ++i) + { + if (i % 2 == 0U) + { + std::stringstream keyStream; + keyStream << "key" << i; + + std::string keyStr = keyStream.str(); + auto key = Utils::ConvertFromString(keyStr.c_str()); + + BOOST_CHECK(writableHashTable.Remove(key)); + + keyValueMap.erase(Buffer(key.m_data, key.m_data + key.m_size)); + } + } + + BOOST_CHECK_EQUAL(keyValueMap.size(), c_numRecords / 2U); + + // Validate only the existing keys are iterated. 
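+    // Removal presumably clears the slot's data pointer rather than
+    // compacting the chain, so MoveNext() is expected to skip the emptied
+    // slots and visit exactly the surviving half of the records.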
+ { + BufferMap keyValueMapFromIterator; + auto iter = writableHashTable.GetIterator(); + for (auto i = 0U; i < c_numRecords / 2U; ++i) + { + BOOST_CHECK(iter->MoveNext()); + + const auto& key = iter->GetKey(); + const auto& val = iter->GetValue(); + + keyValueMapFromIterator[Buffer(key.m_data, key.m_data + key.m_size)] = + Buffer(val.m_data, val.m_data + val.m_size); + } + BOOST_CHECK(!iter->MoveNext()); + BOOST_CHECK(keyValueMap == keyValueMapFromIterator); + } +} + +BOOST_AUTO_TEST_SUITE_END() + +} // namespace UnitTests +} // namespace L4 \ No newline at end of file diff --git a/Unittests/SettingAdapterTest.cpp b/Unittests/SettingAdapterTest.cpp new file mode 100644 index 0000000..1412ff6 --- /dev/null +++ b/Unittests/SettingAdapterTest.cpp @@ -0,0 +1,41 @@ +#include "stdafx.h" +#include "L4/HashTable/Common/SettingAdapter.h" +#include "L4/HashTable/Common/Record.h" +#include "CheckedAllocator.h" + +namespace L4 +{ +namespace UnitTests +{ + +using SharedHashTable = HashTable::SharedHashTable>; + +BOOST_AUTO_TEST_SUITE(SettingAdapterTests) + +BOOST_AUTO_TEST_CASE(SettingAdapterTestWithDefaultValues) +{ + HashTableConfig::Setting from{ 100U }; + const auto to = HashTable::SettingAdapter{}.Convert(from); + + BOOST_CHECK_EQUAL(to.m_numBuckets, 100U); + BOOST_CHECK_EQUAL(to.m_numBucketsPerMutex, 1U); + BOOST_CHECK_EQUAL(to.m_fixedKeySize, 0U); + BOOST_CHECK_EQUAL(to.m_fixedValueSize, 0U); +} + + +BOOST_AUTO_TEST_CASE(SettingAdapterTestWithNonDefaultValues) +{ + HashTableConfig::Setting from{ 100U, 10U, 5U, 20U }; + const auto to = HashTable::SettingAdapter{}.Convert(from); + + BOOST_CHECK_EQUAL(to.m_numBuckets, 100U); + BOOST_CHECK_EQUAL(to.m_numBucketsPerMutex, 10U); + BOOST_CHECK_EQUAL(to.m_fixedKeySize, 5U); + BOOST_CHECK_EQUAL(to.m_fixedValueSize, 20U); +} + +BOOST_AUTO_TEST_SUITE_END() + +} // namespace UnitTests +} // namespace L4 \ No newline at end of file diff --git a/Unittests/Unittests.vcxproj b/Unittests/Unittests.vcxproj new file mode 100644 index 0000000..2224010 --- /dev/null +++ b/Unittests/Unittests.vcxproj @@ -0,0 +1,94 @@ + + + + + Debug + x64 + + + Release + x64 + + + + {8122529E-61CB-430B-A089-B12E63FC361B} + + + + Application + + + true + v140 + + + false + v140 + + + + L4.UnitTests + + + + Console + netapi32.lib;%(AdditionalDependencies) + true + + + MachineX64 + + + $(SolutionDir)Unittests;$(SolutionDir)inc;$(SolutionDir)inc/L4;%(AdditionalIncludeDirectories) + Use + /Zm136 %(AdditionalOptions) + _SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + MaxSpeed + AnySuitable + true + 4482;%(DisableSpecificWarnings) + + + + + + + + + + + + + + Create + + + + + + + + + + + + + + + + {b7846115-88f1-470b-a625-9de0c29229bb} + + + + + + + This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. 
+ + + + + + + + \ No newline at end of file diff --git a/Unittests/Unittests.vcxproj.filters b/Unittests/Unittests.vcxproj.filters new file mode 100644 index 0000000..e1af83e --- /dev/null +++ b/Unittests/Unittests.vcxproj.filters @@ -0,0 +1,72 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hpp;hxx;hm;inl;inc;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + + + + \ No newline at end of file diff --git a/Unittests/Utils.cpp b/Unittests/Utils.cpp new file mode 100644 index 0000000..607e367 --- /dev/null +++ b/Unittests/Utils.cpp @@ -0,0 +1,37 @@ +#include "stdafx.h" +#include "Utils.h" + +namespace L4 +{ +namespace UnitTests +{ +namespace Utils +{ + + +void ValidateCounter( + const HashTablePerfData& actual, + HashTablePerfCounter perfCounter, + PerfCounters::TValue expectedValue) +{ + BOOST_CHECK_MESSAGE( + actual.Get(perfCounter) == expectedValue, + c_hashTablePerfCounterNames[static_cast(perfCounter)] + << " counter: " + << actual.Get(perfCounter) + << " (actual) != " << expectedValue << " (expected)."); +} + +void ValidateCounters( + const HashTablePerfData& actual, + const ExpectedCounterValues& expected) +{ + for (const auto& expectedCounter : expected) + { + ValidateCounter(actual, expectedCounter.first, expectedCounter.second); + } +} + +} // namespace Utils +} // namespace UnitTests +} // namespace L4 \ No newline at end of file diff --git a/Unittests/Utils.h b/Unittests/Utils.h new file mode 100644 index 0000000..15a3047 --- /dev/null +++ b/Unittests/Utils.h @@ -0,0 +1,105 @@ +#pragma once + +#include +#include +#include +#include "L4/Log/PerfCounter.h" +#include "L4/Utils/Exception.h" + +namespace L4 +{ +namespace UnitTests +{ + +// Macro CHECK_EXCEPTION_THROWN + +#define CHECK_EXCEPTION_THROWN(statement) \ +do { \ + bool isExceptionThrown = false;\ + try \ + { \ + statement; \ + } \ + catch (const RuntimeException&) \ + { \ + isExceptionThrown = true; \ + } \ + BOOST_CHECK(isExceptionThrown); \ +} while (0) + + +#define CHECK_EXCEPTION_THROWN_WITH_MESSAGE(statement, message) \ +do { \ + bool isExceptionThrown = false; \ + std::string exceptionMsg; \ + try \ + { \ + statement; \ + } \ + catch (const RuntimeException& ex) \ + { \ + isExceptionThrown = true; \ + exceptionMsg = ex.what(); \ + BOOST_TEST_MESSAGE("Exception Message: " << exceptionMsg); \ + } \ + BOOST_CHECK(isExceptionThrown); \ + BOOST_CHECK(strcmp((message), exceptionMsg.c_str()) == 0); \ +} while (0) + + +// This will validate the given message is a prefix of the exception message. 
+#define CHECK_EXCEPTION_THROWN_WITH_PREFIX_MESSAGE(statement, message) \ +do { \ + bool isExceptionThrown = false; \ + std::string exceptionMsg; \ + try \ + { \ + statement; \ + } \ + catch (const RuntimeException& ex) \ + { \ + isExceptionThrown = true; \ + exceptionMsg = ex.what(); \ + BOOST_TEST_MESSAGE("Exception Message: " << exceptionMsg); \ + } \ + BOOST_CHECK(isExceptionThrown); \ + BOOST_CHECK(exceptionMsg.compare(0, strlen(message), message) == 0); \ +} while (0) + + +namespace Utils +{ + +template +T ConvertFromString(const char* str) +{ + return T( + reinterpret_cast(str), + static_cast(strlen(str))); +} + +template +std::string ConvertToString(const T& t) +{ + return std::string(reinterpret_cast(t.m_data), t.m_size); +} + + +// Counter related validation util function. + +using ExpectedCounterValues = std::vector::TValue>>; + +// Validate the given perfData against the expected counter value. +void ValidateCounter( + const HashTablePerfData& actual, + HashTablePerfCounter perfCounter, + PerfCounters::TValue expectedValue); + +// Validate the given perfData against the expected counter values. +void ValidateCounters( + const HashTablePerfData& actual, + const ExpectedCounterValues& expected); + +} // namespace Utils +} // namespace UnitTests +} // namespace L4 \ No newline at end of file diff --git a/Unittests/UtilsTest.cpp b/Unittests/UtilsTest.cpp new file mode 100644 index 0000000..25bce31 --- /dev/null +++ b/Unittests/UtilsTest.cpp @@ -0,0 +1,54 @@ +#include "stdafx.h" +#include "L4/Utils/Math.h" +#include + +namespace L4 +{ +namespace UnitTests +{ + +using namespace Utils; + +BOOST_AUTO_TEST_CASE(MathTest) +{ + // RoundUp tests. + BOOST_CHECK_EQUAL(Math::RoundUp(5, 10), 10); + BOOST_CHECK_EQUAL(Math::RoundUp(10, 10), 10); + BOOST_CHECK_EQUAL(Math::RoundUp(11, 10), 20); + BOOST_CHECK_EQUAL(Math::RoundUp(5, 0), 5); + + // RoundDown tests. + BOOST_CHECK_EQUAL(Math::RoundDown(5, 10), 0); + BOOST_CHECK_EQUAL(Math::RoundDown(10, 10), 10); + BOOST_CHECK_EQUAL(Math::RoundDown(11, 10), 10); + BOOST_CHECK_EQUAL(Math::RoundDown(5, 0), 5); + + // IsPowerOfTwo tests. + BOOST_CHECK(Math::IsPowerOfTwo(2)); + BOOST_CHECK(Math::IsPowerOfTwo(4)); + BOOST_CHECK(!Math::IsPowerOfTwo(3)); + BOOST_CHECK(!Math::IsPowerOfTwo(0)); + + // NextHighestPowerOfTwo tests. 
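+    // For reference, a classic 32-bit implementation of this routine smears
+    // the highest set bit to the right and then increments (a sketch under
+    // the assumption that Math.h uses something equivalent, not a copy of it):
+    //
+    //     v--;
+    //     v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
+    //     v++;
+    //
+    // which maps 0 -> 0, 1 -> 1, 3 -> 4, and 200 -> 256, matching the checks
+    // below.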
+ BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(0), 0U); + BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(1), 1U); + BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(2), 2U); + BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(3), 4U); + BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(4), 4U); + BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(5), 8U); + BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(200), 256U); +} + + +BOOST_AUTO_TEST_CASE(PointerArithmeticTest) +{ + std::array elements; + + BOOST_CHECK(reinterpret_cast(Math::PointerArithmetic::Add(&elements[0], sizeof(int))) == &elements[1]); + BOOST_CHECK(reinterpret_cast(Math::PointerArithmetic::Subtract(&elements[1], sizeof(int))) == &elements[0]); + BOOST_CHECK(Math::PointerArithmetic::Distance(&elements[2], &elements[0]) == sizeof(int) * 2U); + BOOST_CHECK(Math::PointerArithmetic::Distance(&elements[0], &elements[2]) == sizeof(int) * 2U); +} + +} // namespace UnitTests +} // namespace L4 diff --git a/Unittests/packages.config b/Unittests/packages.config new file mode 100644 index 0000000..947c4d1 --- /dev/null +++ b/Unittests/packages.config @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/Unittests/stdafx.cpp b/Unittests/stdafx.cpp new file mode 100644 index 0000000..fd4f341 --- /dev/null +++ b/Unittests/stdafx.cpp @@ -0,0 +1 @@ +#include "stdafx.h" diff --git a/Unittests/stdafx.h b/Unittests/stdafx.h new file mode 100644 index 0000000..97fae35 --- /dev/null +++ b/Unittests/stdafx.h @@ -0,0 +1,5 @@ +#pragma once + +#define BOOST_TEST_MODULE L4Unittests +#include + diff --git a/inc/L4/Epoch/Config.h b/inc/L4/Epoch/Config.h new file mode 100644 index 0000000..295b2f3 --- /dev/null +++ b/inc/L4/Epoch/Config.h @@ -0,0 +1,32 @@ +#pragma once + +#include +#include + +namespace L4 +{ + +// EpochManagerConfig struct. +struct EpochManagerConfig +{ + // "numActionQueues" indicates how many action containers there will be in order to + // increase the throughput of registering an action. + // "performActionsInParallelThreshold" indicates the threshold value above which + // the actions are performed in parallel. + // "maxNumThreadsToPerformActions" indicates how many threads will be used when + // performing an action in parallel. + explicit EpochManagerConfig( + std::uint32_t epochQueueSize = 1000, + std::chrono::milliseconds epochProcessingInterval = std::chrono::milliseconds{ 1000 }, + std::uint8_t numActionQueues = 1) + : m_epochQueueSize{ epochQueueSize } + , m_epochProcessingInterval{ epochProcessingInterval } + , m_numActionQueues{ numActionQueues } + {} + + std::uint32_t m_epochQueueSize; + std::chrono::milliseconds m_epochProcessingInterval; + std::uint8_t m_numActionQueues; +}; + +} // namespace L4 diff --git a/inc/L4/Epoch/EpochActionManager.h b/inc/L4/Epoch/EpochActionManager.h new file mode 100644 index 0000000..8e82b0f --- /dev/null +++ b/inc/L4/Epoch/EpochActionManager.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "IEpochActionManager.h" +#include "Utils/Lock.h" + +namespace L4 +{ + + +// EpochActionManager provides functionalities to add actions at an epoch and to perform +// actions up to the given epoch. +class EpochActionManager +{ +public: + // "numActionQueues" indicates how many action containers there will be in order to + // increase the throughput of registering an action. This will be re-calculated to + // the next highest power of two so that the "&" operator can be used for accessing + // the next queue. 
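+    // A minimal sketch of the intended indexing (an illustration, not the
+    // actual member function): once the queue count is rounded up to a power
+    // of two, the modulo can be replaced by a mask, e.g.:
+    //
+    //     const auto index = m_counter++ & (m_epochToActionsList.size() - 1U);
+    //     auto& epochToActions = m_epochToActionsList[index];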
+ explicit EpochActionManager(std::uint8_t numActionQueues); + + // Adds an action at a given epoch counter. + // This function is thread-safe. + void RegisterAction(std::uint64_t epochCounter, IEpochActionManager::Action&& action); + + // Perform actions whose associated epoch counter value is less than + // the given epoch counter value, and returns the number of actions performed. + std::uint64_t PerformActions(std::uint64_t epochCounter); + + EpochActionManager(const EpochActionManager&) = delete; + EpochActionManager& operator=(const EpochActionManager&) = delete; + +private: + using Mutex = Utils::CriticalSection; + using Lock = std::lock_guard; + + using Actions = std::vector; + + // The following structure needs to be sorted by the epoch counter. + // If the performance of using std::map becomes an issue, we can revisit this. + using EpochToActions = std::map; + + using EpochToActionsWithLock = std::tuple, EpochToActions>; + + // Run actions based on the configuration. + void ApplyActions(Actions& actions); + + // Stores mapping from a epoch counter to actions to perform. + std::vector m_epochToActionsList; + + // Used to point to the next EpochToActions to simulate round-robin access. + std::atomic m_counter; +}; + + +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Epoch/EpochQueue.h b/inc/L4/Epoch/EpochQueue.h new file mode 100644 index 0000000..5d8d813 --- /dev/null +++ b/inc/L4/Epoch/EpochQueue.h @@ -0,0 +1,165 @@ +#pragma once + +#include +#include +#include +#include "Interprocess/Container/Vector.h" +#include "Utils/Exception.h" +#include "Utils/Lock.h" + +namespace L4 +{ + +// EpochQueue struct represents reference counts for each epoch. +// Each value of the queue (fixed-size array) is the reference counts at an index, +// where an index represents an epoch (time). +template < + typename TSharableLock, + typename TExclusiveLock, + typename Allocator = std::allocator +> +struct EpochQueue +{ + static_assert( + std::is_same::value, + "mutex type should be the same"); + +public: + EpochQueue( + std::uint64_t epochCounter, + std::uint32_t queueSize, + Allocator allocator = Allocator()) + : m_frontIndex{ epochCounter } + , m_backIndex{ epochCounter } + , m_mutexForBackIndex{} + , m_refCounts{ queueSize, Allocator::rebind::other(allocator) } + { + if (queueSize == 0U) + { + throw RuntimeException("Zero queue size is not allowed."); + } + } + + using SharableLock = TSharableLock; + using ExclusiveLock = TExclusiveLock; + using RefCount = std::atomic; + using RefCounts = Interprocess::Container::Vector< + RefCount, + typename Allocator::template rebind::other>; + + // The followings (m_frontIndex and m_backIndex) are + // accessed/updated only by the owner thread (only one thread), thus + // they don't require any synchronization. + std::size_t m_frontIndex; + + // Back index represents the latest epoch counter value. Note that + // this is accessed/updated by multiple threads, thus requires + // synchronization. + std::size_t m_backIndex; + + // Read/Write lock for m_backIndex. + typename SharableLock::mutex_type m_mutexForBackIndex; + + // Reference counts per epoch count. + // The index represents the epoch counter value and the value represents the reference counts. + RefCounts m_refCounts; +}; + + +// EpochRefManager provides functionality of adding/removing references +// to the epoch counter. 
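+// Typical usage, mirroring EpochManagerTest: pin an epoch before reading
+// shared state and unpin it when done (a sketch, not part of this header):
+//
+//     const auto epoch = epochRefManager.AddRef();   // pin the current epoch
+//     /* ... read epoch-protected data ... */
+//     epochRefManager.RemoveRef(epoch);              // allow reclamation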
+template <typename EpochQueue>
+class EpochRefManager
+{
+public:
+    explicit EpochRefManager(EpochQueue& epochQueue)
+        : m_epochQueue(epochQueue)
+    {}
+
+    // Increment a reference to the current epoch counter.
+    // This function is thread-safe.
+    std::uint64_t AddRef()
+    {
+        // The synchronization is needed for EpochCounterManager::AddNewEpoch().
+        typename EpochQueue::SharableLock lock(m_epochQueue.m_mutexForBackIndex);
+
+        ++m_epochQueue.m_refCounts[m_epochQueue.m_backIndex % m_epochQueue.m_refCounts.size()];
+
+        return m_epochQueue.m_backIndex;
+    }
+
+
+    // Decrement a reference count for the given epoch counter.
+    // This function is thread-safe.
+    void RemoveRef(std::uint64_t epochCounter)
+    {
+        auto& refCounter = m_epochQueue.m_refCounts[epochCounter % m_epochQueue.m_refCounts.size()];
+
+        if (refCounter == 0)
+        {
+            throw RuntimeException("Reference counter is invalid.");
+        }
+
+        --refCounter;
+    }
+
+    EpochRefManager(const EpochRefManager&) = delete;
+    EpochRefManager& operator=(const EpochRefManager&) = delete;
+
+private:
+    EpochQueue& m_epochQueue;
+};
+
+
+// EpochCounterManager provides functionality of updating the current epoch counter
+// and getting the latest unreferenced epoch counter.
+template <typename EpochQueue>
+class EpochCounterManager
+{
+public:
+    explicit EpochCounterManager(EpochQueue& epochQueue)
+        : m_epochQueue(epochQueue)
+    {}
+
+    // Increments the current epoch count by one.
+    // This function is thread-safe.
+    void AddNewEpoch()
+    {
+        // The synchronization is needed for EpochRefManager::AddRef().
+        typename EpochQueue::ExclusiveLock lock(m_epochQueue.m_mutexForBackIndex);
+
+        ++m_epochQueue.m_backIndex;
+
+        // TODO: check for wraparound of the back index and throw.
+    }
+
+    // Returns the largest epoch counter value in the queue such that all
+    // smaller epoch counter values have zero references.
+    // Note that this function is NOT thread safe, and should be run on the
+    // same thread as the one that calls AddNewEpoch().
+    std::uint64_t RemoveUnreferenceEpochCounters()
+    {
+        while (m_epochQueue.m_backIndex > m_epochQueue.m_frontIndex)
+        {
+            if (m_epochQueue.m_refCounts[m_epochQueue.m_frontIndex % m_epochQueue.m_refCounts.size()] == 0U)
+            {
+                ++m_epochQueue.m_frontIndex;
+            }
+            else
+            {
+                // There are still references to the front of the queue, so
+                // stop and return this front index.
+ break; + } + } + + return m_epochQueue.m_frontIndex; + } + + EpochCounterManager(const EpochCounterManager&) = delete; + EpochCounterManager& operator=(const EpochCounterManager&) = delete; + +private: + EpochQueue& m_epochQueue; +}; + +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Epoch/EpochRefPolicy.h b/inc/L4/Epoch/EpochRefPolicy.h new file mode 100644 index 0000000..435c98f --- /dev/null +++ b/inc/L4/Epoch/EpochRefPolicy.h @@ -0,0 +1,42 @@ +#pragma once + +#include +#include + +namespace L4 +{ + +// EpochRefPolicy class +template +class EpochRefPolicy +{ +public: + explicit EpochRefPolicy(EpochRefManager& epochRefManager) + : m_epochRefManager{ epochRefManager } + , m_epochCounter{ m_epochRefManager.AddRef() } + {} + + EpochRefPolicy(EpochRefPolicy&& epochRefPolicy) + : m_epochRefManager{ epochRefPolicy.m_epochRefManager } + , m_epochCounter{ epochRefPolicy.m_epochCounter } + { + epochRefPolicy.m_epochCounter = boost::integer_traits::const_max; + } + + ~EpochRefPolicy() + { + if (m_epochCounter != boost::integer_traits::const_max) + { + m_epochRefManager.RemoveRef(m_epochCounter); + } + } + + EpochRefPolicy(const EpochRefPolicy&) = delete; + EpochRefPolicy& operator=(const EpochRefPolicy&) = delete; + +private: + EpochRefManager& m_epochRefManager; + std::uint64_t m_epochCounter; +}; + +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Epoch/IEpochActionManager.h b/inc/L4/Epoch/IEpochActionManager.h new file mode 100644 index 0000000..3bd9297 --- /dev/null +++ b/inc/L4/Epoch/IEpochActionManager.h @@ -0,0 +1,22 @@ +#pragma once + +#include + +namespace L4 +{ + + +// IEpochActionManager interface exposes an API for registering an Action. +struct IEpochActionManager +{ + using Action = std::function; + + virtual ~IEpochActionManager() {}; + + // Register actions on the latest epoch in the queue and the action is + // performed when the epoch is removed from the queue. + virtual void RegisterAction(Action&& action) = 0; +}; + + +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/HashTable/Cache/HashTable.h b/inc/L4/HashTable/Cache/HashTable.h new file mode 100644 index 0000000..7c3c959 --- /dev/null +++ b/inc/L4/HashTable/Cache/HashTable.h @@ -0,0 +1,384 @@ +#pragma once + +#include +#include +#include +#include "detail/ToRawPointer.h" +#include "Epoch/IEpochActionManager.h" +#include "HashTable/IHashTable.h" +#include "HashTable/ReadWrite/HashTable.h" +#include "HashTable/Cache/Metadata.h" +#include "Utils/Clock.h" + +namespace L4 +{ +namespace HashTable +{ +namespace Cache +{ + +// ReadOnlyHashTable class implements IReadOnlyHashTable interface and provides +// the functionality to read data given a key. +template +class ReadOnlyHashTable + : public virtual ReadWrite::ReadOnlyHashTable + , protected Clock +{ +public: + using Base = ReadWrite::ReadOnlyHashTable; + + class Iterator; + + ReadOnlyHashTable( + HashTable& hashTable, + std::chrono::seconds recordTimeToLive) + : Base{ + hashTable, + RecordSerializer{ + hashTable.m_setting.m_fixedKeySize, + hashTable.m_setting.m_fixedValueSize, + Metadata::c_metaDataSize } } + , m_recordTimeToLive{ recordTimeToLive } + {} + + virtual bool Get(const Key& key, Value& value) const override + { + const auto status = GetInternal(key, value); + + // Note that the following const_cast is safe and necessary to update cache hit information. + const_cast(GetPerfData()).Increment( + status + ? 
HashTablePerfCounter::CacheHitCount + : HashTablePerfCounter::CacheMissCount); + + return status; + } + + virtual IIteratorPtr GetIterator() const override + { + return std::make_unique( + m_hashTable, + m_recordSerializer, + m_recordTimeToLive, + GetCurrentEpochTime()); + } + + ReadOnlyHashTable(const ReadOnlyHashTable&) = delete; + ReadOnlyHashTable& operator=(const ReadOnlyHashTable&) = delete; + +protected: + bool GetInternal(const Key& key, Value& value) const + { + if (!Base::Get(key, value)) + { + return false; + } + + assert(value.m_size > Metadata::c_metaDataSize); + + // If the record with the given key is found, check if the record is expired or not. + // Note that the following const_cast is safe and necessary to update the access status. + Metadata metaData{ const_cast(reinterpret_cast(value.m_data)) }; + if (metaData.IsExpired(GetCurrentEpochTime(), m_recordTimeToLive)) + { + return false; + } + + metaData.UpdateAccessStatus(true); + + value.m_data += Metadata::c_metaDataSize; + value.m_size -= Metadata::c_metaDataSize; + + return true; + } + + std::chrono::seconds m_recordTimeToLive; +}; + + +template +class ReadOnlyHashTable::Iterator : public Base::Iterator +{ +public: + using Base = typename Base::Iterator; + + Iterator( + const HashTable& hashTable, + const RecordSerializer& recordDeserializer, + std::chrono::seconds recordTimeToLive, + std::chrono::seconds currentEpochTime) + : Base(hashTable, recordDeserializer) + , m_recordTimeToLive{ recordTimeToLive } + , m_currentEpochTime{ currentEpochTime } + {} + + Iterator(Iterator&& other) + : Base(std::move(other)) + , m_recordTimeToLive{ std::move(other.m_recordTimeToLive) } + , m_currentEpochTime{ std::move(other.m_currentEpochTime) } + {} + + bool MoveNext() override + { + if (!Base::MoveNext()) + { + return false; + } + + do + { + const Metadata metaData{ + const_cast( + reinterpret_cast( + Base::GetValue().m_data)) }; + + if (!metaData.IsExpired(m_currentEpochTime, m_recordTimeToLive)) + { + return true; + } + } while (Base::MoveNext()); + + return false; + } + + Value GetValue() const override + { + auto value = Base::GetValue(); + value.m_data += Metadata::c_metaDataSize; + value.m_size -= Metadata::c_metaDataSize; + + return value; + } + +private: + std::chrono::seconds m_recordTimeToLive; + std::chrono::seconds m_currentEpochTime; +}; + + +// The following warning is from the virtual inheritance and safe to disable in this case. +// https://msdn.microsoft.com/en-us/library/6b3sy7ae.aspx +#pragma warning(push) +#pragma warning(disable:4250) + +// WritableHashTable class implements IWritableHashTable interface and also provides +// the read only access (Get()) to the hash table. 
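+// As an illustrative sketch (not part of the original source), a client holding
+// an IWritableHashTable backed by this cache module might use it as follows,
+// where `table`, `key` and `value` are hypothetical:
+//
+//     table.Add(key, value);              // may first evict expired/cold records
+//
+//     IReadOnlyHashTable::Value result;
+//     if (table.Get(key, result))         // false on a miss or an expired record
+//     {
+//         // result.m_data points past the internal 4-byte metadata,
+//         // i.e. at the user's value bytes only.
+//     }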
+template +class WritableHashTable + : public ReadOnlyHashTable + , public ReadWrite::WritableHashTable +{ +public: + using ReadOnlyBase = ReadOnlyHashTable; + using WritableBase = typename ReadWrite::WritableHashTable; + + WritableHashTable( + HashTable& hashTable, + IEpochActionManager& epochManager, + std::uint64_t maxCacheSizeInBytes, + std::chrono::seconds recordTimeToLive, + bool forceTimeBasedEviction) + : ReadOnlyBase::Base( + hashTable, + RecordSerializer{ + hashTable.m_setting.m_fixedKeySize, + hashTable.m_setting.m_fixedValueSize, + Metadata::c_metaDataSize }) + , ReadOnlyBase(hashTable, recordTimeToLive) + , WritableBase(hashTable, epochManager) + , m_maxCacheSizeInBytes{ maxCacheSizeInBytes } + , m_forceTimeBasedEviction{ forceTimeBasedEviction } + , m_currentEvictBucketIndex{ 0U } + {} + + using ReadOnlyBase::Get; + using ReadOnlyBase::GetPerfData; + + virtual void Add(const Key& key, const Value& value) override + { + if (m_forceTimeBasedEviction) + { + EvictBasedOnTime(key); + } + + Evict(key.m_size + value.m_size + Metadata::c_metaDataSize); + + WritableBase::Add(CreateRecordBuffer(key, value)); + } + + virtual ISerializerPtr GetSerializer() const override + { + throw std::exception("Not implemented yet."); + } + +private: + using Mutex = std::mutex; + using Lock = std::lock_guard; + + void EvictBasedOnTime(const Key& key) + { + const auto bucketIndex = GetBucketInfo(key).first; + + auto* entry = &m_hashTable.m_buckets[bucketIndex]; + + const auto curEpochTime = GetCurrentEpochTime(); + + HashTable::Lock lock{ m_hashTable.GetMutex(bucketIndex) }; + + while (entry != nullptr) + { + for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) + { + const auto data = entry->m_dataList[i].Load(std::memory_order_relaxed); + + if (data != nullptr) + { + const Metadata metadata{ + const_cast( + reinterpret_cast( + m_recordSerializer.Deserialize(*data).m_value.m_data)) }; + + if (metadata.IsExpired(curEpochTime, m_recordTimeToLive)) + { + WritableBase::Remove(*entry, i); + m_hashTable.m_perfData.Increment(HashTablePerfCounter::EvictedRecordsCount); + } + } + } + + entry = entry->m_next.Load(std::memory_order_relaxed); + } + } + + // Evict uses CLOCK algorithm to evict records based on expiration and access status + // until the number of bytes freed match the given number of bytes needed. + void Evict(std::uint64_t bytesNeeded) + { + std::uint64_t numBytesToFree = CalculateNumBytesToFree(bytesNeeded); + if (numBytesToFree == 0U) + { + return; + } + + // Start evicting records with a lock. + Lock evictLock{ m_evictMutex }; + + // Recalculate the number of bytes to free since other thread may have already evicted. + numBytesToFree = CalculateNumBytesToFree(bytesNeeded); + if (numBytesToFree == 0U) + { + return; + } + + const auto curEpochTime = GetCurrentEpochTime(); + + // The max number of iterations we are going through per eviction is twice the number + // of buckets so that it can clear the access status. Note that this is the worst + // case scenario and the eviction process should exit much quicker in a normal case. + auto& buckets = m_hashTable.m_buckets; + std::uint64_t numIterationsRemaining = buckets.size() * 2U; + + while (numBytesToFree > 0U && numIterationsRemaining-- > 0U) + { + const auto currentBucketIndex = m_currentEvictBucketIndex++ % buckets.size(); + auto& bucket = buckets[currentBucketIndex]; + + // Lock the bucket since another thread can bypass Evict() since TotalDataSize can + // be updated before the lock on m_evictMutex is released. 
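+            // An exclusive (writer) lock is required here because the
+            // Remove() call below mutates the entry's data list in place.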
+ HashTable::UniqueLock lock{ m_hashTable.GetMutex(currentBucketIndex) }; + HashTable::Entry* entry = &bucket; + + while (entry != nullptr) + { + for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) + { + const auto data = entry->m_dataList[i].Load(std::memory_order_relaxed); + + if (data != nullptr) + { + const auto record = m_recordSerializer.Deserialize(*data); + const auto& value = record.m_value; + + Metadata metadata{ + const_cast( + reinterpret_cast( + value.m_data)) }; + + // Evict this record if + // 1: the record is expired, or + // 2: the entry is not recently accessed (and unset the access bit if set). + if (metadata.IsExpired(curEpochTime, m_recordTimeToLive) + || !metadata.UpdateAccessStatus(false)) + { + const auto numBytesFreed = record.m_key.m_size + value.m_size; + numBytesToFree = (numBytesFreed >= numBytesToFree) ? 0U : numBytesToFree - numBytesFreed; + + WritableBase::Remove(*entry, i); + + m_hashTable.m_perfData.Increment(HashTablePerfCounter::EvictedRecordsCount); + } + } + } + + entry = entry->m_next.Load(std::memory_order_relaxed); + } + } + } + + // Given the number of bytes needed, it calculates the number of bytes + // to free based on the max cache size. + std::uint64_t CalculateNumBytesToFree(std::uint64_t bytesNeeded) const + { + const auto& perfData = GetPerfData(); + + const std::uint64_t totalDataSize = + perfData.Get(HashTablePerfCounter::TotalKeySize) + + perfData.Get(HashTablePerfCounter::TotalValueSize) + + perfData.Get(HashTablePerfCounter::TotalIndexSize); + + if ((bytesNeeded < m_maxCacheSizeInBytes) + && (totalDataSize + bytesNeeded <= m_maxCacheSizeInBytes)) + { + // There are enough free bytes. + return 0U; + } + + // (totalDataSize > m_maxCacheSizeInBytes) case is possible: + // 1) If multiple threads are evicting and adding at the same time. + // For example, if thread A was evicting and thread B could have + // used the evicted bytes before thread A consumed. + // 2) If max cache size is set lower than expectation. + return (totalDataSize > m_maxCacheSizeInBytes) + ? (totalDataSize - m_maxCacheSizeInBytes + bytesNeeded) + : bytesNeeded; + } + + RecordBuffer* CreateRecordBuffer(const Key& key, const Value& value) + { + const auto bufferSize = m_recordSerializer.CalculateBufferSize(key, value); + auto buffer = Detail::to_raw_pointer( + m_hashTable.GetAllocator().allocate(bufferSize)); + + std::uint32_t metaDataBuffer; + Metadata{ &metaDataBuffer, GetCurrentEpochTime() }; + + // 4-byte Metadata is inserted between key and value buffer. + return m_recordSerializer.Serialize( + key, + value, + Value{ reinterpret_cast(&metaDataBuffer), sizeof(metaDataBuffer) }, + buffer, + bufferSize); + } + + Mutex m_evictMutex; + const std::uint64_t m_maxCacheSizeInBytes; + const bool m_forceTimeBasedEviction; + std::uint64_t m_currentEvictBucketIndex; +}; + +#pragma warning(pop) + +} // namespace Cache +} // namespace HashTable +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/HashTable/Cache/Metadata.h b/inc/L4/HashTable/Cache/Metadata.h new file mode 100644 index 0000000..2bce511 --- /dev/null +++ b/inc/L4/HashTable/Cache/Metadata.h @@ -0,0 +1,116 @@ +#pragma once + +#include +#include +#include + +namespace L4 +{ +namespace HashTable +{ +namespace Cache +{ + + +// Metadata class that stores caching related data. +// It stores access bit to indicate whether a record is recently accessed +// as well as the epoch time when a record is created. +// Note that this works regardless of the alignment of the metadata passed in. 
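+// For reference (an illustrative note, not from the original source), the 32-bit
+// metadata word is laid out as:
+//
+//     bit  31     : access (CLOCK) bit, set on access, cleared during eviction
+//     bits 30 - 0 : record creation time as epoch time in seconds
+//
+// For example, a record created at epoch time 1000 (0x3E8) that has just been
+// accessed is stored as 0x800003E8.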
+class Metadata +{ +public: + // Constructs Metadata with the current epoch time. + Metadata(std::uint32_t* metadata, std::chrono::seconds curEpochTime) + : Metadata{ metadata } + { + *m_metadata = curEpochTime.count() & s_epochTimeMask; + } + + explicit Metadata(std::uint32_t* metadata) + : m_metadata{ metadata } + { + assert(m_metadata != nullptr); + } + + // Returns the stored epoch time. + std::chrono::seconds GetEpochTime() const + { + // *m_metadata even on the not-aligned memory should be fine since + // only the byte that contains the access bit is modified, and + // byte read is atomic. + return std::chrono::seconds{ *m_metadata & s_epochTimeMask }; + } + + // Returns true if the stored epoch time is expired based + // on the given current epoch time and time-to-live value. + bool IsExpired( + std::chrono::seconds curEpochTime, + std::chrono::seconds timeToLive) const + { + assert(curEpochTime >= GetEpochTime()); + return (curEpochTime - GetEpochTime()) > timeToLive; + } + + // Returns true if the access status is on. + bool IsAccessed() const + { + return !!(GetAccessByte() & s_accessSetMask); + } + + // If "set" is true, turn on the access bit in the given metadata and store it. + // If "set" is false, turn off the access bit. + // Returns true if the given metadata's access bit was originally on. + bool UpdateAccessStatus(bool set) + { + const auto isAccessBitOn = IsAccessed(); + + // Set the bit only if the bit is not set, and vice versa. + if (set != isAccessBitOn) + { + if (set) + { + GetAccessByte() |= s_accessSetMask; + } + else + { + GetAccessByte() &= s_accessUnsetMask; + } + } + + return isAccessBitOn; + } + + static constexpr std::uint16_t c_metaDataSize = sizeof(std::uint32_t); + +private: + std::uint8_t GetAccessByte() const + { + return reinterpret_cast(m_metadata)[s_accessBitByte]; + } + + std::uint8_t& GetAccessByte() + { + return reinterpret_cast(m_metadata)[s_accessBitByte]; + } + + // TODO: Create an endian test and assert it. (Works only on little endian). + // The byte that contains the most significant bit. + static constexpr std::uint8_t s_accessBitByte = 3U; + + // Most significant bit is set. + static constexpr std::uint8_t s_accessSetMask = 1U << 7; + static constexpr std::uint8_t s_accessUnsetMask = s_accessSetMask ^ 0xFF; + + // The rest of bits other than the most significant bit are set. + static constexpr std::uint32_t s_epochTimeMask = 0x7FFFFFFF; + + // The most significant bit is a CLOCK bit. It is set to 1 upon access + // and reset to 0 by the cache eviction. + // The rest of the bits are used for storing the epoch time in seconds. + std::uint32_t* m_metadata = nullptr; +}; + + +} // namespace Cache +} // namespace HashTable +} // namespace L4 diff --git a/inc/L4/HashTable/Common/Record.h b/inc/L4/HashTable/Common/Record.h new file mode 100644 index 0000000..5e74144 --- /dev/null +++ b/inc/L4/HashTable/Common/Record.h @@ -0,0 +1,216 @@ +#pragma once + +#include +#include "HashTable/IHashTable.h" +#include "Utils/Exception.h" + +namespace L4 +{ +namespace HashTable +{ + +// Record struct consists of key and value pair. +struct Record +{ + using Key = IReadOnlyHashTable::Key; + using Value = IReadOnlyHashTable::Value; + + Record() = default; + + Record( + const Key& key, + const Value& value) + : m_key{ key } + , m_value{ value } + {} + + Key m_key; + Value m_value; +}; + + +// RecordBuffer is a thin wrapper struct around a raw buffer array (pointer). +// The warning is "nonstandard extension used : zero-sized array in struct/union." 
+#pragma warning (push)
+#pragma warning (disable:4200)
+
+struct RecordBuffer
+{
+    std::uint8_t m_buffer[];
+};
+
+#pragma warning (pop)
+
+
+// RecordSerializer provides the functionality to serialize/deserialize a record.
+class RecordSerializer
+{
+public:
+    using Key = Record::Key;
+    using Value = Record::Value;
+    using KeySize = Key::size_type;
+    using ValueSize = Value::size_type;
+
+    RecordSerializer(
+        KeySize fixedKeySize,
+        ValueSize fixedValueSize,
+        ValueSize metadataSize = 0U)
+        : m_fixedKeySize{ fixedKeySize }
+        , m_fixedValueSize{ fixedValueSize }
+        , m_metadataSize{ metadataSize }
+    {}
+
+    // Returns the number of bytes needed for serializing the given key and value.
+    std::size_t CalculateBufferSize(const Key& key, const Value& value) const
+    {
+        return
+            ((m_fixedKeySize != 0)
+                ? m_fixedKeySize
+                : (key.m_size + sizeof(KeySize)))
+            + ((m_fixedValueSize != 0)
+                ? m_fixedValueSize + m_metadataSize
+                : (value.m_size + sizeof(ValueSize) + m_metadataSize));
+    }
+
+    // Returns the number of bytes used for the key and value size fields.
+    std::size_t CalculateRecordOverhead() const
+    {
+        return
+            (m_fixedKeySize != 0 ? 0U : sizeof(KeySize))
+            + (m_fixedValueSize != 0 ? 0U : sizeof(ValueSize));
+    }
+
+    // Serializes the given key and value to the given buffer.
+    // Note that the buffer size must be at least as big as the number of bytes
+    // returned by CalculateBufferSize().
+    RecordBuffer* Serialize(
+        const Key& key,
+        const Value& value,
+        std::uint8_t* const buffer,
+        std::size_t bufferSize) const
+    {
+        Validate(key, value);
+
+        assert(CalculateBufferSize(key, value) <= bufferSize);
+        (void)bufferSize;
+
+        const auto start = SerializeSizes(buffer, key.m_size, value.m_size);
+        memcpy_s(buffer + start, key.m_size, key.m_data, key.m_size);
+        memcpy_s(buffer + start + key.m_size, value.m_size, value.m_data, value.m_size);
+
+        return reinterpret_cast<RecordBuffer*>(buffer);
+    }
+
+    // Serializes the given key, value and meta value to the given buffer.
+    // The meta value is serialized between the key and the value.
+    // Note that the buffer size must be at least as big as the number of bytes
+    // returned by CalculateBufferSize().
+    RecordBuffer* Serialize(
+        const Key& key,
+        const Value& value,
+        const Value& metaValue,
+        std::uint8_t* const buffer,
+        std::size_t bufferSize) const
+    {
+        Validate(key, value, metaValue);
+
+        assert(CalculateBufferSize(key, value) <= bufferSize);
+        (void)bufferSize;
+
+        const auto start = SerializeSizes(buffer, key.m_size, value.m_size + metaValue.m_size);
+        memcpy_s(buffer + start, key.m_size, key.m_data, key.m_size);
+        memcpy_s(buffer + start + key.m_size, metaValue.m_size, metaValue.m_data, metaValue.m_size);
+        memcpy_s(buffer + start + key.m_size + metaValue.m_size, value.m_size, value.m_data, value.m_size);
+
+        return reinterpret_cast<RecordBuffer*>(buffer);
+    }
+
+    // Deserializes the given buffer and returns a Record object.
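+    // For illustration (not part of the original source), a record serialized with
+    // variable-size key/value and a non-zero metadata size is laid out as:
+    //
+    //     [key size][value size][key bytes][metadata bytes][value bytes]
+    //
+    // where the serialized value size includes the metadata bytes, and the two
+    // size fields are omitted when fixed key/value sizes are configured.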
+    Record Deserialize(const RecordBuffer& buffer) const
+    {
+        Record record;
+
+        const auto* dataBuffer = buffer.m_buffer;
+
+        auto& key = record.m_key;
+        if (m_fixedKeySize != 0)
+        {
+            key.m_size = m_fixedKeySize;
+        }
+        else
+        {
+            key.m_size = *reinterpret_cast<const KeySize*>(dataBuffer);
+            dataBuffer += sizeof(KeySize);
+        }
+
+        auto& value = record.m_value;
+        if (m_fixedValueSize != 0)
+        {
+            value.m_size = m_fixedValueSize + m_metadataSize;
+        }
+        else
+        {
+            value.m_size = *reinterpret_cast<const ValueSize*>(dataBuffer);
+            dataBuffer += sizeof(ValueSize);
+        }
+
+        key.m_data = dataBuffer;
+        value.m_data = dataBuffer + key.m_size;
+
+        return record;
+    }
+
+private:
+    // Validates the key and value sizes when fixed sizes are set.
+    // Throws an exception if invalid sizes are used.
+    void Validate(const Key& key, const Value& value) const
+    {
+        if ((m_fixedKeySize != 0 && key.m_size != m_fixedKeySize)
+            || (m_fixedValueSize != 0 && value.m_size != m_fixedValueSize))
+        {
+            throw RuntimeException("Invalid key or value sizes are given.");
+        }
+    }
+
+    // Validates against the given meta value.
+    void Validate(const Key& key, const Value& value, const Value& metaValue) const
+    {
+        Validate(key, value);
+
+        if (m_metadataSize != metaValue.m_size)
+        {
+            throw RuntimeException("Invalid meta value size is given.");
+        }
+    }
+
+    // Serializes size information to the given buffer.
+    // It assumes that the buffer has enough space for serialization.
+    std::size_t SerializeSizes(
+        std::uint8_t* const buffer,
+        KeySize keySize,
+        ValueSize valueSize) const
+    {
+        auto curBuffer = buffer;
+        if (m_fixedKeySize == 0)
+        {
+            *reinterpret_cast<KeySize*>(curBuffer) = keySize;
+            curBuffer += sizeof(keySize);
+        }
+
+        if (m_fixedValueSize == 0)
+        {
+            *reinterpret_cast<ValueSize*>(curBuffer) = valueSize;
+            curBuffer += sizeof(valueSize);
+        }
+
+        return curBuffer - buffer;
+    }
+
+    const KeySize m_fixedKeySize;
+    const ValueSize m_fixedValueSize;
+    const ValueSize m_metadataSize;
+};
+
+
+} // namespace HashTable
+} // namespace L4
\ No newline at end of file
diff --git a/inc/L4/HashTable/Common/SettingAdapter.h b/inc/L4/HashTable/Common/SettingAdapter.h
new file mode 100644
index 0000000..f2b3787
--- /dev/null
+++ b/inc/L4/HashTable/Common/SettingAdapter.h
@@ -0,0 +1,32 @@
+#pragma once
+
+#include <algorithm>
+#include "HashTable/Common/SharedHashTable.h"
+#include "HashTable/Config.h"
+
+namespace L4
+{
+namespace HashTable
+{
+
+// SettingAdapter provides the functionality to convert a HashTableConfig::Setting object
+// to a SharedHashTable::Setting object.
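+// A usage sketch (illustrative; `config` and `SomeAllocator` below are hypothetical):
+//
+//     using InternalHashTable = SharedHashTable<RecordBuffer, SomeAllocator>;
+//     const auto setting =
+//         SettingAdapter{}.Convert<InternalHashTable>(config.m_setting);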
+class SettingAdapter
+{
+public:
+    template <typename SharedHashTable>
+    typename SharedHashTable::Setting Convert(const HashTableConfig::Setting& from) const
+    {
+        typename SharedHashTable::Setting to;
+
+        to.m_numBuckets = from.m_numBuckets;
+        to.m_numBucketsPerMutex = (std::max)(from.m_numBucketsPerMutex.get_value_or(1U), 1U);
+        to.m_fixedKeySize = from.m_fixedKeySize.get_value_or(0U);
+        to.m_fixedValueSize = from.m_fixedValueSize.get_value_or(0U);
+
+        return to;
+    }
+};
+
+} // namespace HashTable
+} // namespace L4
\ No newline at end of file
diff --git a/inc/L4/HashTable/Common/SharedHashTable.h b/inc/L4/HashTable/Common/SharedHashTable.h
new file mode 100644
index 0000000..f004cbf
--- /dev/null
+++ b/inc/L4/HashTable/Common/SharedHashTable.h
@@ -0,0 +1,206 @@
+#pragma once
+
+#include <array>
+#include <cstdint>
+
+#include "HashTable/IHashTable.h"
+#include "Interprocess/Container/Vector.h"
+#include "Log/PerfCounter.h"
+#include "Utils/AtomicOffsetPtr.h"
+#include "Utils/Exception.h"
+#include "Utils/Lock.h"
+
+namespace L4
+{
+namespace HashTable
+{
+
+// SharedHashTable struct represents the hash table structure.
+template <typename TData, typename TAllocator>
+struct SharedHashTable
+{
+    using Data = TData;
+    using Allocator = TAllocator;
+
+    // HashTable::Entry struct represents an entry in the chained bucket list.
+    // Entry layout is as follows:
+    //
+    // | tag1  | tag2  | tag3  | tag4  | tag5  | tag6  | tag7  | tag8  |  1
+    // | tag9  | tag10 | tag11 | tag12 | tag13 | tag14 | tag15 | tag16 |  2
+    // | Data1 pointer                                                 |  3
+    // | Data2 pointer                                                 |  4
+    // | Data3 pointer                                                 |  5
+    // | Data4 pointer                                                 |  6
+    // | Data5 pointer                                                 |  7
+    // | Data6 pointer                                                 |  8
+    // | Data7 pointer                                                 |  9
+    // | Data8 pointer                                                 | 10
+    // | Data9 pointer                                                 | 11
+    // | Data10 pointer                                                | 12
+    // | Data11 pointer                                                | 13
+    // | Data12 pointer                                                | 14
+    // | Data13 pointer                                                | 15
+    // | Data14 pointer                                                | 16
+    // | Data15 pointer                                                | 17
+    // | Data16 pointer                                                | 18
+    // | Entry pointer to the next Entry                               | 19
+    // <---------------------------- 8 bytes ------------------------->
+    //
+    // where tag1 is a tag for Data1, tag2 for Data2, and so on. A tag value can be checked
+    // first, before dereferencing the corresponding Data pointer, for a quick miss test.
+    // Also note that a byte read is atomic on modern processors, so a tag is a plain
+    // std::uint8_t instead of an atomic type. Even in the case where the tag value read is
+    // garbage, this is acceptable for the following reasons:
+    // 1) if the garbage value was a hit where it should have been a miss: the actual key
+    //    comparison will fail,
+    // 2) if the garbage value was a miss where it should have been a hit: the key value must
+    //    have been changed since the tag was changed, so the record will be looked up correctly
+    //    once the written tag value becomes visible. Note that we don't need to guarantee the
+    //    timing of writing and reading (i.e., a value written does not have to be visible to
+    //    the reader right away).
+    //
+    // Note about the CPU cache: in a previous implementation, the Entry was 64 bytes so that it
+    // would fit into a single CPU cache line. However, this resulted in a lot of wasted space.
+    // For example, when the ratio of the number of expected records to the number of buckets
+    // was 2:1, only 85% of the buckets were occupied. With a 10:1 ratio, bucket utilization
+    // reaches 99.98%. Achieving this required more data per Entry, and the ideal number
+    // (determined by experiments) turned out to be 16 records per Entry. Because of how the
+    // CPU fetches contiguous memory, this had no measurable impact in micro-benchmarks.
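+    // As a worked example (not from the original source), the 152-byte Entry size
+    // asserted below breaks down as:
+    //
+    //     16 x 1-byte tags           =  16 bytes
+    //     16 x 8-byte data pointers  = 128 bytes
+    //      1 x 8-byte next pointer   =   8 bytes
+    //     total                      = 152 bytes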
+ struct Entry + { + Entry() = default; + + // Releases deallocates all the memories of the chained entries including + // the data list in the current Entry. + void Release(Allocator allocator) + { + auto dataDeleter = [allocator](auto& data) + { + auto dataToDelete = data.Load(); + if (dataToDelete != nullptr) + { + dataToDelete->~Data(); + Allocator::rebind::other(allocator).deallocate(dataToDelete, 1U); + } + }; + + // Delete all the chained entries, not including itself. + auto curEntry = m_next.Load(); + + while (curEntry != nullptr) + { + auto entryToDelete = curEntry; + + // Copy m_next for the next iteration. + curEntry = entryToDelete->m_next.Load(); + + // Delete all the data within this entry. + for (auto& data : entryToDelete->m_dataList) + { + dataDeleter(data); + } + + // Clean the current entry itself. + entryToDelete->~Entry(); + Allocator::rebind::other(allocator).deallocate(entryToDelete, 1U); + } + + // Delete all the data from the head of chained entries. + for (auto& data : m_dataList) + { + dataDeleter(data); + } + } + + static constexpr std::uint8_t c_numDataPerEntry = 16U; + + std::array m_tags{ 0U }; + + std::array, c_numDataPerEntry> m_dataList{}; + + Utils::AtomicOffsetPtr m_next{}; + }; + + static_assert(sizeof(Entry) == 152, "Entry should be 152 bytes."); + + struct Setting + { + using KeySize = IReadOnlyHashTable::Key::size_type; + using ValueSize = IReadOnlyHashTable::Value::size_type; + + Setting() = default; + + explicit Setting( + std::uint32_t numBuckets, + std::uint32_t numBucketsPerMutex = 1U, + KeySize fixedKeySize = 0U, + ValueSize fixedValueSize = 0U) + : m_numBuckets{ numBuckets } + , m_numBucketsPerMutex{ numBucketsPerMutex } + , m_fixedKeySize{ fixedKeySize } + , m_fixedValueSize{ fixedValueSize } + {} + + std::uint32_t m_numBuckets = 1U; + std::uint32_t m_numBucketsPerMutex = 1U; + KeySize m_fixedKeySize = 0U; + ValueSize m_fixedValueSize = 0U; + }; + + SharedHashTable::SharedHashTable( + const Setting& setting, + Allocator allocator) + : m_allocator{ allocator } + , m_setting{ setting } + , m_buckets{ setting.m_numBuckets, Allocator::rebind::other(m_allocator) } + , m_mutexes{ + (std::max)(setting.m_numBuckets / (std::max)(setting.m_numBucketsPerMutex, 1U), 1U), + Allocator::rebind::other(m_allocator) } + , m_perfData{} + { + m_perfData.Set(HashTablePerfCounter::BucketsCount, m_buckets.size()); + m_perfData.Set( + HashTablePerfCounter::TotalIndexSize, + (m_buckets.size() * sizeof(Entry)) + + (m_mutexes.size() * sizeof(Mutex)) + + sizeof(SharedHashTable)); + } + + SharedHashTable::~SharedHashTable() + { + for (auto& bucket : m_buckets) + { + bucket.Release(m_allocator); + } + } + + using Mutex = Utils::ReaderWriterLockSlim; + using Lock = std::lock_guard; + using UniqueLock = std::unique_lock; + + using Buckets = Interprocess::Container::Vector::other>; + using Mutexes = Interprocess::Container::Vector::other>; + + template + auto GetAllocator() const + { + return Allocator::rebind::other(m_allocator); + } + + Mutex& GetMutex(std::size_t index) + { + return m_mutexes[index % m_mutexes.size()]; + } + + Allocator m_allocator; + + const Setting m_setting; + + Buckets m_buckets; + + Mutexes m_mutexes; + + HashTablePerfData m_perfData; + + SharedHashTable(const SharedHashTable&) = delete; + SharedHashTable& operator=(const SharedHashTable&) = delete; +}; + +} // namespace HashTable +} // namespace L4 diff --git a/inc/L4/HashTable/Config.h b/inc/L4/HashTable/Config.h new file mode 100644 index 0000000..864f6d8 --- /dev/null +++ 
b/inc/L4/HashTable/Config.h @@ -0,0 +1,91 @@ +#pragma once + +#include +#include +#include +#include +#include +#include "HashTable/IHashTable.h" +#include "Serialization/IStream.h" +#include "Utils/Properties.h" + +namespace L4 +{ + +// HashTableConfig struct. +struct HashTableConfig +{ + struct Setting + { + using KeySize = IReadOnlyHashTable::Key::size_type; + using ValueSize = IReadOnlyHashTable::Value::size_type; + + explicit Setting( + std::uint32_t numBuckets, + boost::optional numBucketsPerMutex = {}, + boost::optional fixedKeySize = {}, + boost::optional fixedValueSize = {}) + : m_numBuckets{ numBuckets } + , m_numBucketsPerMutex{ numBucketsPerMutex } + , m_fixedKeySize{ fixedKeySize } + , m_fixedValueSize{ fixedValueSize } + {} + + std::uint32_t m_numBuckets; + boost::optional m_numBucketsPerMutex; + boost::optional m_fixedKeySize; + boost::optional m_fixedValueSize; + }; + + struct Cache + { + Cache( + std::uint64_t maxCacheSizeInBytes, + std::chrono::seconds recordTimeToLive, + bool forceTimeBasedEviction) + : m_maxCacheSizeInBytes{ maxCacheSizeInBytes } + , m_recordTimeToLive{ recordTimeToLive } + , m_forceTimeBasedEviction{ forceTimeBasedEviction } + {} + + std::uint64_t m_maxCacheSizeInBytes; + std::chrono::seconds m_recordTimeToLive; + bool m_forceTimeBasedEviction; + }; + + struct Serializer + { + using Properties = Utils::Properties; + + Serializer( + std::shared_ptr streamReader = {}, + boost::optional properties = {}) + : m_streamReader{ streamReader } + , m_properties{ properties } + {} + + std::shared_ptr m_streamReader; + boost::optional m_properties; + }; + + HashTableConfig( + std::string name, + Setting setting, + boost::optional cache = {}, + boost::optional serializer = {}) + : m_name{ std::move(name) } + , m_setting{ std::move(setting) } + , m_cache{ cache } + , m_serializer{ serializer } + { + assert(m_setting.m_numBuckets > 0U + || (m_serializer && (serializer->m_streamReader != nullptr))); + } + + std::string m_name; + Setting m_setting; + boost::optional m_cache; + boost::optional m_serializer; +}; + +} // namespace L4 diff --git a/inc/L4/HashTable/IHashTable.h b/inc/L4/HashTable/IHashTable.h new file mode 100644 index 0000000..98dbe54 --- /dev/null +++ b/inc/L4/HashTable/IHashTable.h @@ -0,0 +1,102 @@ +#pragma once + +#include +#include "Log/PerfCounter.h" +#include "Serialization/IStream.h" +#include "Utils/Properties.h" + + +namespace L4 +{ + + +// IReadOnlyHashTable interface for read-only access to the hash table. +struct IReadOnlyHashTable +{ + // Blob struct that represents a memory blob. + template + struct Blob + { + using size_type = TSize; + + explicit Blob(const std::uint8_t* data = nullptr, size_type size = 0U) + : m_data{ data } + , m_size{ size } + { + static_assert(std::numeric_limits::is_integer, "size_type is not an integer."); + } + + bool operator==(const Blob& other) const + { + return (m_size == other.m_size) + && !memcmp(m_data, other.m_data, m_size); + } + + bool operator!=(const Blob& other) const + { + return !(*this == other); + } + + const std::uint8_t* m_data; + size_type m_size; + }; + + using Key = Blob; + using Value = Blob; + + struct IIterator; + + using IIteratorPtr = std::unique_ptr; + + virtual ~IReadOnlyHashTable() = default; + + virtual bool Get(const Key& key, Value& value) const = 0; + + virtual IIteratorPtr GetIterator() const = 0; + + virtual const HashTablePerfData& GetPerfData() const = 0; +}; + + +// IReadOnlyHashTable::IIterator interface for the hash table iterator. 
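+// A typical iteration sketch (illustrative; `hashTable` is any IReadOnlyHashTable):
+//
+//     auto iterator = hashTable.GetIterator();
+//     while (iterator->MoveNext())
+//     {
+//         const auto key = iterator->GetKey();
+//         const auto value = iterator->GetValue();
+//         // ... consume key and value ...
+//     }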
+struct IReadOnlyHashTable::IIterator +{ + virtual ~IIterator() = default; + + virtual void Reset() = 0; + + virtual bool MoveNext() = 0; + + virtual Key GetKey() const = 0; + + virtual Value GetValue() const = 0; +}; + + +// IWritableHashTable interface for write access to the hash table. +struct IWritableHashTable : public virtual IReadOnlyHashTable +{ + struct ISerializer; + + using ISerializerPtr = std::unique_ptr; + + virtual void Add(const Key& key, const Value& value) = 0; + + virtual bool Remove(const Key& key) = 0; + + virtual ISerializerPtr GetSerializer() const = 0; +}; + + +// IWritableHashTable::ISerializer interface for serializing hash table. +struct IWritableHashTable::ISerializer +{ + virtual ~ISerializer() = default; + + virtual void Serialize( + IStreamWriter& writer, + const Utils::Properties& properties) = 0; +}; + + +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/HashTable/ReadWrite/HashTable.h b/inc/L4/HashTable/ReadWrite/HashTable.h new file mode 100644 index 0000000..0cc25d5 --- /dev/null +++ b/inc/L4/HashTable/ReadWrite/HashTable.h @@ -0,0 +1,578 @@ +#pragma once + +#include +#include +#include +#include "detail/ToRawPointer.h" +#include "Epoch/IEpochActionManager.h" +#include "HashTable/Common/SharedHashTable.h" +#include "HashTable/Common/Record.h" +#include "HashTable/IHashTable.h" +#include "HashTable/ReadWrite/Serializer.h" +#include "Log/PerfCounter.h" +#include "Serialization/IStream.h" +#include "Utils/Exception.h" +#include "Utils/MurmurHash3.h" +#include "Utils/Properties.h" + +namespace L4 +{ + +// ReadWriteHashTable is a general purpose hash table where the look up is look free. +namespace HashTable +{ +namespace ReadWrite +{ + +// ReadOnlyHashTable class implements IReadOnlyHashTable interface and provides +// the functionality to read data given a key. +template +class ReadOnlyHashTable : public virtual IReadOnlyHashTable +{ +public: + using HashTable = SharedHashTable; + + class Iterator; + + explicit ReadOnlyHashTable( + HashTable& hashTable, + boost::optional recordSerializer = boost::none) + : m_hashTable{ hashTable } + , m_recordSerializer{ + recordSerializer + ? *recordSerializer + : RecordSerializer{ + m_hashTable.m_setting.m_fixedKeySize, + m_hashTable.m_setting.m_fixedValueSize } } + {} + + virtual bool Get(const Key& key, Value& value) const override + { + const auto bucketInfo = GetBucketInfo(key); + const auto* entry = &m_hashTable.m_buckets[bucketInfo.first]; + + while (entry != nullptr) + { + for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) + { + if (bucketInfo.second == entry->m_tags[i]) + { + // There could be a race condition where m_dataList[i] is updated during access. + // Therefore, load it once and save it (it's safe to store it b/c the memory + // will not be deleted until ref count becomes 0). + const auto data = entry->m_dataList[i].Load(std::memory_order_acquire); + + if (data != nullptr) + { + const auto record = m_recordSerializer.Deserialize(*data); + if (record.m_key == key) + { + value = record.m_value; + return true; + } + } + } + } + + entry = entry->m_next.Load(std::memory_order_acquire); + } + + return false; + } + + virtual IIteratorPtr GetIterator() const override + { + return std::make_unique(m_hashTable, m_recordSerializer); + } + + virtual const HashTablePerfData& GetPerfData() const override + { + // Synchronizes with any std::memory_order_release if there exists, so that + // HashTablePerfData has the latest values at the moment when GetPerfData() is called. 
+ std::atomic_thread_fence(std::memory_order_acquire); + return m_hashTable.m_perfData; + } + + ReadOnlyHashTable(const ReadOnlyHashTable&) = delete; + ReadOnlyHashTable& operator=(const ReadOnlyHashTable&) = delete; + +protected: + // GetBucketInfo returns a pair, where the first is the index to the bucket + // and the second is the tag value for the given key. + // In this hash table, we treat tag value of 0 as empty (see WritableHashTable::Remove()), + // so in the worst case scenario, where an entry has an empty data list and the tag + // value returned for the key is 0, the look up cost is up to 6 checks. We can do something + // smarter by using the unused two bytes per Entry, but since an Entry object fits into + // CPU cache, the extra overhead should be minimal. + std::pair GetBucketInfo(const Key& key) const + { + std::array hash; + MurmurHash3_x64_128(key.m_data, key.m_size, 0U, hash.data()); + + return { + static_cast(hash[0] % m_hashTable.m_buckets.size()), + static_cast(hash[1]) }; + } + + HashTable& m_hashTable; + + RecordSerializer m_recordSerializer; +}; + + +// ReadOnlyHashTable::Iterator class implements IIterator interface and provides +// read-only iterator for the ReadOnlyHashTable. +template +class ReadOnlyHashTable::Iterator : public IIterator +{ +public: + Iterator( + const HashTable& hashTable, + const RecordSerializer& recordDeserializer) + : m_hashTable{ hashTable } + , m_recordSerializer{ recordDeserializer } + , m_currentBucketIndex{ -1 } + , m_currentRecordIndex{ 0U } + , m_currentEntry{ nullptr } + {} + + Iterator(Iterator&& iterator) + : m_hashTable{ std::move(iterator.m_hashTable) } + , m_recordSerializer{ std::move(iterator.recordDeserializer) } + , m_currentBucketIndex{ std::move(iterator.m_currentBucketIndex) } + , m_currentRecordIndex{ std::move(iterator.m_currentRecordIndex) } + , m_currentEntry{ std::move(iterator.m_currentEntry) } + {} + + void Reset() override + { + m_currentBucketIndex = -1; + m_currentRecordIndex = 0U; + m_currentEntry = nullptr; + } + + bool MoveNext() override + { + if (IsEnd()) + { + return false; + } + + if (m_currentEntry != nullptr) + { + MoveToNextData(); + } + + assert(m_currentRecordIndex < HashTable::Entry::c_numDataPerEntry); + + while ((m_currentEntry == nullptr) + || (m_currentRecord = m_currentEntry->m_dataList[m_currentRecordIndex].Load()) == nullptr) + { + if (m_currentEntry == nullptr) + { + ++m_currentBucketIndex; + m_currentRecordIndex = 0U; + + if (IsEnd()) + { + return false; + } + + m_currentEntry = &m_hashTable.m_buckets[m_currentBucketIndex]; + } + else + { + MoveToNextData(); + } + } + + assert(m_currentEntry != nullptr); + assert(m_currentRecord != nullptr); + + return true; + } + + Key GetKey() const override + { + if (!IsValid()) + { + throw RuntimeException("HashTableIterator is not correctly used."); + } + + return m_recordSerializer.Deserialize(*m_currentRecord).m_key; + } + + Value GetValue() const override + { + if (!IsValid()) + { + throw RuntimeException("HashTableIterator is not correctly used."); + } + + return m_recordSerializer.Deserialize(*m_currentRecord).m_value; + } + + Iterator(const Iterator&) = delete; + Iterator& operator=(const Iterator&) = delete; + +private: + bool IsValid() const + { + return !IsEnd() + && (m_currentEntry != nullptr) + && (m_currentRecord != nullptr); + } + + bool IsEnd() const + { + return m_currentBucketIndex == static_cast(m_hashTable.m_buckets.size()); + } + + void MoveToNextData() + { + if (++m_currentRecordIndex >= HashTable::Entry::c_numDataPerEntry) + { + 
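+            // The data list of the current entry is exhausted; restart at index 0
+            // in the next chained entry (which may be null, ending the iteration
+            // for this bucket).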
m_currentRecordIndex = 0U; + m_currentEntry = m_currentEntry->m_next.Load(); + } + } + + const HashTable& m_hashTable; + const RecordSerializer& m_recordSerializer; + + std::int64_t m_currentBucketIndex; + std::uint8_t m_currentRecordIndex; + + const typename HashTable::Entry* m_currentEntry; + const RecordBuffer* m_currentRecord; +}; + + +// The following warning is from the virtual inheritance and safe to disable in this case. +// https://msdn.microsoft.com/en-us/library/6b3sy7ae.aspx +#pragma warning(push) +#pragma warning(disable:4250) + +// WritableHashTable class implements IWritableHashTable interface and also provides +// the read only access (Get()) to the hash table. +// Note the virtual inheritance on ReadOnlyHashTable so that any derived class +// can have only one ReadOnlyHashTable base class instance. +template +class WritableHashTable + : public virtual ReadOnlyHashTable + , public IWritableHashTable +{ +public: + WritableHashTable( + HashTable& hashTable, + IEpochActionManager& epochManager) + : ReadOnlyHashTable(hashTable) + , m_epochManager{ epochManager } + {} + + virtual void Add(const Key& key, const Value& value) override + { + Add(CreateRecordBuffer(key, value)); + } + + virtual bool Remove(const Key& key) override + { + const auto bucketInfo = GetBucketInfo(key); + + auto* entry = &m_hashTable.m_buckets[bucketInfo.first]; + + HashTable::Lock lock{ m_hashTable.GetMutex(bucketInfo.first) }; + + // Note that similar to Add(), the following block is performed inside a critical section, + // therefore, it is safe to do "Load"s with memory_order_relaxed. + while (entry != nullptr) + { + for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) + { + if (bucketInfo.second == entry->m_tags[i]) + { + const auto data = entry->m_dataList[i].Load(std::memory_order_relaxed); + + if (data != nullptr) + { + const auto record = m_recordSerializer.Deserialize(*data); + if (record.m_key == key) + { + Remove(*entry, i); + return true; + } + } + } + } + + entry = entry->m_next.Load(std::memory_order_relaxed); + } + + return false; + } + + virtual ISerializerPtr GetSerializer() const override + { + return std::make_unique(m_hashTable); + } + +protected: + void Add(RecordBuffer* recordToAdd) + { + assert(recordToAdd != nullptr); + + const auto newRecord = m_recordSerializer.Deserialize(*recordToAdd); + const auto& newKey = newRecord.m_key; + const auto& newValue = newRecord.m_value; + + Stat stat{ newKey.m_size, newValue.m_size }; + + const auto bucketInfo = GetBucketInfo(newKey); + + auto* curEntry = &m_hashTable.m_buckets[bucketInfo.first]; + + HashTable::Entry* entryToUpdate = nullptr; + std::uint8_t curDataIndex = 0U; + + HashTable::UniqueLock lock{ m_hashTable.GetMutex(bucketInfo.first) }; + + // Note that the following block is performed inside a critical section, therefore, + // it is safe to do "Load"s with memory_order_relaxed. + while (curEntry != nullptr) + { + ++stat.m_chainIndex; + + for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) + { + const auto data = curEntry->m_dataList[i].Load(std::memory_order_relaxed); + + if (data == nullptr) + { + if (entryToUpdate == nullptr) + { + // Found an entry with no data set, but still need to go through the end of + // the list to see if an entry with the given key exists. 
+ entryToUpdate = curEntry; + curDataIndex = i; + } + } + else if (curEntry->m_tags[i] == bucketInfo.second) + { + const auto oldRecord = m_recordSerializer.Deserialize(*data); + if (newKey == oldRecord.m_key) + { + // Will overwrite this entry data. + entryToUpdate = curEntry; + curDataIndex = i; + stat.m_oldValueSize = oldRecord.m_value.m_size; + break; + } + } + } + + // Found the entry data to replaces. + if (stat.m_oldValueSize != 0U) + { + break; + } + + // Check if this is the end of the chaining. If so, create a new entry if we haven't found + // any entry to update along the way. + if (entryToUpdate == nullptr && curEntry->m_next.Load(std::memory_order_relaxed) == nullptr) + { + curEntry->m_next.Store( + new (Detail::to_raw_pointer( + m_hashTable.GetAllocator().allocate(1U))) + HashTable::Entry(), + std::memory_order_release); + + stat.m_isNewEntryAdded = true; + } + + curEntry = curEntry->m_next.Load(std::memory_order_relaxed); + } + + assert(entryToUpdate != nullptr); + + auto recordToDelete = UpdateRecord(*entryToUpdate, curDataIndex, recordToAdd, bucketInfo.second); + + lock.unlock(); + + UpdatePerfDataForAdd(stat); + + ReleaseRecord(recordToDelete); + } + + // The chainIndex is the 1-based index for the given entry in the chained bucket list. + // It is assumed that this function is called under a lock. + void Remove(typename HashTable::Entry& entry, std::uint8_t index) + { + auto recordToDelete = UpdateRecord(entry, index, nullptr, 0U); + + assert(recordToDelete != nullptr); + + const auto record = m_recordSerializer.Deserialize(*recordToDelete); + + UpdatePerfDataForRemove( + Stat{ + record.m_key.m_size, + record.m_value.m_size, + 0U + }); + + ReleaseRecord(recordToDelete); + } + +private: + struct Stat; + + class Serializer; + + RecordBuffer* CreateRecordBuffer(const Key& key, const Value& value) + { + const auto bufferSize = m_recordSerializer.CalculateBufferSize(key, value); + auto buffer = Detail::to_raw_pointer( + m_hashTable.GetAllocator().allocate(bufferSize)); + + return m_recordSerializer.Serialize(key, value, buffer, bufferSize); + } + + RecordBuffer* UpdateRecord(typename HashTable::Entry& entry, std::uint8_t index, RecordBuffer* newRecord, std::uint8_t newTag) + { + // This function should be called under a lock, so calling with memory_order_relaxed for Load() is safe. + auto& recordHolder = entry.m_dataList[index]; + auto oldRecord = recordHolder.Load(std::memory_order_relaxed); + + recordHolder.Store(newRecord, std::memory_order_release); + entry.m_tags[index] = newTag; + + return oldRecord; + } + + void ReleaseRecord(RecordBuffer* record) + { + if (record == nullptr) + { + return; + } + + m_epochManager.RegisterAction( + [this, record]() + { + record->~RecordBuffer(); + m_hashTable.GetAllocator().deallocate(record, 1U); + }); + } + + void UpdatePerfDataForAdd(const Stat& stat) + { + auto& perfData = m_hashTable.m_perfData; + + if (stat.m_oldValueSize != 0U) + { + // Updating the existing record. Therefore, no change in the key size. + perfData.Add(HashTablePerfCounter::TotalValueSize, + static_cast(stat.m_valueSize) - stat.m_oldValueSize); + } + else + { + // We are adding a new data instead of replacing. + perfData.Add(HashTablePerfCounter::TotalKeySize, stat.m_keySize); + perfData.Add(HashTablePerfCounter::TotalValueSize, stat.m_valueSize); + perfData.Add(HashTablePerfCounter::TotalIndexSize, + // Record overhead. + m_recordSerializer.CalculateRecordOverhead() + // Entry overhead if created. + + (stat.m_isNewEntryAdded ? 
sizeof(HashTable::Entry) : 0U)); + + perfData.Min(HashTablePerfCounter::MinKeySize, stat.m_keySize); + perfData.Max(HashTablePerfCounter::MaxKeySize, stat.m_keySize); + + perfData.Increment(HashTablePerfCounter::RecordsCount); + + if (stat.m_isNewEntryAdded) + { + perfData.Increment(HashTablePerfCounter::ChainingEntriesCount); + + if (stat.m_chainIndex > 1U) + { + perfData.Max(HashTablePerfCounter::MaxBucketChainLength, stat.m_chainIndex); + } + } + } + + perfData.Min(HashTablePerfCounter::MinValueSize, stat.m_valueSize); + perfData.Max(HashTablePerfCounter::MaxValueSize, stat.m_valueSize); + } + + void UpdatePerfDataForRemove(const Stat& stat) + { + auto& perfData = m_hashTable.m_perfData; + + perfData.Decrement(HashTablePerfCounter::RecordsCount); + perfData.Subtract(HashTablePerfCounter::TotalKeySize, stat.m_keySize); + perfData.Subtract(HashTablePerfCounter::TotalValueSize, stat.m_valueSize); + perfData.Subtract(HashTablePerfCounter::TotalIndexSize, m_recordSerializer.CalculateRecordOverhead()); + } + + IEpochActionManager& m_epochManager; +}; + +#pragma warning(pop) + + +// WritableHashTable::Stat struct encapsulates stats for Add()/Remove(). +template +struct WritableHashTable::Stat +{ + using KeySize = Key::size_type; + using ValueSize = Value::size_type; + + explicit Stat( + KeySize keySize = 0U, + ValueSize valueSize = 0U, + ValueSize oldValueSize = 0U, + std::uint32_t chainIndex = 0U, + bool isNewEntryAdded = false) + : m_keySize{ keySize } + , m_valueSize{ valueSize } + , m_oldValueSize{ oldValueSize } + , m_chainIndex{ chainIndex } + , m_isNewEntryAdded{ isNewEntryAdded } + {} + + KeySize m_keySize; + ValueSize m_valueSize; + ValueSize m_oldValueSize; + std::uint32_t m_chainIndex; + bool m_isNewEntryAdded; +}; + + +// WritableHashTable::Serializer class that implements ISerializer, which provides +// the functionality to serialize the WritableHashTable. +template +class WritableHashTable::Serializer : public IWritableHashTable::ISerializer +{ +public: + explicit Serializer(HashTable& hashTable) + : m_hashTable{ hashTable } + {} + + Serializer(const Serializer&) = delete; + Serializer& operator=(const Serializer&) = delete; + + void Serialize( + IStreamWriter& writer, + const Utils::Properties& /* properties */) override + { + ReadWrite::Serializer{}.Serialize(m_hashTable, writer); + } + +private: + HashTable& m_hashTable; +}; + +} // namespace ReadWrite +} // namespace HashTable +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/HashTable/ReadWrite/Serializer.h b/inc/L4/HashTable/ReadWrite/Serializer.h new file mode 100644 index 0000000..98ffd7c --- /dev/null +++ b/inc/L4/HashTable/ReadWrite/Serializer.h @@ -0,0 +1,268 @@ +#pragma once + +#include +#include +#include "Epoch/IEpochActionManager.h" +#include "Log/PerfCounter.h" +#include "Serialization/IStream.h" +#include "Serialization/SerializerHelper.h" +#include "Utils/Exception.h" +#include "Utils/Properties.h" + +namespace L4 +{ +namespace HashTable +{ +namespace ReadWrite +{ + +// Note that the HashTable template parameter in this file is +// HashTable::ReadWrite::ReadOnlyHashTable::HashTable. +// However, due to the cyclic dependency, it needs to be passed as a template type. + +// Interface for a serializer for the given HashTable type. +template +struct ISerializer +{ + virtual ~ISerializer() = default; + + // Assumes writer has not been started yet, thus the serializer is responsible + // for calling Begin() and End()on the writer. 
+ virtual void Serialize( + HashTable& hashTable, + IStreamWriter& writer) const = 0; +}; + +// Interface for a deserializer for the given Memory and HashTable type. +template +struct IDeserializer +{ + virtual ~IDeserializer() = default; + + // Assumes that reader.Begin() has already been called and the version info has been read. + // The serializer should call reader.End() before Deserialize() returns. + virtual typename Memory::template UniquePtr Deserialize( + Memory& memory, + IStreamReader& reader) const = 0; +}; + + +// All the deprecated (previous versions) serializer should be put inside the Deprecated namespace. +// Removing any of the Deprecated serializers from the source code will require the major package version change. +namespace Deprecated +{ +} // namespace Deprecated + + +namespace Current +{ + + +constexpr std::uint8_t c_version = 3U; + +// Current serializer used for serializing hash tables. +// The serialization format of Serializer is: +// followed by +// If the next byte is set to 1: +// +// Otherwise, end of the records. +template +class Serializer : public ISerializer +{ +public: + Serializer() = default; + + Serializer(const Serializer&) = delete; + Serializer& operator=(const Serializer&) = delete; + + void Serialize( + HashTable& hashTable, + IStreamWriter& writer) const override + { + writer.Begin(); + + auto& perfData = hashTable.m_perfData; + perfData.Set(HashTablePerfCounter::RecordsCountSavedFromSerializer, 0); + + SerializerHelper helper(writer); + + helper.Serialize(c_version); + + helper.Serialize(&hashTable.m_setting, sizeof(hashTable.m_setting)); + + ReadOnlyHashTable readOnlyHashTable(hashTable); + + auto iterator = readOnlyHashTable.GetIterator(); + while (iterator->MoveNext()) + { + helper.Serialize(true); // Indicates record exists. + const auto key = iterator->GetKey(); + const auto value = iterator->GetValue(); + + helper.Serialize(key.m_size); + helper.Serialize(key.m_data, key.m_size); + + helper.Serialize(value.m_size); + helper.Serialize(value.m_data, value.m_size); + + perfData.Increment(HashTablePerfCounter::RecordsCountSavedFromSerializer); + } + + helper.Serialize(false); // Indicates the end of records. + + // Flush perf counter so that the values are up to date when GetPerfData() is called. + std::atomic_thread_fence(std::memory_order_release); + + writer.End(); + } +}; + +// Current Deserializer used for deserializing hash tables. 
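+// For reference, an illustrative summary of the stream this deserializer consumes
+// (matching Current::Serializer above):
+//
+//     [version : 1 byte]
+//     [HashTable::Setting : sizeof(Setting) bytes]
+//     repeated : [true : 1 byte][key size][key bytes][value size][value bytes]
+//     finally  : [false : 1 byte], marking the end of the records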
+template +class Deserializer : public IDeserializer +{ +public: + explicit Deserializer(const Utils::Properties& /* properties */) + {} + + Deserializer(const Deserializer&) = delete; + Deserializer& operator=(const Deserializer&) = delete; + + typename Memory::template UniquePtr Deserialize( + Memory& memory, + IStreamReader& reader) const override + { + DeserializerHelper helper(reader); + + HashTable::Setting setting; + helper.Deserialize(setting); + + auto hashTable{ memory.MakeUnique( + setting, + memory.GetAllocator()) }; + + EpochActionManager epochActionManager; + + using Allocator = typename Memory:: template Allocator<>; + + WritableHashTable writableHashTable( + *hashTable, + epochActionManager); + + auto& perfData = hashTable->m_perfData; + + std::vector keyBuffer; + std::vector valueBuffer; + + bool hasMoreData = false; + helper.Deserialize(hasMoreData); + + while (hasMoreData) + { + IReadOnlyHashTable::Key key; + IReadOnlyHashTable::Value value; + + helper.Deserialize(key.m_size); + keyBuffer.resize(key.m_size); + helper.Deserialize(keyBuffer.data(), key.m_size); + key.m_data = keyBuffer.data(); + + helper.Deserialize(value.m_size); + valueBuffer.resize(value.m_size); + helper.Deserialize(valueBuffer.data(), value.m_size); + value.m_data = valueBuffer.data(); + + writableHashTable.Add(key, value); + + helper.Deserialize(hasMoreData); + + perfData.Increment(HashTablePerfCounter::RecordsCountLoadedFromSerializer); + } + + // Flush perf counter so that the values are up to date when GetPerfData() is called. + std::atomic_thread_fence(std::memory_order_release); + + reader.End(); + + return hashTable; + } + +private: + // Deserializer internally uses WritableHashTable for deserialization, therefore + // an implementation of IEpochActionManager is needed. Since all the keys in the hash table + // are expected to be unique, no RegisterAction() should be called. + class EpochActionManager : public IEpochActionManager + { + public: + void RegisterAction(Action&& /* action */) override + { + // Since it is assumed that the serializer is loading from the stream generated by the same serializer, + // it is guaranteed that all the keys are unique (a property of a hash table). Therefore, RegisterAction() + // should not be called by the WritableHashTable. + throw RuntimeException("RegisterAction() should not be called from the serializer."); + } + }; +}; + + +} // namespace Current + + +// Serializer is the main driver for serializing a hash table. +// It always uses the Current::Serializer for serializing a hash table. +template +class Serializer +{ +public: + Serializer() = default; + Serializer(const Serializer&) = delete; + Serializer& operator=(const Serializer&) = delete; + + void Serialize( + HashTable& hashTable, + IStreamWriter& writer) const + { + Current::Serializer{}.Serialize(hashTable, writer); + } +}; + +// Deserializer is the main driver for deserializing the input stream to create a hash table. 
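+// A usage sketch (illustrative; the template arguments and the `memory`, `reader`
+// and `properties` objects are hypothetical):
+//
+//     Deserializer<...> deserializer{ properties };
+//     auto hashTable = deserializer.Deserialize(memory, reader);
+//
+// Deserialize() dispatches on the version byte at the head of the stream and
+// throws a RuntimeException for unsupported versions.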
+template +class Deserializer +{ +public: + explicit Deserializer(const Utils::Properties& properties) + : m_properties(properties) + {} + + Deserializer(const Deserializer&) = delete; + Deserializer& operator=(const Deserializer&) = delete; + + typename Memory::template UniquePtr Deserialize( + Memory& memory, + IStreamReader& reader) const + { + reader.Begin(); + + std::uint8_t version = 0U; + reader.Read(reinterpret_cast(&version), sizeof(version)); + + switch (version) + { + case Current::c_version: + return Current::Deserializer{ m_properties }.Deserialize(memory, reader); + default: + boost::format err("Unsupported version '%1%' is given."); + err % version; + throw RuntimeException(err.str()); + } + } + +private: + const Utils::Properties& m_properties; +}; + +} // namespace ReadWrite +} // namespace HashTable +} // namespace L4 + diff --git a/inc/L4/Interprocess/Container/List.h b/inc/L4/Interprocess/Container/List.h new file mode 100644 index 0000000..3151560 --- /dev/null +++ b/inc/L4/Interprocess/Container/List.h @@ -0,0 +1,19 @@ +#pragma once + +#include + +namespace L4 +{ +namespace Interprocess +{ +namespace Container +{ + + +template +using List = boost::interprocess::list; + + +} // namespace Container +} // namespace Interprocess +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Interprocess/Container/String.h b/inc/L4/Interprocess/Container/String.h new file mode 100644 index 0000000..bf1c9ad --- /dev/null +++ b/inc/L4/Interprocess/Container/String.h @@ -0,0 +1,19 @@ +#pragma once + +#include + +namespace L4 +{ +namespace Interprocess +{ +namespace Container +{ + + +template +using String = boost::interprocess::basic_string, Allocator>; + + +} // namespace Container +} // namespace Interprocess +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Interprocess/Container/Vector.h b/inc/L4/Interprocess/Container/Vector.h new file mode 100644 index 0000000..d851db4 --- /dev/null +++ b/inc/L4/Interprocess/Container/Vector.h @@ -0,0 +1,19 @@ +#pragma once + +#include + +namespace L4 +{ +namespace Interprocess +{ +namespace Container +{ + + +template +using Vector = boost::interprocess::vector; + + +} // namespace Container +} // namespace Interprocess +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/LocalMemory/Context.h b/inc/L4/LocalMemory/Context.h new file mode 100644 index 0000000..3408cc0 --- /dev/null +++ b/inc/L4/LocalMemory/Context.h @@ -0,0 +1,55 @@ +#pragma once + +#include "Epoch/EpochRefPolicy.h" +#include "EpochManager.h" +#include "HashTableManager.h" + +namespace L4 +{ +namespace LocalMemory +{ + +class Context : private EpochRefPolicy +{ +public: + Context( + HashTableManager& hashTableManager, + EpochManager::EpochRefManager& epochRefManager) + : EpochRefPolicy(epochRefManager) + , m_hashTableManager{ hashTableManager } + {} + + Context(Context&& context) + : EpochRefPolicy(std::move(context)) + , m_hashTableManager{ context.m_hashTableManager } + {} + + const IReadOnlyHashTable& operator[](const char* name) const + { + return m_hashTableManager.GetHashTable(name); + } + + IWritableHashTable& operator[](const char* name) + { + return m_hashTableManager.GetHashTable(name); + } + + const IReadOnlyHashTable& operator[](std::size_t index) const + { + return m_hashTableManager.GetHashTable(index); + } + + IWritableHashTable& operator[](std::size_t index) + { + return m_hashTableManager.GetHashTable(index); + } + + Context(const Context&) = delete; + Context& operator=(const Context&) = delete; + +private: + 
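+    // Borrowed from HashTableService, which owns the manager and is expected to
+    // outlive any Context it hands out.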
HashTableManager& m_hashTableManager; +}; + +} // namespace LocalMemory +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/LocalMemory/EpochManager.h b/inc/L4/LocalMemory/EpochManager.h new file mode 100644 index 0000000..c67e90c --- /dev/null +++ b/inc/L4/LocalMemory/EpochManager.h @@ -0,0 +1,124 @@ +#pragma once + +#include +#include +#include +#include "Epoch/Config.h" +#include "Epoch/EpochActionManager.h" +#include "Epoch/EpochQueue.h" +#include "Log/PerfCounter.h" +#include "Utils/Lock.h" +#include "Utils/RunningThread.h" + +namespace L4 +{ +namespace LocalMemory +{ + +// EpochManager aggregates epoch-related functionalities such as adding/removing +// client epoch queues, registering/performing actions, and updating the epoch counters. +class EpochManager : public IEpochActionManager +{ +public: + using EpochQueue = EpochQueue< + boost::shared_lock_guard, + std::lock_guard>; + + using EpochRefManager = EpochRefManager; + + EpochManager( + const EpochManagerConfig& config, + ServerPerfData& perfData) + : m_perfData{ perfData } + , m_config{ config } + , m_currentEpochCounter{ 0U } + , m_epochQueue{ + m_currentEpochCounter, + m_config.m_epochQueueSize } + , m_epochRefManager{ m_epochQueue } + , m_epochCounterManager{ m_epochQueue } + , m_epochActionManager{ config.m_numActionQueues } + , m_processingThread{ + m_config.m_epochProcessingInterval, + [this] + { + this->Remove(); + this->Add(); + }} + {} + + EpochRefManager& GetEpochRefManager() + { + return m_epochRefManager; + } + + void RegisterAction(Action&& action) override + { + m_epochActionManager.RegisterAction(m_currentEpochCounter, std::move(action)); + m_perfData.Increment(ServerPerfCounter::PendingActionsCount); + } + + EpochManager(const EpochManager&) = delete; + EpochManager& operator=(const EpochManager&) = delete; + +private: + using EpochCounterManager = EpochCounterManager; + + using ProcessingThread = Utils::RunningThread>; + + // Enqueues a new epoch whose counter value is last counter + 1. + // This is called from the server side. + void Add() + { + // Incrementing the global epoch counter before incrementing per-connection + // epoch counter is safe (not so the other way around). If the server process is + // registering an action at the m_currentEpochCounter in RegisterAction(), + // it is happening in the "future," and this means that if the client is referencing + // the memory to be deleted in the "future," it will be safe. + ++m_currentEpochCounter; + + m_epochCounterManager.AddNewEpoch(); + } + + // Dequeues any epochs whose ref counter is 0, meaning there is no reference at that time. + void Remove() + { + const auto oldestEpochCounter = m_epochCounterManager.RemoveUnreferenceEpochCounters(); + + const auto numActionsPerformed = m_epochActionManager.PerformActions(oldestEpochCounter); + + m_perfData.Subtract(ServerPerfCounter::PendingActionsCount, numActionsPerformed); + m_perfData.Set(ServerPerfCounter::LastPerformedActionsCount, numActionsPerformed); + m_perfData.Set(ServerPerfCounter::OldestEpochCounterInQueue, oldestEpochCounter); + m_perfData.Set(ServerPerfCounter::LatestEpochCounterInQueue, m_currentEpochCounter); + } + + // Reference to the performance data. + ServerPerfData& m_perfData; + + // Configuration related to epoch manager. + EpochManagerConfig m_config; + + // The global current epoch counter. + std::atomic_uint64_t m_currentEpochCounter; + + // Epoch queue. + EpochQueue m_epochQueue; + + // Handles adding/decrementing ref counts. 
+ EpochRefManager m_epochRefManager; + + // Handles adding new epoch and finding the epoch counts that have zero ref counts. + EpochCounterManager m_epochCounterManager; + + // Handles registering/performing actions. + EpochActionManager m_epochActionManager; + + // Thread responsible for updating the current epoch counter, + // removing the unreferenced epoch counter, etc. + // Should be the last member so that it gets destroyed first. + ProcessingThread m_processingThread; +}; + +} // namespace LocalMemory +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/LocalMemory/HashTableManager.h b/inc/L4/LocalMemory/HashTableManager.h new file mode 100644 index 0000000..31ad03d --- /dev/null +++ b/inc/L4/LocalMemory/HashTableManager.h @@ -0,0 +1,89 @@ +#pragma once + +#include +#include +#include +#include "Epoch/IEpochActionManager.h" +#include "HashTable/Config.h" +#include "HashTable/ReadWrite/HashTable.h" +#include "HashTable/Cache/HashTable.h" +#include "Utils/Containers.h" +#include "Utils/Exception.h" + +namespace L4 +{ +namespace LocalMemory +{ + +class HashTableManager +{ +public: + template + std::size_t Add( + const HashTableConfig& config, + IEpochActionManager& epochActionManager, + Allocator allocator) + { + if (m_hashTableNameToIndex.find(config.m_name) != m_hashTableNameToIndex.end()) + { + throw RuntimeException("Same hash table name already exists."); + } + + using namespace HashTable; + + using InternalHashTable = ReadWrite::WritableHashTable::HashTable; + + auto internalHashTable = std::make_shared( + InternalHashTable::Setting{ + config.m_setting.m_numBuckets, + (std::max)(config.m_setting.m_numBucketsPerMutex.get_value_or(1U), 1U), + config.m_setting.m_fixedKeySize.get_value_or(0U), + config.m_setting.m_fixedValueSize.get_value_or(0U) }, + allocator); + + // TODO: Create from a serializer. + + const auto& cacheConfig = config.m_cache; + auto hashTable = + cacheConfig + ? 
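+            // A cache config implies eviction support, so wrap the table in the cache-aware variant.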
std::make_unique>( + *internalHashTable, + epochActionManager, + cacheConfig->m_maxCacheSizeInBytes, + cacheConfig->m_recordTimeToLive, + cacheConfig->m_forceTimeBasedEviction) + : std::make_unique>( + *internalHashTable, + epochActionManager); + + m_internalHashTables.emplace_back(std::move(internalHashTable)); + m_hashTables.emplace_back(std::move(hashTable)); + + const auto newIndex = m_hashTables.size() - 1; + + m_hashTableNameToIndex.emplace(config.m_name, newIndex); + + return newIndex; + } + + IWritableHashTable& GetHashTable(const char* name) + { + assert(m_hashTableNameToIndex.find(name) != m_hashTableNameToIndex.cend()); + return GetHashTable(m_hashTableNameToIndex.find(name)->second); + } + + IWritableHashTable& GetHashTable(std::size_t index) + { + assert(index < m_hashTables.size()); + return *m_hashTables[index]; + } + +private: + Utils::StdStringKeyMap m_hashTableNameToIndex; + + std::vector m_internalHashTables; + std::vector> m_hashTables; +}; + +} // namespace LocalMemory +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/LocalMemory/HashTableService.h b/inc/L4/LocalMemory/HashTableService.h new file mode 100644 index 0000000..ea02087 --- /dev/null +++ b/inc/L4/LocalMemory/HashTableService.h @@ -0,0 +1,46 @@ +#pragma once + +#include "Context.h" +#include "EpochManager.h" +#include "HashTable/Config.h" +#include "Log/PerfCounter.h" + +namespace L4 +{ +namespace LocalMemory +{ + +class HashTableService +{ +public: + explicit HashTableService( + const EpochManagerConfig& epochManagerConfig = EpochManagerConfig()) + : m_epochManager{ epochManagerConfig, m_serverPerfData } + {} + + template > + std::size_t AddHashTable( + const HashTableConfig& config, + Allocator allocator = Allocator()) + { + return m_hashTableManager.Add(config, m_epochManager, allocator); + } + + Context GetContext() + { + return Context(m_hashTableManager, m_epochManager.GetEpochRefManager()); + } + +private: + ServerPerfData m_serverPerfData; + + HashTableManager m_hashTableManager; + + // Make sure HashTableManager is destroyed before EpochManager b/c + // it is possible that EpochManager could be processing Epoch Actions + // on hash tables. + EpochManager m_epochManager; +}; + +} // namespace LocalMemory +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Log/IPerfLogger.h b/inc/L4/Log/IPerfLogger.h new file mode 100644 index 0000000..a668d8b --- /dev/null +++ b/inc/L4/Log/IPerfLogger.h @@ -0,0 +1,38 @@ +#pragma once + +#include +#include "PerfCounter.h" + + +namespace L4 +{ + + +// IPerfLogger interface. +struct IPerfLogger +{ + struct IData; + + virtual ~IPerfLogger() = default; + + virtual void Log(const IData& data) = 0; +}; + +// IPerfLogger::IData interface that provides access to ServerPerfData and the aggregated HashTablePerfData. +// Note that the user of IPerfLogger only needs to implement IPerfLogger since IPerfLogger::IData is +// implemented internally. 
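+// For illustration only, a logger that dumps the server counters might look like
+// the following sketch; ConsolePerfLogger is a hypothetical name, not part of this
+// library, and <iostream> is assumed:
+//
+//     class ConsolePerfLogger : public IPerfLogger
+//     {
+//     public:
+//         void Log(const IData& data) override
+//         {
+//             const auto& serverPerfData = data.GetServerPerfData();
+//             for (std::uint16_t i = 0U; i < static_cast<std::uint16_t>(ServerPerfCounter::Count); ++i)
+//             {
+//                 std::cout << c_serverPerfCounterNames[i] << " = "
+//                     << serverPerfData.Get(static_cast<ServerPerfCounter>(i)) << std::endl;
+//             }
+//         }
+//     };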
+struct IPerfLogger::IData +{ + using HashTablesPerfData = std::map< + std::string, + std::reference_wrapper>; + + virtual ~IData() = default; + + virtual const ServerPerfData& GetServerPerfData() const = 0; + + virtual const HashTablesPerfData& GetHashTablesPerfData() const = 0; +}; + + +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Log/PerfCounter.h b/inc/L4/Log/PerfCounter.h new file mode 100644 index 0000000..118b91d --- /dev/null +++ b/inc/L4/Log/PerfCounter.h @@ -0,0 +1,216 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace L4 +{ + +enum class ServerPerfCounter : std::uint16_t +{ + // Connection Manager + ClientConnectionsCount = 0U, + + // EpochManager + OldestEpochCounterInQueue, + LatestEpochCounterInQueue, + PendingActionsCount, + LastPerformedActionsCount, + + Count +}; + +const std::array< + const char*, + static_cast(ServerPerfCounter::Count)> c_serverPerfCounterNames = +{ + // Connection Manager + "ClientConnectionsCount", + + // EpochManager + "OldestEpochCounterInQueue", + "LatestEpochCounterInQueue", + "PendingActionsCount", + "LastPerformedActionsCount" +}; + +enum class HashTablePerfCounter : std::uint16_t +{ + RecordsCount = 0U, + BucketsCount, + TotalKeySize, + TotalValueSize, + TotalIndexSize, + ChainingEntriesCount, + + // Max/Min counters are always increasing. In other words, we don't keep track + // of the next max record size, when the max record is deleted. + MinKeySize, + MaxKeySize, + MinValueSize, + MaxValueSize, + MaxBucketChainLength, + + RecordsCountLoadedFromSerializer, + RecordsCountSavedFromSerializer, + + // CacheHashTable specific counters. + CacheHitCount, + CacheMissCount, + EvictedRecordsCount, + + Count +}; + +const std::array< + const char*, + static_cast(HashTablePerfCounter::Count)> c_hashTablePerfCounterNames = +{ + "RecordsCount", + "BucketsCount", + "TotalKeySize", + "TotalValueSize", + "TotalIndexSize", + "ChainingEntriesCount", + "MinKeySize", + "MaxKeySize", + "MinValueSize", + "MaxValueSize", + "MaxBucketChainLength", + "RecordsCountLoadedFromSerializer", + "RecordsCountSavedFromSerializer", + "CacheHitCount", + "CacheMissCount", + "EvictedRecordsCount" +}; + + +template +class PerfCounters +{ +public: + typedef std::int64_t TValue; + typedef std::atomic TCounter; + + PerfCounters() + { + std::for_each( + std::begin(m_counters), + std::end(m_counters), + [] (TCounter& counter) + { + counter = 0; + }); + } + + // Note that since the ordering doesn't matter when the counter is updated, memory_order_relaxed + // is used for all perf counter updates. + // More from http://en.cppreference.com/w/cpp/atomic/memory_order: + // Typical use for relaxed memory ordering is updating counters, such as the reference counters + // of std::shared_ptr, since this only requires atomicity, but not ordering or synchronization. 
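+    // For example, two threads calling Increment() concurrently still perform two
+    // atomic fetch_add operations; relaxed ordering only gives up a happens-before
+    // relationship with surrounding reads/writes, which these counters do not need.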
+ TValue Get(TCounterEnum counterEnum) const + { + return m_counters[static_cast(counterEnum)].load(std::memory_order_relaxed); + } + + void Set(TCounterEnum counterEnum, TValue value) + { + m_counters[static_cast(counterEnum)].store(value, std::memory_order_relaxed); + } + + void Increment(TCounterEnum counterEnum) + { + m_counters[static_cast(counterEnum)].fetch_add(1, std::memory_order_relaxed); + } + + void Decrement(TCounterEnum counterEnum) + { + m_counters[static_cast(counterEnum)].fetch_sub(1, std::memory_order_relaxed); + } + + void Add(TCounterEnum counterEnum, TValue value) + { + if (value != 0) + { + m_counters[static_cast(counterEnum)].fetch_add(value, std::memory_order_relaxed); + } + } + + void Subtract(TCounterEnum counterEnum, TValue value) + { + if (value != 0) + { + m_counters[static_cast(counterEnum)].fetch_sub(value, std::memory_order_relaxed); + } + } + + void Max(TCounterEnum counterEnum, TValue value) + { + auto& counter = m_counters[static_cast(counterEnum)]; + + TValue startValue = counter.load(std::memory_order_acquire); + + do + { + // "load()" from counter is needed only once since the value of Max is + // monotonically increasing. If startValue is changed by other threads, + // compare_exchange_strong will return false and startValue will be + // written to the latest value, thus returning to this code path. + if (startValue > value) + { + return; + } + } + while (!counter.compare_exchange_strong( + startValue, + value, + std::memory_order_release, + std::memory_order_acquire)); + } + + void Min(TCounterEnum counterEnum, TValue value) + { + auto& counter = m_counters[static_cast(counterEnum)]; + + TValue startValue = counter.load(std::memory_order_acquire); + do + { + // Check the comment in Max() and Min() is monotonically decreasing. + if (startValue < value) + { + return; + } + } + while (!counter.compare_exchange_strong( + startValue, + value, + std::memory_order_release, + std::memory_order_acquire)); + } + +private: + __declspec(align(8)) TCounter m_counters[TCounterEnum::Count]; +}; + +typedef PerfCounters ServerPerfData; + +struct HashTablePerfData : public PerfCounters +{ + HashTablePerfData() + { + // Initialize any min counters to the max value. + const auto maxValue = (std::numeric_limits::max)(); + + Set(HashTablePerfCounter::MinValueSize, maxValue); + Set(HashTablePerfCounter::MinKeySize, maxValue); + + // MaxBucketChainLength starts with 1 since bucket already + // contains the entry which stores the data. + Set(HashTablePerfCounter::MaxBucketChainLength, 1); + } +}; + +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Log/PerfLogger.h b/inc/L4/Log/PerfLogger.h new file mode 100644 index 0000000..b8d2c2f --- /dev/null +++ b/inc/L4/Log/PerfLogger.h @@ -0,0 +1,56 @@ +#pragma once + +#include "IPerfLogger.h" + +namespace L4 +{ + + +struct PerfLoggerManagerConfig; + + +// PerfData class, which holds the ServerPerfData and HashTablePerfData for each hash table. +// Note that PerfData owns the ServerPerfData but has only the const references to HashTablePerfData, +// which is owned by the HashTable. 
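+// Consequently, each registered HashTablePerfData must outlive the PerfData
+// instance that references it.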
+ +class PerfData : public IPerfLogger::IData +{ +public: + PerfData() = default; + + ServerPerfData& GetServerPerfData(); + + const ServerPerfData& GetServerPerfData() const override; + + const HashTablesPerfData& GetHashTablesPerfData() const override; + + void AddHashTablePerfData(const char* hashTableName, const HashTablePerfData& perfData); + + PerfData(const PerfData&) = delete; + PerfData& operator=(const PerfData&) = delete; + +private: + ServerPerfData m_serverPerfData; + HashTablesPerfData m_hashTablesPerfData; +}; + + +// PerfData inline implementations. + +inline ServerPerfData& PerfData::GetServerPerfData() +{ + return m_serverPerfData; +} + +inline const ServerPerfData& PerfData::GetServerPerfData() const +{ + return m_serverPerfData; +} + +inline const PerfData::HashTablesPerfData& PerfData::GetHashTablesPerfData() const +{ + return m_hashTablesPerfData; +} + + +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Serialization/IStream.h b/inc/L4/Serialization/IStream.h new file mode 100644 index 0000000..c504fb6 --- /dev/null +++ b/inc/L4/Serialization/IStream.h @@ -0,0 +1,36 @@ +#pragma once + +#include +#include + + +namespace L4 +{ + + +// IStream interface. +struct IStream +{ + virtual ~IStream() {} + + virtual void Begin() = 0; + + virtual void End() = 0; +}; + + +// IStreamReader interface. +struct IStreamReader : public IStream +{ + virtual void Read(std::uint8_t buffer[], std::size_t bufferSize) = 0; +}; + + +// IStreamWriter interface. +struct IStreamWriter : public IStream +{ + virtual void Write(const std::uint8_t buffer[], std::size_t bufferSize) = 0; +}; + + +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Serialization/SerializerHelper.h b/inc/L4/Serialization/SerializerHelper.h new file mode 100644 index 0000000..c257cbf --- /dev/null +++ b/inc/L4/Serialization/SerializerHelper.h @@ -0,0 +1,65 @@ +#pragma once + +#include +#include "IStream.h" + +namespace L4 +{ + +// SerializerHelper provides help functions to write to IStreamWriter. +class SerializerHelper +{ +public: + SerializerHelper(IStreamWriter& writer) + : m_writer(writer) + {} + + SerializerHelper(const SerializerHelper&) = delete; + SerializerHelper& operator=(const SerializerHelper&) = delete; + + template + void Serialize(const T& obj) + { + m_writer.Write(reinterpret_cast(&obj), sizeof(obj)); + } + + void Serialize(const void* data, std::uint32_t dataSize) + { + m_writer.Write(static_cast(data), dataSize); + } + +private: + IStreamWriter& m_writer; +}; + + +// DeserializerHelper provides help functions to read from IStreamReader. +class DeserializerHelper +{ +public: + DeserializerHelper(IStreamReader& reader) + : m_reader(reader) + { + } + + DeserializerHelper(const DeserializerHelper&) = delete; + DeserializerHelper& operator=(const DeserializerHelper&) = delete; + + template + void Deserialize(T& obj) + { + m_reader.Read(reinterpret_cast(&obj), sizeof(obj)); + } + + void Deserialize(void* data, std::uint32_t dataSize) + { + m_reader.Read(static_cast(data), dataSize); + } + +private: + IStreamReader& m_reader; +}; + + +} // namespace L4 + diff --git a/inc/L4/Utils/AtomicOffsetPtr.h b/inc/L4/Utils/AtomicOffsetPtr.h new file mode 100644 index 0000000..5a170e7 --- /dev/null +++ b/inc/L4/Utils/AtomicOffsetPtr.h @@ -0,0 +1,52 @@ +#pragma once + +#include +#include +#include +#include + +namespace L4 +{ +namespace Utils +{ + + +// AtomicOffsetPtr provides a way to atomically update the offset pointer. 
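+// Like offset_ptr, the stored offset stays meaningful when the shared memory
+// segment is mapped at different base addresses in different processes.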
+// The current boost::interprocess::offset_ptr cannot be used with std::atomic<> because +// the class is not trivially copyable. AtomicOffsetPtr borrows the same concept to calculate +// the pointer address based on the offset (boost::interprocess::ipcdetail::offset_ptr_to* functions +// are reused). +// Note that ->, *, copy/assignment operators are not implemented intentionally so that +// the user (inside this library) is aware of what he is intended to do without accidentally +// incurring any performance hits. +template +class AtomicOffsetPtr +{ +public: + AtomicOffsetPtr() + : m_offset(1) + {} + + AtomicOffsetPtr(const AtomicOffsetPtr&) = delete; + AtomicOffsetPtr& operator=(const AtomicOffsetPtr&) = delete; + + T* Load(std::memory_order memoryOrder = std::memory_order_seq_cst) const + { + return static_cast( + boost::interprocess::ipcdetail::offset_ptr_to_raw_pointer( + this, + m_offset.load(memoryOrder))); + } + + void Store(T* ptr, std::memory_order memoryOrder = std::memory_order_seq_cst) + { + m_offset.store(boost::interprocess::ipcdetail::offset_ptr_to_offset(ptr, this), memoryOrder); + } + +private: + std::atomic_uint64_t m_offset; +}; + + +} // namespace Utils +} // namespace L4 diff --git a/inc/L4/Utils/Clock.h b/inc/L4/Utils/Clock.h new file mode 100644 index 0000000..4d83cb4 --- /dev/null +++ b/inc/L4/Utils/Clock.h @@ -0,0 +1,24 @@ +#pragma once + +#include + + +namespace L4 +{ +namespace Utils +{ + + +class EpochClock +{ +public: + std::chrono::seconds GetCurrentEpochTime() const + { + return std::chrono::duration_cast( + std::chrono::high_resolution_clock::now().time_since_epoch()); + } +}; + + +} // namespace Utils +} // namespace L4 diff --git a/inc/L4/Utils/ComparerHasher.h b/inc/L4/Utils/ComparerHasher.h new file mode 100644 index 0000000..2c2189b --- /dev/null +++ b/inc/L4/Utils/ComparerHasher.h @@ -0,0 +1,69 @@ +#pragma once + +#include +#include +#include +#include + + +namespace L4 +{ +namespace Utils +{ + + +// CaseInsensitiveStdStringComparer is a STL-compatible case-insensitive ANSI std::string comparer. +struct CaseInsensitiveStdStringComparer +{ + bool operator()(const std::string& str1, const std::string& str2) const + { + return _stricmp(str1.c_str(), str2.c_str()) == 0; + } +}; + +// CaseInsensitiveStringComparer is a STL-compatible case-insensitive ANSI string comparer. +struct CaseInsensitiveStringComparer +{ + bool operator()(const char* const str1, const char* const str2) const + { + return _stricmp(str1, str2) == 0; + } +}; + +// CaseInsensitiveStringHasher is a STL-compatible case-insensitive ANSI std::string hasher. +struct CaseInsensitiveStdStringHasher +{ + std::size_t operator()(const std::string& str) const + { + std::size_t seed = 0; + + for (auto c : str) + { + boost::hash_combine(seed, std::toupper(c)); + } + + return seed; + } +}; + +// CaseInsensitiveStringHasher is a STL-compatible case-insensitive ANSI string hasher. 
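+// For example, "Table1" and "TABLE1" produce the same hash and compare equal under
+// the companion comparers, which the name-to-index maps in Utils/Containers.h rely on.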
+struct CaseInsensitiveStringHasher
+{
+    std::size_t operator()(const char* str) const
+    {
+        assert(str != nullptr);
+
+        std::size_t seed = 0;
+
+        while (*str)
+        {
+            boost::hash_combine(seed, std::toupper(*str++));
+        }
+
+        return seed;
+    }
+};
+
+
+} // namespace Utils
+} // namespace L4
diff --git a/inc/L4/Utils/Containers.h b/inc/L4/Utils/Containers.h
new file mode 100644
index 0000000..c1a2eb1
--- /dev/null
+++ b/inc/L4/Utils/Containers.h
@@ -0,0 +1,45 @@
+#pragma once
+
+#include <functional>
+#include <string>
+#include <unordered_map>
+#include <boost/functional/hash.hpp>
+#include "Utils/ComparerHasher.h"
+
+
+namespace L4
+{
+namespace Utils
+{
+
+
+// StdStringKeyMap is an unordered_map where the key is std::string. It is slower than
+// StringKeyMap below, but it owns the memory of the string, so it's easier to use.
+template <typename TValue>
+using StdStringKeyMap = std::unordered_map<
+    std::string,
+    TValue,
+    Utils::CaseInsensitiveStdStringHasher,
+    Utils::CaseInsensitiveStdStringComparer>;
+
+// StringKeyMap is an unordered_map where the key is const char*.
+// The memory of the key is not owned by StringKeyMap,
+// but it is faster (than StdStringKeyMap above) for lookups.
+template <typename TValue>
+using StringKeyMap = std::unordered_map<
+    const char*,
+    TValue,
+    Utils::CaseInsensitiveStringHasher,
+    Utils::CaseInsensitiveStringComparer>;
+
+// IntegerKeyMap is an unordered_map using boost::hash as the hasher and std::equal_to as the comparer.
+template <typename TKey, typename TValue>
+using IntegerKeyMap = std::unordered_map<
+    TKey,
+    TValue,
+    boost::hash<TKey>,
+    std::equal_to<TKey>>;
+
+
+} // namespace Utils
+} // namespace L4
\ No newline at end of file
diff --git a/inc/L4/Utils/Exception.h b/inc/L4/Utils/Exception.h
new file mode 100644
index 0000000..c8f6ce0
--- /dev/null
+++ b/inc/L4/Utils/Exception.h
@@ -0,0 +1,21 @@
+#pragma once
+
+#include <stdexcept>
+
+namespace L4
+{
+
+// RuntimeException class used across the L4 library.
+class RuntimeException : public std::runtime_error
+{
+public:
+    explicit RuntimeException(const std::string& message)
+        : std::runtime_error(message.c_str())
+    {}
+
+    explicit RuntimeException(const char* message)
+        : std::runtime_error(message)
+    {}
+};
+
+} // namespace L4
diff --git a/inc/L4/Utils/Lock.h b/inc/L4/Utils/Lock.h
new file mode 100644
index 0000000..c44e881
--- /dev/null
+++ b/inc/L4/Utils/Lock.h
@@ -0,0 +1,88 @@
+#pragma once
+
+#include "Utils/Windows.h"
+
+
+namespace L4
+{
+namespace Utils
+{
+
+
+// Represents a RAII wrapper for the Win32 CRITICAL_SECTION.
+class CriticalSection : protected ::CRITICAL_SECTION
+{
+public:
+    // Constructs and initializes the critical section.
+    CriticalSection()
+    {
+        ::InitializeCriticalSection(this);
+    }
+
+    CriticalSection(const CriticalSection& other) = delete;
+    CriticalSection& operator=(const CriticalSection& other) = delete;
+
+    // Destroys the critical section.
+    ~CriticalSection()
+    {
+        ::DeleteCriticalSection(this);
+    }
+
+    // Waits for ownership of the critical section.
+    void lock()
+    {
+        ::EnterCriticalSection(this);
+    }
+
+    // Releases ownership of the critical section.
+    void unlock()
+    {
+        ::LeaveCriticalSection(this);
+    }
+};
+
+// Represents a RAII wrapper for the Win32 SRW lock.
+class ReaderWriterLockSlim
+{
+public:
+    // Constructs and initializes an SRW lock.
+    ReaderWriterLockSlim()
+    {
+        ::InitializeSRWLock(&m_lock);
+    }
+
+    ReaderWriterLockSlim(const ReaderWriterLockSlim& other) = delete;
+    ReaderWriterLockSlim& operator=(const ReaderWriterLockSlim& other) = delete;
+
+    // Acquires an SRW lock in shared mode.
+    void lock_shared()
+    {
+        ::AcquireSRWLockShared(&m_lock);
+    }
+
+    // Acquires an SRW lock in exclusive mode.
+ void lock() + { + ::AcquireSRWLockExclusive(&m_lock); + } + + // Releases an SRW lock that was opened in shared mode. + void unlock_shared() + { + ::ReleaseSRWLockShared(&m_lock); + } + + // Releases an SRW lock that was opened in exclusive mode. + void unlock() + { + ::ReleaseSRWLockExclusive(&m_lock); + } + +private: + // Stores the Win32 SRW lock. + ::SRWLOCK m_lock; +}; + + +} // namespace Utils +} // namespace L4 diff --git a/inc/L4/Utils/Math.h b/inc/L4/Utils/Math.h new file mode 100644 index 0000000..3fbdea5 --- /dev/null +++ b/inc/L4/Utils/Math.h @@ -0,0 +1,79 @@ +#pragma once + +#include +#include +#include + + +namespace L4 +{ +namespace Utils +{ +namespace Math +{ + + +// Rounds up the number to the nearest multiple of base. +inline std::uint64_t RoundUp(std::uint64_t number, std::uint64_t base) +{ + return base ? (((number + base - 1) / base) * base) : number; +} + +// Rounds down the number to the nearest multiple of base. +inline std::uint64_t RoundDown(std::uint64_t number, std::uint64_t base) +{ + return base ? ((number / base) * base) : number; +} + +// Returns true if the given number is a power of 2. +inline bool IsPowerOfTwo(std::uint64_t number) +{ + return number && ((number & (number - 1)) == 0); +} + +// Returns the next highest power of two from the given value. +// http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2. +inline std::uint32_t NextHighestPowerOfTwo(std::uint32_t val) +{ + --val; + val |= val >> 1; + val |= val >> 2; + val |= val >> 4; + val |= val >> 8; + val |= val >> 16; + return ++val; +} + + +// Provides utility functions doing pointer related arithmetics. +namespace PointerArithmetic +{ + + +// Returns a new pointer after adding an offset. +template +inline T* Add(T* ptr, std::size_t offset) +{ + return reinterpret_cast(reinterpret_cast(ptr) + offset); +} + +// Returns a new pointer after subtracting an offset. +template +inline T* Subtract(T* ptr, std::size_t offset) +{ + return reinterpret_cast(reinterpret_cast(ptr) - offset); +} + +// Returns the absolute value of difference in the number of bytes between two pointers. +inline std::size_t Distance(const void* lhs, const void* rhs) +{ + return std::abs(reinterpret_cast(lhs) - reinterpret_cast(rhs)); +} + + +} // namespace PointerArithmetic + + +} // namespace Math +} // namespace Utils +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Utils/MurmurHash3.h b/inc/L4/Utils/MurmurHash3.h new file mode 100644 index 0000000..fb1fb22 --- /dev/null +++ b/inc/L4/Utils/MurmurHash3.h @@ -0,0 +1,37 @@ +//----------------------------------------------------------------------------- +// MurmurHash3 was written by Austin Appleby, and is placed in the public +// domain. The author hereby disclaims copyright to this source code. 
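+// Illustrative use only (not part of the original header): hashing a byte buffer
+// `data` of length `size` into a 128-bit value on x64:
+//
+//     std::uint64_t hash[2];
+//     MurmurHash3_x64_128(data, static_cast<int>(size), 0U /* seed */, hash);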
+ +#ifndef _MURMURHASH3_H_ +#define _MURMURHASH3_H_ + +//----------------------------------------------------------------------------- +// Platform-specific functions and macros + +// Microsoft Visual Studio + +#if defined(_MSC_VER) && (_MSC_VER < 1600) + +typedef unsigned char uint8_t; +typedef unsigned long uint32_t; +typedef unsigned __int64 uint64_t; + +// Other compilers + +#else // defined(_MSC_VER) && (_MSC_VER < 1600) + +#include + +#endif // !defined(_MSC_VER) || (_MSC_VER >= 1600) + +//----------------------------------------------------------------------------- + +void MurmurHash3_x86_32(const void * key, int len, uint32_t seed, void * out); + +void MurmurHash3_x86_128(const void * key, int len, uint32_t seed, void * out); + +void MurmurHash3_x64_128(const void * key, int len, uint32_t seed, void * out); + +//----------------------------------------------------------------------------- + +#endif // _MURMURHASH3_H_ \ No newline at end of file diff --git a/inc/L4/Utils/Properties.h b/inc/L4/Utils/Properties.h new file mode 100644 index 0000000..d8e3c09 --- /dev/null +++ b/inc/L4/Utils/Properties.h @@ -0,0 +1,56 @@ +#pragma once + +#include "Utils/Containers.h" + +#include + + +namespace L4 +{ +namespace Utils +{ + + +// Properties class represents a string to string map (case insensitive). +// It can be used where the configurations should be generic. +class Properties : public StdStringKeyMap +{ +public: + using Base = Utils::StdStringKeyMap; + using Value = Base::value_type; + + Properties() = default; + + // Expose a constructor with initializer_list for convenience. + Properties(std::initializer_list values) + : Base(values) + { + } + + // Returns true if the given key exists and the value associated with + // the key can be converted to the TValue type. If the conversion fails, the value + // of the given val is guaranteed to remain the same. + template + bool TryGet(const std::string& key, TValue& val) const + { + const auto it = find(key); + if (it == end()) + { + return false; + } + + TValue tmp; + if (!boost::conversion::try_lexical_convert(it->second, tmp)) + { + return false; + } + + val = tmp; + + return true; + } +}; + + +} // namespace Utils +} // namespace L4 \ No newline at end of file diff --git a/inc/L4/Utils/RunningThread.h b/inc/L4/Utils/RunningThread.h new file mode 100644 index 0000000..cf7c88b --- /dev/null +++ b/inc/L4/Utils/RunningThread.h @@ -0,0 +1,79 @@ +#pragma once + +#include +#include +#include +#include + + +namespace L4 +{ +namespace Utils +{ + + +// NoOp is a function object that doesn't do anything. +struct NoOp +{ + void operator()(...) {} +}; + +// RunningThread wraps around std::thread and repeatedly runs a given function after yielding +// for the given interval. Note that the destructor waits for the thread to stop. 
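+// For example (illustrative only), a thread that wakes every 10 ms to run a piece
+// of work can be declared as:
+//
+//     Utils::RunningThread<std::function<void()>> runner{
+//         std::chrono::milliseconds{ 10 },
+//         [] { /* periodic work */ } };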
+template +class RunningThread +{ +public: + RunningThread( + std::chrono::milliseconds interval, + CoreFunc coreFunc, + PrepFunc prepFunc = PrepFunc()) + : m_isRunning(), + m_thread( + &RunningThread::Start, + this, + interval, + coreFunc, + prepFunc) + { + } + + ~RunningThread() + { + m_isRunning.store(false); + + if (m_thread.joinable()) + { + m_thread.join(); + } + } + + RunningThread(const RunningThread&) = delete; + RunningThread& operator=(const RunningThread&) = delete; + +private: + void Start( + std::chrono::milliseconds interval, + CoreFunc coreFunc, + PrepFunc prepFunc) + { + m_isRunning.store(true); + + prepFunc(); + + while (m_isRunning.load()) + { + coreFunc(); + + std::this_thread::sleep_for(interval); + } + } + + std::atomic_bool m_isRunning; + + std::thread m_thread; +}; + + +} // namespace Utils +} // namespace L4 diff --git a/inc/L4/Utils/Time.h b/inc/L4/Utils/Time.h new file mode 100644 index 0000000..3c9e3e9 --- /dev/null +++ b/inc/L4/Utils/Time.h @@ -0,0 +1,48 @@ +#pragma once + +#include +#include "SharedMemoryHashTable/Utils/Windows.h" + +namespace Ads +{ +namespace DE +{ +namespace SharedMemory +{ +namespace Utils +{ +namespace Time +{ + + +// Returns the current high resolution system counter value. +inline std::uint64_t GetCurrentSystemCounter() +{ + LARGE_INTEGER counter; + QueryPerformanceCounter(&counter); + return counter.QuadPart; +} + +// Returns how many ticks there are in the given resolution interval. +// Note that the given resolution interval is in the same unit as NtQueryTimerResolution(), +// which is 1/10000 ms. Thus, 10000 translates to 1 ms. +// Note that this function is based on boost::interprocess::ipcdetail::get_system_tick_in_highres_counts(). +inline std::uint32_t GetSystemTicks(std::uint32_t resolutionInterval) +{ + // Frequency in counts per second. + LARGE_INTEGER freq; + QueryPerformanceFrequency(&freq); + + std::int64_t femtoSecondsInOneCount = (1000000000000000LL - 1LL) / freq.QuadPart + 1LL; + + // Calculate the ticks count perf given resolution interval. + return static_cast( + (static_cast(resolutionInterval) * 100000000LL - 1LL) / femtoSecondsInOneCount + 1LL); +} + + +} // namespace Time +} // namespace Utils +} // namespace SharedMemory +} // namespace DE +} // namespace Ads diff --git a/inc/L4/Utils/Windows.h b/inc/L4/Utils/Windows.h new file mode 100644 index 0000000..1ee6dd0 --- /dev/null +++ b/inc/L4/Utils/Windows.h @@ -0,0 +1,57 @@ +#pragma once + +// Allow macro redefinition. 
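+// Specifically, warning C4005 (macro redefinition) is suppressed while the
+// exclusion macros below are defined.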
+#pragma warning(push)
+#pragma warning(disable:4005)
+
+// Explicitly excluding API groups
+//#define NOGDICAPMASKS     // - CC_*, LC_*, PC_*, CP_*, TC_*, RC_
+#define NOVIRTUALKEYCODES   // - VK_*
+//#define NOWINMESSAGES     // - WM_*, EM_*, LB_*, CB_*
+#define NOWINSTYLES         // - WS_*, CS_*, ES_*, LBS_*, SBS_*, CBS_*
+#define NOSYSMETRICS        // - SM_*
+#define NOMENUS             // - MF_*
+#define NOICONS             // - IDI_*
+#define NOKEYSTATES         // - MK_*
+#define NOSYSCOMMANDS       // - SC_*
+#define NORASTEROPS         // - Binary and Tertiary raster ops
+#define NOSHOWWINDOW        // - SW_*
+#define OEMRESOURCE         // - OEM Resource values
+#define NOATOM              // - Atom Manager routines
+#define NOCLIPBOARD         // - Clipboard routines
+#define NOCOLOR             // - Screen colors
+//#define NOCTLMGR          // - Control and Dialog routines
+#define NODRAWTEXT          // - DrawText() and DT_*
+#define NOGDI               // - All GDI defines and routines
+#define NOKERNEL            // - All KERNEL defines and routines
+#define NONLS               // - All NLS (natural language interfaces) defines and routines
+#define NOMB                // - MB_* and MessageBox()
+#define NOMEMMGR            // - GMEM_*, LMEM_*, GHND, LHND, associated routines
+#define NOMETAFILE          // - typedef METAFILEPICT
+#define NOMINMAX            // - Macros min(a,b) and max(a,b)
+//#define NOMSG             // - typedef MSG and associated routines
+#define NOOPENFILE          // - OpenFile(), OemToAnsi, AnsiToOem, and OF_*
+#define NOSCROLL            // - SB_* and scrolling routines
+#define NOSERVICE           // - All Service Controller routines, SERVICE_ equates, etc.
+#define NOSOUND             // - Sound driver routines
+#define NOTEXTMETRIC        // - typedef TEXTMETRIC and associated routines
+#define NOWH                // - SetWindowsHook and WH_*
+#define NOWINOFFSETS        // - GWL_*, GCL_*, associated routines
+#define NOCOMM              // - COMM driver routines
+#define NOKANJI             // - Kanji support stuff.
+#define NOHELP              // - Help engine interface.
+#define NOPROFILER          // - Profiler interface.
+#define NODEFERWINDOWPOS    // - DeferWindowPos routines
+#define NOMCX               // - Modem Configuration Extensions
+
+// Enabling STRICT redefines certain data types so that the compiler does not permit assignment from one type to another without an explicit cast.
+#define STRICT
+
+// Define WIN32_LEAN_AND_MEAN to exclude APIs such as Cryptography, DDE, RPC, Shell, and Windows Sockets.
+// Cryptography is needed due to
+//#define WIN32_LEAN_AND_MEAN
+
+#pragma warning(pop)
+
+
+#include <windows.h>
diff --git a/inc/L4/detail/ToRawPointer.h b/inc/L4/detail/ToRawPointer.h
new file mode 100644
index 0000000..f8b6a37
--- /dev/null
+++ b/inc/L4/detail/ToRawPointer.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include <boost/interprocess/detail/utilities.hpp>
+
+namespace L4
+{
+namespace Detail
+{
+
+
+using boost::interprocess::ipcdetail::to_raw_pointer;
+
+
+} // namespace Detail
+} // namespace L4
\ No newline at end of file
diff --git a/src/EpochActionManager.cpp b/src/EpochActionManager.cpp
new file mode 100644
index 0000000..54c082b
--- /dev/null
+++ b/src/EpochActionManager.cpp
@@ -0,0 +1,86 @@
+#include "Epoch/EpochActionManager.h"
+#include "Utils/Math.h"
+
+#include <cassert>
+#include <thread>
+
+namespace L4
+{
+
+// EpochActionManager class implementation.
+
+EpochActionManager::EpochActionManager(std::uint8_t numActionQueues)
+    : m_epochToActionsList{}
+    , m_counter{}
+{
+    // Calculate numActionQueues as the next highest power of two.
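+    // A power-of-two queue count lets RegisterAction() pick a queue with a
+    // cheap bitwise AND over the counter instead of a modulo.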
+ std::uint16_t newNumActionQueues = numActionQueues; + if (numActionQueues == 0U) + { + newNumActionQueues = static_cast(std::thread::hardware_concurrency()); + } + newNumActionQueues = static_cast(Utils::Math::NextHighestPowerOfTwo(newNumActionQueues)); + + assert(newNumActionQueues != 0U && Utils::Math::IsPowerOfTwo(newNumActionQueues)); + + // Initialize m_epochToActionsList. + m_epochToActionsList.resize(newNumActionQueues); + for (auto& epochToActions : m_epochToActionsList) + { + std::get<0>(epochToActions) = std::make_unique(); + } +} + + +void EpochActionManager::RegisterAction(std::uint64_t epochCounter, IEpochActionManager::Action&& action) +{ + std::uint32_t index = ++m_counter & (m_epochToActionsList.size() - 1); + auto& epochToActions = m_epochToActionsList[index]; + + Lock lock(*std::get<0>(epochToActions)); + std::get<1>(epochToActions)[epochCounter].emplace_back(std::move(action)); +} + + +std::uint64_t EpochActionManager::PerformActions(std::uint64_t epochCounter) +{ + // Actions will be moved here and performed without a lock. + Actions actionsToPerform; + + for (auto& epochToActionsWithLock : m_epochToActionsList) + { + Lock lock(*std::get<0>(epochToActionsWithLock)); + + // lower_bound() so that it is deleted up to but not including epochCounter. + auto& epochToActions = std::get<1>(epochToActionsWithLock); + const auto endIt = epochToActions.lower_bound(epochCounter); + + auto it = epochToActions.begin(); + + while (it != endIt) + { + actionsToPerform.insert( + actionsToPerform.end(), + std::make_move_iterator(it->second.begin()), + std::make_move_iterator(it->second.end())); + + // The following post increment is intentional to avoid iterator invalidation issue. + epochToActions.erase(it++); + } + } + + ApplyActions(actionsToPerform); + + return actionsToPerform.size(); +} + + +void EpochActionManager::ApplyActions(Actions& actions) +{ + for (auto& action : actions) + { + action(); + } +} + +} // namespace L4 \ No newline at end of file diff --git a/src/MurmurHash3.cpp b/src/MurmurHash3.cpp new file mode 100644 index 0000000..e08d5a3 --- /dev/null +++ b/src/MurmurHash3.cpp @@ -0,0 +1,334 @@ +//----------------------------------------------------------------------------- +// MurmurHash3 was written by Austin Appleby, and is placed in the public +// domain. The author hereby disclaims copyright to this source code. + +// Note - The x86 and x64 versions do _not_ produce the same results, as the +// algorithms are optimized for their respective platforms. You can still +// compile and run any of them on any platform, but your performance with the +// non-native version will be less than optimal. 
+ +#include "Utils/MurmurHash3.h" + +//----------------------------------------------------------------------------- +// Platform-specific functions and macros + +// Microsoft Visual Studio + +#if defined(_MSC_VER) + +#define FORCE_INLINE __forceinline + +#include + +#define ROTL32(x,y) _rotl(x,y) +#define ROTL64(x,y) _rotl64(x,y) + +#define BIG_CONSTANT(x) (x) + +// Other compilers + +#else // defined(_MSC_VER) + +#define FORCE_INLINE __attribute__((always_inline)) + +inline uint32_t rotl32(uint32_t x, int8_t r) +{ + return (x << r) | (x >> (32 - r)); +} + +inline uint64_t rotl64(uint64_t x, int8_t r) +{ + return (x << r) | (x >> (64 - r)); +} + +#define ROTL32(x,y) rotl32(x,y) +#define ROTL64(x,y) rotl64(x,y) + +#define BIG_CONSTANT(x) (x##LLU) + +#endif // !defined(_MSC_VER) + +//----------------------------------------------------------------------------- +// Block read - if your platform needs to do endian-swapping or can only +// handle aligned reads, do the conversion here + +FORCE_INLINE uint32_t getblock(const uint32_t * p, int i) +{ + return p[i]; +} + +FORCE_INLINE uint64_t getblock(const uint64_t * p, int i) +{ + return p[i]; +} + +//----------------------------------------------------------------------------- +// Finalization mix - force all bits of a hash block to avalanche + +FORCE_INLINE uint32_t fmix(uint32_t h) +{ + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + + return h; +} + +//---------- + +FORCE_INLINE uint64_t fmix(uint64_t k) +{ + k ^= k >> 33; + k *= BIG_CONSTANT(0xff51afd7ed558ccd); + k ^= k >> 33; + k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53); + k ^= k >> 33; + + return k; +} + +//----------------------------------------------------------------------------- + +void MurmurHash3_x86_32(const void * key, int len, + uint32_t seed, void * out) +{ + const uint8_t * data = (const uint8_t*)key; + const int nblocks = len / 4; + + uint32_t h1 = seed; + + uint32_t c1 = 0xcc9e2d51; + uint32_t c2 = 0x1b873593; + + //---------- + // body + + const uint32_t * blocks = (const uint32_t *)(data + nblocks * 4); + + for (int i = -nblocks; i; i++) + { + uint32_t k1 = getblock(blocks, i); + + k1 *= c1; + k1 = ROTL32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = ROTL32(h1, 13); + h1 = h1 * 5 + 0xe6546b64; + } + + //---------- + // tail + + const uint8_t * tail = (const uint8_t*)(data + nblocks * 4); + + uint32_t k1 = 0; + + switch (len & 3) + { + case 3: k1 ^= tail[2] << 16; + case 2: k1 ^= tail[1] << 8; + case 1: k1 ^= tail[0]; + k1 *= c1; k1 = ROTL32(k1, 15); k1 *= c2; h1 ^= k1; + }; + + //---------- + // finalization + + h1 ^= len; + + h1 = fmix(h1); + + *(uint32_t*)out = h1; +} + +//----------------------------------------------------------------------------- + +void MurmurHash3_x86_128(const void * key, const int len, + uint32_t seed, void * out) +{ + const uint8_t * data = (const uint8_t*)key; + const int nblocks = len / 16; + + uint32_t h1 = seed; + uint32_t h2 = seed; + uint32_t h3 = seed; + uint32_t h4 = seed; + + uint32_t c1 = 0x239b961b; + uint32_t c2 = 0xab0e9789; + uint32_t c3 = 0x38b34ae5; + uint32_t c4 = 0xa1e38b93; + + //---------- + // body + + const uint32_t * blocks = (const uint32_t *)(data + nblocks * 16); + + for (int i = -nblocks; i; i++) + { + uint32_t k1 = getblock(blocks, i * 4 + 0); + uint32_t k2 = getblock(blocks, i * 4 + 1); + uint32_t k3 = getblock(blocks, i * 4 + 2); + uint32_t k4 = getblock(blocks, i * 4 + 3); + + k1 *= c1; k1 = ROTL32(k1, 15); k1 *= c2; h1 ^= k1; + + h1 = ROTL32(h1, 19); h1 += h2; h1 = h1 * 5 + 0x561ccd1b; 
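+        // Each of the four lanes is mixed with its own constant and then folded
+        // into the next lane (k2 into h2, and so on).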
+ + k2 *= c2; k2 = ROTL32(k2, 16); k2 *= c3; h2 ^= k2; + + h2 = ROTL32(h2, 17); h2 += h3; h2 = h2 * 5 + 0x0bcaa747; + + k3 *= c3; k3 = ROTL32(k3, 17); k3 *= c4; h3 ^= k3; + + h3 = ROTL32(h3, 15); h3 += h4; h3 = h3 * 5 + 0x96cd1c35; + + k4 *= c4; k4 = ROTL32(k4, 18); k4 *= c1; h4 ^= k4; + + h4 = ROTL32(h4, 13); h4 += h1; h4 = h4 * 5 + 0x32ac3b17; + } + + //---------- + // tail + + const uint8_t * tail = (const uint8_t*)(data + nblocks * 16); + + uint32_t k1 = 0; + uint32_t k2 = 0; + uint32_t k3 = 0; + uint32_t k4 = 0; + + switch (len & 15) + { + case 15: k4 ^= tail[14] << 16; + case 14: k4 ^= tail[13] << 8; + case 13: k4 ^= tail[12] << 0; + k4 *= c4; k4 = ROTL32(k4, 18); k4 *= c1; h4 ^= k4; + + case 12: k3 ^= tail[11] << 24; + case 11: k3 ^= tail[10] << 16; + case 10: k3 ^= tail[9] << 8; + case 9: k3 ^= tail[8] << 0; + k3 *= c3; k3 = ROTL32(k3, 17); k3 *= c4; h3 ^= k3; + + case 8: k2 ^= tail[7] << 24; + case 7: k2 ^= tail[6] << 16; + case 6: k2 ^= tail[5] << 8; + case 5: k2 ^= tail[4] << 0; + k2 *= c2; k2 = ROTL32(k2, 16); k2 *= c3; h2 ^= k2; + + case 4: k1 ^= tail[3] << 24; + case 3: k1 ^= tail[2] << 16; + case 2: k1 ^= tail[1] << 8; + case 1: k1 ^= tail[0] << 0; + k1 *= c1; k1 = ROTL32(k1, 15); k1 *= c2; h1 ^= k1; + }; + + //---------- + // finalization + + h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + h1 = fmix(h1); + h2 = fmix(h2); + h3 = fmix(h3); + h4 = fmix(h4); + + h1 += h2; h1 += h3; h1 += h4; + h2 += h1; h3 += h1; h4 += h1; + + ((uint32_t*)out)[0] = h1; + ((uint32_t*)out)[1] = h2; + ((uint32_t*)out)[2] = h3; + ((uint32_t*)out)[3] = h4; +} + +//----------------------------------------------------------------------------- + +void MurmurHash3_x64_128(const void * key, const int len, + const uint32_t seed, void * out) +{ + const uint8_t * data = (const uint8_t*)key; + const int nblocks = len / 16; + + uint64_t h1 = seed; + uint64_t h2 = seed; + + uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5); + uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f); + + //---------- + // body + + const uint64_t * blocks = (const uint64_t *)(data); + + for (int i = 0; i < nblocks; i++) + { + uint64_t k1 = getblock(blocks, i * 2 + 0); + uint64_t k2 = getblock(blocks, i * 2 + 1); + + k1 *= c1; k1 = ROTL64(k1, 31); k1 *= c2; h1 ^= k1; + + h1 = ROTL64(h1, 27); h1 += h2; h1 = h1 * 5 + 0x52dce729; + + k2 *= c2; k2 = ROTL64(k2, 33); k2 *= c1; h2 ^= k2; + + h2 = ROTL64(h2, 31); h2 += h1; h2 = h2 * 5 + 0x38495ab5; + } + + //---------- + // tail + + const uint8_t * tail = (const uint8_t*)(data + nblocks * 16); + + uint64_t k1 = 0; + uint64_t k2 = 0; + + switch (len & 15) + { + case 15: k2 ^= uint64_t(tail[14]) << 48; + case 14: k2 ^= uint64_t(tail[13]) << 40; + case 13: k2 ^= uint64_t(tail[12]) << 32; + case 12: k2 ^= uint64_t(tail[11]) << 24; + case 11: k2 ^= uint64_t(tail[10]) << 16; + case 10: k2 ^= uint64_t(tail[9]) << 8; + case 9: k2 ^= uint64_t(tail[8]) << 0; + k2 *= c2; k2 = ROTL64(k2, 33); k2 *= c1; h2 ^= k2; + + case 8: k1 ^= uint64_t(tail[7]) << 56; + case 7: k1 ^= uint64_t(tail[6]) << 48; + case 6: k1 ^= uint64_t(tail[5]) << 40; + case 5: k1 ^= uint64_t(tail[4]) << 32; + case 4: k1 ^= uint64_t(tail[3]) << 24; + case 3: k1 ^= uint64_t(tail[2]) << 16; + case 2: k1 ^= uint64_t(tail[1]) << 8; + case 1: k1 ^= uint64_t(tail[0]) << 0; + k1 *= c1; k1 = ROTL64(k1, 31); k1 *= c2; h1 ^= k1; + }; + + //---------- + // finalization + + h1 ^= len; h2 ^= len; + + h1 += h2; + h2 += h1; + + h1 = fmix(h1); + h2 = fmix(h2); + + h1 += h2; + h2 += h1; + + 
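+    // Write the two 64-bit halves of the 128-bit digest to the output buffer.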
+    ((uint64_t*)out)[0] = h1;
+    ((uint64_t*)out)[1] = h2;
+}
+
+//-----------------------------------------------------------------------------
\ No newline at end of file
diff --git a/src/PerfLogger.cpp b/src/PerfLogger.cpp
new file mode 100644
index 0000000..47c9f55
--- /dev/null
+++ b/src/PerfLogger.cpp
@@ -0,0 +1,25 @@
+#include "Log/PerfLogger.h"
+#include "Utils/Exception.h"
+#include <boost/format.hpp>
+
+namespace L4
+{
+
+// PerfData class implementation.
+
+void PerfData::AddHashTablePerfData(const char* hashTableName, const HashTablePerfData& perfData)
+{
+    auto result = m_hashTablesPerfData.insert(
+        std::make_pair(
+            hashTableName,
+            HashTablesPerfData::mapped_type(perfData)));
+
+    if (!result.second)
+    {
+        boost::format err("Duplicate hash table name found: '%1%'.");
+        err % hashTableName;
+        throw RuntimeException(err.str());
+    }
+}
+
+} // namespace L4
\ No newline at end of file
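As a combined usage sketch of the service pieces above (illustrative only: the HashTableConfig constructor shape is assumed from the m_name and m_setting fields read in HashTableManager::Add, and is not shown in this diff):

    #include "L4/LocalMemory/HashTableService.h"

    int main()
    {
        // Stand up an in-process service with default epoch settings.
        L4::LocalMemory::HashTableService service;

        // Register a table; the returned index can be used in place of the name.
        const auto index = service.AddHashTable(
            L4::HashTableConfig("Table1", L4::HashTableConfig::Setting{ 1000000U }));

        // A context pins an epoch for its lifetime and resolves tables by name or index.
        auto context = service.GetContext();
        auto& hashTable = context[index];
        (void)hashTable;

        return 0;
    }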