diff --git a/Benchmark/Benchmark.vcxproj b/Benchmark/Benchmark.vcxproj
new file mode 100644
index 0000000..ff51c07
--- /dev/null
+++ b/Benchmark/Benchmark.vcxproj
@@ -0,0 +1,71 @@
+
+
+
+
+ Debug
+ x64
+
+
+ Release
+ x64
+
+
+
+ {B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}
+
+
+
+ Application
+
+
+ true
+ v140
+
+
+ false
+ v140
+
+
+
+ L4.Benchmark
+
+
+
+ Console
+ true
+
+
+ MachineX64
+
+
+ $(SolutionDir)inc;$(SolutionDir)inc/L4;%(AdditionalIncludeDirectories)
+ _SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)
+ MaxSpeed
+ AnySuitable
+ true
+
+
+
+
+
+
+
+ {b7846115-88f1-470b-a625-9de0c29229bb}
+
+
+
+
+
+
+
+
+
+ This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Benchmark/Benchmark.vcxproj.filters b/Benchmark/Benchmark.vcxproj.filters
new file mode 100644
index 0000000..32a8145
--- /dev/null
+++ b/Benchmark/Benchmark.vcxproj.filters
@@ -0,0 +1,25 @@
+
+
+
+
+ {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
+ cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
+
+
+ {93995380-89BD-4b04-88EB-625FBE52EBFB}
+ h;hh;hpp;hxx;hm;inl;inc;xsd
+
+
+ {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
+ rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
+
+
+
+
+ Source Files
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Benchmark/main.cpp b/Benchmark/main.cpp
new file mode 100644
index 0000000..560c1aa
--- /dev/null
+++ b/Benchmark/main.cpp
@@ -0,0 +1,710 @@
+#include "L4/LocalMemory/HashTableService.h"
+#include "L4/Log/PerfCounter.h"
+
+#include <algorithm>
+#include <array>
+#include <atomic>
+#include <chrono>
+#include <condition_variable>
+#include <cstdio>
+#include <cstdlib>
+#include <ctime>
+#include <iostream>
+#include <mutex>
+#include <string>
+#include <thread>
+#include <vector>
+#include <boost/program_options.hpp>
+#include <windows.h>
+
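+// Simple wall-clock timer that reports elapsed time in microseconds.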
+class Timer
+{
+public:
+ Timer()
+ : m_start{ std::chrono::high_resolution_clock::now() }
+ {}
+
+ void Reset()
+ {
+ m_start = std::chrono::high_resolution_clock::now();
+ }
+
+ std::chrono::microseconds GetElapsedTime()
+ {
+ return std::chrono::duration_cast<std::chrono::microseconds>(
+ std::chrono::high_resolution_clock::now() - m_start);
+ }
+
+private:
+ std::chrono::time_point<std::chrono::high_resolution_clock> m_start;
+};
+
+
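+// Timer shared by all worker threads: only the first Start() takes effect and each
+// End() overwrites the end time, so it measures from the first thread starting to
+// the last thread finishing.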
+class SynchronizedTimer
+{
+public:
+ SynchronizedTimer() = default;
+
+ void Start()
+ {
+ if (m_isStarted)
+ {
+ return;
+ }
+ m_isStarted = true;
+ m_startCount = std::chrono::high_resolution_clock::now().time_since_epoch().count();
+
+ }
+
+ void End()
+ {
+ m_endCount = std::chrono::high_resolution_clock::now().time_since_epoch().count();
+ }
+
+ std::chrono::microseconds GetElapsedTime()
+ {
+ std::chrono::nanoseconds start{ m_startCount };
+ std::chrono::nanoseconds end{ m_endCount };
+
+ return std::chrono::duration_cast<std::chrono::microseconds>(end - start);
+ }
+
+private:
+ std::atomic_bool m_isStarted = false;
+ std::atomic_uint64_t m_startCount;
+ std::atomic_uint64_t m_endCount;
+};
+
+
+struct PerThreadInfoForWriteTest
+{
+ std::thread m_thread;
+ std::size_t m_dataSetSize = 0;
+ std::chrono::microseconds m_totalTime;
+};
+
+
+struct PerThreadInfoForReadTest
+{
+ std::thread m_thread;
+ std::size_t m_dataSetSize = 0;
+ std::chrono::microseconds m_totalTime;
+};
+
+
+struct CommandLineOptions
+{
+ static constexpr std::size_t c_defaultDataSetSize = 1000000;
+ static constexpr std::uint32_t c_defaultNumBuckets = 1000000;
+ static constexpr std::uint16_t c_defaultKeySize = 16;
+ static constexpr std::uint32_t c_defaultValueSize = 100;
+ static constexpr bool c_defaultRandomizeValueSize = false;
+ static constexpr std::uint32_t c_defaultNumIterationsPerGetContext = 1;
+ static constexpr std::uint16_t c_defaultNumThreads = 1;
+ static constexpr std::uint32_t c_defaultEpochProcessingIntervalInMilli = 10;
+ static constexpr std::uint16_t c_defaultNumActionsQueue = 1;
+ static constexpr std::uint32_t c_defaultRecordTimeToLiveInSeconds = 300;
+ static constexpr std::uint64_t c_defaultCacheSizeInBytes = 1024 * 1024 * 1024;
+ static constexpr bool c_defaultForceTimeBasedEviction = false;
+
+ std::string m_module;
+ std::size_t m_dataSetSize = 0;
+ std::uint32_t m_numBuckets = 0;
+ std::uint16_t m_keySize = 0;
+ std::uint32_t m_valueSize = 0;
+ bool m_randomizeValueSize = false;
+ std::uint32_t m_numIterationsPerGetContext = 0;
+ std::uint16_t m_numThreads = 0;
+ std::uint32_t m_epochProcessingIntervalInMilli;
+ std::uint8_t m_numActionsQueue = 0;
+
+ // The followings are specific for cache hash tables.
+ std::uint32_t m_recordTimeToLiveInSeconds = 0U;
+ std::uint64_t m_cacheSizeInBytes = 0U;
+ bool m_forceTimeBasedEviction = false;
+
+ bool IsCachingModule() const
+ {
+ static const std::string c_cachingModulePrefix{ "cache" };
+ return m_module.substr(0, c_cachingModulePrefix.size()) == c_cachingModulePrefix;
+ }
+};
+
+
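+// Pre-generates the key/value data set used by the benchmarks. Values point into a
+// fixed 64 KB shared buffer (indexed with wrap-around), so only the keys are unique.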
+class DataGenerator
+{
+public:
+ DataGenerator(
+ std::size_t dataSetSize,
+ std::uint16_t keySize,
+ std::uint32_t valueSize,
+ bool randomizeValueSize,
+ bool isDebugMode = false)
+ : m_dataSetSize{ dataSetSize }
+ , m_keySize{ keySize }
+ {
+ if (isDebugMode)
+ {
+ std::cout << "Generating data set with size = " << dataSetSize << std::endl;
+ }
+
+ Timer timer;
+
+ // Populate keys.
+ m_keys.resize(m_dataSetSize);
+ m_keysBuffer.resize(m_dataSetSize);
+ for (std::size_t i = 0; i < m_dataSetSize; ++i)
+ {
+ m_keysBuffer[i].resize(keySize);
+ std::generate(m_keysBuffer[i].begin(), m_keysBuffer[i].end(), std::rand);
+ std::snprintf(reinterpret_cast<char*>(m_keysBuffer[i].data()), keySize, "%llu", i);
+ m_keys[i].m_data = m_keysBuffer[i].data();
+ m_keys[i].m_size = m_keySize;
+ }
+
+ // Populate values buffer. Assumes srand() is already called.
+ std::generate(m_valuesBuffer.begin(), m_valuesBuffer.end(), std::rand);
+
+ // Populate values.
+ m_values.resize(m_dataSetSize);
+ std::size_t currentIndex = 0;
+ for (std::size_t i = 0; i < m_dataSetSize; ++i)
+ {
+ m_values[i].m_data = &m_valuesBuffer[currentIndex % c_valuesBufferSize];
+ m_values[i].m_size = randomizeValueSize ? rand() % valueSize : valueSize;
+ currentIndex += valueSize;
+ }
+
+ if (isDebugMode)
+ {
+ std::cout << "Finished generating data in "
+ << timer.GetElapsedTime().count() << " microseconds" << std::endl;
+ }
+ }
+
+ L4::IReadOnlyHashTable::Key GetKey(std::size_t index) const
+ {
+ return m_keys[index % m_dataSetSize];
+ }
+
+ L4::IReadOnlyHashTable::Value GetValue(std::size_t index) const
+ {
+ return m_values[index % m_dataSetSize];
+ }
+
+private:
+ std::size_t m_dataSetSize;
+ std::uint16_t m_keySize;
+
+ std::vector<std::vector<std::uint8_t>> m_keysBuffer;
+ std::vector<L4::IReadOnlyHashTable::Key> m_keys;
+ std::vector<L4::IReadOnlyHashTable::Value> m_values;
+
+ static const std::size_t c_valuesBufferSize = 64 * 1024;
+ std::array<std::uint8_t, c_valuesBufferSize> m_valuesBuffer;
+};
+
+
+void PrintHardwareInfo()
+{
+ SYSTEM_INFO sysInfo;
+ GetSystemInfo(&sysInfo);
+
+ printf("\n");
+ printf("Hardware information: \n");
+ printf("-------------------------------------\n");
+ printf("%22s | %10u |\n", "OEM ID", sysInfo.dwOemId);
+ printf("%22s | %10u |\n", "Number of processors", sysInfo.dwNumberOfProcessors);
+ printf("%22s | %10u |\n", "Page size", sysInfo.dwPageSize);
+ printf("%22s | %10u |\n", "Processor type", sysInfo.dwProcessorType);
+ printf("-------------------------------------\n");
+ printf("\n");
+}
+
+
+void PrintOptions(const CommandLineOptions& options)
+{
+ printf("------------------------------------------------------\n");
+
+ printf("%39s | %10llu |\n", "Data set size", options.m_dataSetSize);
+ printf("%39s | %10lu |\n", "Number of hash table buckets", options.m_numBuckets);
+ printf("%39s | %10lu |\n", "Key size", options.m_keySize);
+ printf("%39s | %10lu |\n", "Value type", options.m_valueSize);
+ printf("%39s | %10lu |\n", "Number of iterations per GetContext()", options.m_numIterationsPerGetContext);
+ printf("%39s | %10lu |\n", "Epoch processing interval (ms)", options.m_epochProcessingIntervalInMilli);
+ printf("%39s | %10lu |\n", "Number of actions queue", options.m_numActionsQueue);
+
+ if (options.IsCachingModule())
+ {
+ printf("%39s | %10lu |\n", "Record time to live (s)", options.m_recordTimeToLiveInSeconds);
+ printf("%39s | %10llu |\n", "Cache size in bytes", options.m_cacheSizeInBytes);
+ printf("%39s | %10lu |\n", "Force time-based eviction", options.m_forceTimeBasedEviction);
+ }
+
+ printf("------------------------------------------------------\n\n");
+}
+
+
+void PrintHashTableCounters(const L4::HashTablePerfData& perfData)
+{
+ printf("HashTableCounter:\n");
+ printf("----------------------------------------------------\n");
+ for (auto i = 0; i < static_cast<int>(L4::HashTablePerfCounter::Count); ++i)
+ {
+ printf("%35s | %12llu |\n",
+ L4::c_hashTablePerfCounterNames[i],
+ perfData.Get(static_cast<L4::HashTablePerfCounter>(i)));
+ }
+ printf("----------------------------------------------------\n\n");
+}
+
+
+L4::HashTableConfig CreateHashTableConfig(const CommandLineOptions& options)
+{
+ return L4::HashTableConfig(
+ "Table1",
+ L4::HashTableConfig::Setting{ options.m_numBuckets },
+ options.IsCachingModule()
+ ? boost::optional<L4::HashTableConfig::Cache>{
+ L4::HashTableConfig::Cache{
+ options.m_cacheSizeInBytes,
+ std::chrono::seconds{ options.m_recordTimeToLiveInSeconds },
+ options.m_forceTimeBasedEviction }}
+ : boost::none);
+}
+
+
+L4::EpochManagerConfig CreateEpochManagerConfig(const CommandLineOptions& options)
+{
+ return L4::EpochManagerConfig(
+ 10000U,
+ std::chrono::milliseconds(options.m_epochProcessingIntervalInMilli),
+ options.m_numActionsQueue);
+}
+
+
+void ReadPerfTest(const CommandLineOptions& options)
+{
+ printf("Performing read-perf which reads all the records inserted:\n");
+
+ PrintOptions(options);
+
+ auto dataGenerator = std::make_unique<DataGenerator>(
+ options.m_dataSetSize,
+ options.m_keySize,
+ options.m_valueSize,
+ options.m_randomizeValueSize);
+
+ L4::LocalMemory::HashTableService service(CreateEpochManagerConfig(options));
+ const auto hashTableIndex = service.AddHashTable(CreateHashTableConfig(options));
+
+ // Insert data set.
+ auto context = service.GetContext();
+ auto& hashTable = context[hashTableIndex];
+
+ std::vector<std::uint32_t> randomIndices(options.m_dataSetSize);
+ for (std::uint32_t i = 0U; i < options.m_dataSetSize; ++i)
+ {
+ randomIndices[i] = i;
+ }
+ if (options.m_numThreads > 0)
+ {
+ // Randomize index only if multiple threads are running
+ // not to skew the results.
+ std::random_shuffle(randomIndices.begin(), randomIndices.end());
+ }
+
+ for (std::size_t i = 0; i < options.m_dataSetSize; ++i)
+ {
+ auto key = dataGenerator->GetKey(randomIndices[i]);
+ auto val = dataGenerator->GetValue(randomIndices[i]);
+
+ hashTable.Add(key, val);
+ }
+
+ std::vector<PerThreadInfoForReadTest> allInfo;
+ allInfo.resize(options.m_numThreads);
+
+ SynchronizedTimer overallTimer;
+ std::mutex mutex;
+ std::condition_variable cv;
+ const auto isCachingModule = options.IsCachingModule();
+ bool isReady = false;
+
+ const std::size_t dataSetSizePerThread = options.m_dataSetSize / options.m_numThreads;
+ for (std::uint16_t i = 0; i < options.m_numThreads; ++i)
+ {
+ auto& info = allInfo[i];
+
+ std::size_t startIndex = i * dataSetSizePerThread;
+ info.m_dataSetSize = (i + 1 == options.m_numThreads)
+ ? options.m_dataSetSize - startIndex
+ : dataSetSizePerThread;
+
+ info.m_thread = std::thread([=, &service, &dataGenerator, &info, &mutex, &cv, &isReady, &overallTimer]
+ {
+ {
+ std::unique_lock<std::mutex> lock(mutex);
+ cv.wait(lock, [&] { return isReady == true; });
+ }
+
+ overallTimer.Start();
+
+ Timer totalTimer;
+ Timer getTimer;
+
+ std::size_t iteration = 0;
+ bool isDone = false;
+
+ while (!isDone)
+ {
+ auto context = service.GetContext();
+ auto& hashTable = context[hashTableIndex];
+
+ for (std::uint32_t j = 0; !isDone && j < options.m_numIterationsPerGetContext; ++j)
+ {
+ auto key = dataGenerator->GetKey(startIndex + iteration);
+ L4::IReadOnlyHashTable::Value val;
+
+ if (!hashTable.Get(key, val) && !isCachingModule)
+ {
+ throw std::runtime_error("Look up failure is not allowed in this test.");
+ }
+
+ isDone = (++iteration == info.m_dataSetSize);
+ }
+ }
+
+ overallTimer.End();
+
+ info.m_totalTime = totalTimer.GetElapsedTime();
+ });
+ }
+
+ {
+ std::unique_lock<std::mutex> lock(mutex);
+ isReady = true;
+ }
+
+ // Now, start the benchmarking for all threads.
+ cv.notify_all();
+
+ for (auto& info : allInfo)
+ {
+ info.m_thread.join();
+ }
+
+ PrintHashTableCounters(service.GetContext()[hashTableIndex].GetPerfData());
+
+ printf("Result:\n");
+ printf(" | Total | |\n");
+ printf(" | micros/op | microseconds | DataSetSize |\n");
+ printf(" -----------------------------------------------------------\n");
+
+ for (std::size_t i = 0; i < allInfo.size(); ++i)
+ {
+ const auto& info = allInfo[i];
+
+ printf(" Thread #%llu | %11.3f | %14llu | %13llu |\n",
+ (i + 1),
+ static_cast<double>(info.m_totalTime.count()) / info.m_dataSetSize,
+ info.m_totalTime.count(),
+ info.m_dataSetSize);
+ }
+ printf(" -----------------------------------------------------------\n");
+
+ printf(" Overall | %11.3f | %14llu | %13llu |\n",
+ static_cast<double>(overallTimer.GetElapsedTime().count()) / options.m_dataSetSize,
+ overallTimer.GetElapsedTime().count(),
+ options.m_dataSetSize);
+}
+
+
+void WritePerfTest(const CommandLineOptions& options)
+{
+ if (options.m_module == "overwrite-perf")
+ {
+ printf("Performing overwrite-perf (writing data with unique keys, then overwrite data with same keys):\n");
+ }
+ else
+ {
+ printf("Performing write-perf (writing data with unique keys):\n");
+ }
+
+ PrintOptions(options);
+
+ auto dataGenerator = std::make_unique<DataGenerator>(
+ options.m_dataSetSize,
+ options.m_keySize,
+ options.m_valueSize,
+ options.m_randomizeValueSize);
+
+ L4::LocalMemory::HashTableService service(CreateEpochManagerConfig(options));
+ const auto hashTableIndex = service.AddHashTable(CreateHashTableConfig(options));
+
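+ // For overwrite-perf, pre-populate the hash table so that the timed loop below
+ // overwrites records with existing keys instead of inserting new ones.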
+ if (options.m_module == "overwrite-perf")
+ {
+ std::vector<std::uint32_t> randomIndices(options.m_dataSetSize);
+ for (std::uint32_t i = 0U; i < options.m_dataSetSize; ++i)
+ {
+ randomIndices[i] = i;
+ }
+ if (options.m_numThreads > 0)
+ {
+ // Randomize index only if multiple threads are running
+ // not to skew the results.
+ std::random_shuffle(randomIndices.begin(), randomIndices.end());
+ }
+
+ auto context = service.GetContext();
+ auto& hashTable = context[hashTableIndex];
+
+ for (std::size_t i = 0; i < options.m_dataSetSize; ++i)
+ {
+ const auto index = randomIndices[i];
+ auto key = dataGenerator->GetKey(index);
+ auto val = dataGenerator->GetValue(index);
+
+ hashTable.Add(key, val);
+ }
+ }
+
+ std::vector<PerThreadInfoForWriteTest> allInfo;
+ allInfo.resize(options.m_numThreads);
+
+ SynchronizedTimer overallTimer;
+ std::mutex mutex;
+ std::condition_variable cv;
+ bool isReady = false;
+
+ const std::size_t dataSetSizePerThread = options.m_dataSetSize / options.m_numThreads;
+ for (std::uint16_t i = 0; i < options.m_numThreads; ++i)
+ {
+ auto& info = allInfo[i];
+
+ std::size_t startIndex = i * dataSetSizePerThread;
+ info.m_dataSetSize = (i + 1 == options.m_numThreads)
+ ? options.m_dataSetSize - startIndex
+ : dataSetSizePerThread;
+
+ info.m_thread = std::thread([=, &service, &dataGenerator, &info, &mutex, &cv, &isReady, &overallTimer]
+ {
+ {
+ std::unique_lock<std::mutex> lock(mutex);
+ cv.wait(lock, [&] { return isReady == true; });
+ }
+
+ overallTimer.Start();
+
+ Timer totalTimer;
+ Timer addTimer;
+
+ std::size_t iteration = 0;
+ bool isDone = false;
+
+ while (!isDone)
+ {
+ auto context = service.GetContext();
+ auto& hashTable = context[hashTableIndex];
+
+ for (std::uint32_t j = 0; !isDone && j < options.m_numIterationsPerGetContext; ++j)
+ {
+ const auto index = startIndex + iteration;
+ auto key = dataGenerator->GetKey(index);
+ auto val = dataGenerator->GetValue(index);
+
+ hashTable.Add(key, val);
+
+ isDone = (++iteration == info.m_dataSetSize);
+ }
+ }
+
+ info.m_totalTime = totalTimer.GetElapsedTime();
+ overallTimer.End();
+ });
+ }
+
+ {
+ std::unique_lock<std::mutex> lock(mutex);
+ isReady = true;
+ }
+
+ // Now, start the benchmarking for all threads.
+ cv.notify_all();
+
+ for (auto& info : allInfo)
+ {
+ info.m_thread.join();
+ }
+
+ PrintHashTableCounters(service.GetContext()[hashTableIndex].GetPerfData());
+
+ printf("Result:\n");
+ printf(" | Total | |\n");
+ printf(" | micros/op | microseconds | DataSetSize |\n");
+ printf(" -----------------------------------------------------------\n");
+
+ for (std::size_t i = 0; i < allInfo.size(); ++i)
+ {
+ const auto& info = allInfo[i];
+
+ printf(" Thread #%llu | %11.3f | %14llu | %13llu |\n",
+ (i + 1),
+ static_cast<double>(info.m_totalTime.count()) / info.m_dataSetSize,
+ info.m_totalTime.count(),
+ info.m_dataSetSize);
+ }
+ printf(" -----------------------------------------------------------\n");
+
+ printf(" Overall | %11.3f | %14llu | %13llu |\n",
+ static_cast<double>(overallTimer.GetElapsedTime().count()) / options.m_dataSetSize,
+ overallTimer.GetElapsedTime().count(),
+ options.m_dataSetSize);
+
+ if (options.m_numThreads == 1)
+ {
+ auto& perfData = service.GetContext()[hashTableIndex].GetPerfData();
+ std::uint64_t totalBytes = perfData.Get(L4::HashTablePerfCounter::TotalKeySize)
+ + perfData.Get(L4::HashTablePerfCounter::TotalValueSize);
+
+ auto& info = allInfo[0];
+
+ double opsPerSec = static_cast<double>(info.m_dataSetSize) / info.m_totalTime.count() * 1000000.0;
+ double MBPerSec = static_cast<double>(totalBytes) / info.m_totalTime.count();
+ printf(" %10.3f ops/sec %10.3f MB/sec\n", opsPerSec, MBPerSec);
+ }
+}
+
+
+CommandLineOptions Parse(int argc, char** argv)
+{
+ namespace po = boost::program_options;
+
+ po::options_description general("General options");
+ general.add_options()
+ ("help", "produce a help message")
+ ("help-module", po::value(),
+ "produce a help for the following modules:\n"
+ " write-perf\n"
+ " overwrite-perf\n"
+ " read-perf\n"
+ " cache-read-perf\n"
+ " cache-write-perf\n")
+ ("module", po::value(),
+ "Runs the given module");
+
+ po::options_description benchmarkOptions("Benchmark options.");
+ benchmarkOptions.add_options()
+ ("dataSetSize", po::value()->default_value(CommandLineOptions::c_defaultDataSetSize), "data set size")
+ ("numBuckets", po::value()->default_value(CommandLineOptions::c_defaultNumBuckets), "number of buckets")
+ ("keySize", po::value()->default_value(CommandLineOptions::c_defaultKeySize), "key size in bytes")
+ ("valueSize", po::value()->default_value(CommandLineOptions::c_defaultValueSize), "value size in bytes")
+ ("randomizeValueSize", "randomize value size")
+ ("numIterationsPerGetContext", po::value()->default_value(CommandLineOptions::c_defaultNumIterationsPerGetContext), "number of iterations per GetContext()")
+ ("numThreads", po::value()->default_value(CommandLineOptions::c_defaultNumThreads), "number of threads to create")
+ ("epochProcessingInterval", po::value()->default_value(CommandLineOptions::c_defaultEpochProcessingIntervalInMilli), "epoch processing interval (ms)")
+ ("numActionsQueue", po::value()->default_value(CommandLineOptions::c_defaultNumActionsQueue), "number of actions queue")
+ ("recordTimeToLive", po::value()->default_value(CommandLineOptions::c_defaultRecordTimeToLiveInSeconds), "record time to live (s)")
+ ("cacheSize", po::value()->default_value(CommandLineOptions::c_defaultCacheSizeInBytes), "cache size in bytes")
+ ("forceTimeBasedEviction", po::value()->default_value(CommandLineOptions::c_defaultForceTimeBasedEviction), "force time based eviction");
+
+ po::options_description all("Allowed options");
+ all.add(general).add(benchmarkOptions);
+
+ po::variables_map vm;
+ po::store(po::parse_command_line(argc, argv, all), vm);
+ po::notify(vm);
+
+ CommandLineOptions options;
+
+ if (vm.count("help"))
+ {
+ std::cout << all;
+ }
+ else if (vm.count("module"))
+ {
+ options.m_module = vm["module"].as<std::string>();
+
+ if (vm.count("dataSetSize"))
+ {
+ options.m_dataSetSize = vm["dataSetSize"].as<std::size_t>();
+ }
+ if (vm.count("numBuckets"))
+ {
+ options.m_numBuckets = vm["numBuckets"].as<std::uint32_t>();
+ }
+ if (vm.count("keySize"))
+ {
+ options.m_keySize = vm["keySize"].as<std::uint16_t>();
+ }
+ if (vm.count("valueSize"))
+ {
+ options.m_valueSize = vm["valueSize"].as<std::uint32_t>();
+ }
+ if (vm.count("randomizeValueSize"))
+ {
+ options.m_randomizeValueSize = true;
+ }
+ if (vm.count("numIterationsPerGetContext"))
+ {
+ options.m_numIterationsPerGetContext = vm["numIterationsPerGetContext"].as<std::uint32_t>();
+ }
+ if (vm.count("numThreads"))
+ {
+ options.m_numThreads = vm["numThreads"].as<std::uint16_t>();
+ }
+ if (vm.count("epochProcessingInterval"))
+ {
+ options.m_epochProcessingIntervalInMilli = vm["epochProcessingInterval"].as<std::uint32_t>();
+ }
+ if (vm.count("numActionsQueue"))
+ {
+ options.m_numActionsQueue = static_cast<std::uint8_t>(vm["numActionsQueue"].as<std::uint16_t>());
+ }
+ if (vm.count("recordTimeToLive"))
+ {
+ options.m_recordTimeToLiveInSeconds = vm["recordTimeToLive"].as<std::uint32_t>();
+ }
+ if (vm.count("cacheSize"))
+ {
+ options.m_cacheSizeInBytes = vm["cacheSize"].as<std::uint64_t>();
+ }
+ if (vm.count("forceTimeBasedEviction"))
+ {
+ options.m_forceTimeBasedEviction = vm["forceTimeBasedEviction"].as<bool>();
+ }
+ }
+ else
+ {
+ std::cout << all;
+ }
+
+ return options;
+}
+
+
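+// Example invocation (module and option names as defined in Parse() above; the
+// L4.Benchmark target name comes from the project file):
+//   L4.Benchmark --module write-perf --dataSetSize 1000000 --numThreads 4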
+int main(int argc, char** argv)
+{
+ auto options = Parse(argc, argv);
+
+ if (options.m_module.empty())
+ {
+ return 0;
+ }
+
+ std::srand(static_cast<unsigned int>(time(NULL)));
+
+ PrintHardwareInfo();
+
+ if (options.m_module == "write-perf"
+ || options.m_module == "overwrite-perf"
+ || options.m_module == "cache-write-perf")
+ {
+ WritePerfTest(options);
+ }
+ else if (options.m_module == "read-perf"
+ || options.m_module == "cache-read-perf")
+ {
+ ReadPerfTest(options);
+ }
+ else
+ {
+ std::cout << "Unknown module: " << options.m_module << std::endl;
+ }
+
+ return 0;
+}
+
diff --git a/Benchmark/packages.config b/Benchmark/packages.config
new file mode 100644
index 0000000..6f4997b
--- /dev/null
+++ b/Benchmark/packages.config
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Examples/Examples.vcxproj b/Examples/Examples.vcxproj
new file mode 100644
index 0000000..373f483
--- /dev/null
+++ b/Examples/Examples.vcxproj
@@ -0,0 +1,84 @@
+
+
+
+
+ Debug
+ x64
+
+
+ Release
+ x64
+
+
+
+ {9672B9F5-84A6-4063-972C-A4DC23200B42}
+ Examples
+
+
+
+ Application
+ true
+ v140
+
+
+ Application
+ false
+ v140
+
+
+
+
+
+
+
+
+ true
+
+
+ false
+
+
+
+ $(SolutionDir)inc\L4;%(AdditionalIncludeDirectories)
+ _SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)
+ MaxSpeed
+ AnySuitable
+ true
+
+
+
+
+ Console
+ true
+
+
+
+
+
+
+
+ {b7846115-88f1-470b-a625-9de0c29229bb}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
+
+
+
+
+
\ No newline at end of file
diff --git a/Examples/Examples.vcxproj.filters b/Examples/Examples.vcxproj.filters
new file mode 100644
index 0000000..643b3c5
--- /dev/null
+++ b/Examples/Examples.vcxproj.filters
@@ -0,0 +1,39 @@
+
+
+
+
+ {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
+ cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
+
+
+ {93995380-89BD-4b04-88EB-625FBE52EBFB}
+ h;hh;hpp;hxx;hm;inl;inc;xsd
+
+
+ {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
+ rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
+
+
+
+
+ Source Files
+
+
+
+
+ Header Files
+
+
+ Header Files
+
+
+ Header Files
+
+
+ Header Files
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Examples/main.cpp b/Examples/main.cpp
new file mode 100644
index 0000000..369fa4b
--- /dev/null
+++ b/Examples/main.cpp
@@ -0,0 +1,98 @@
+#include "Log/IPerfLogger.h"
+#include "LocalMemory/HashTableService.h"
+#include <chrono>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <string>
+
+
+using namespace L4;
+
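+// Sample IPerfLogger implementation that dumps every server-level and
+// per-hash-table counter to stdout.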
+class ConsolePerfLogger : public IPerfLogger
+{
+ virtual void Log(const IData& perfData) override
+ {
+ for (auto i = 0; i < static_cast<int>(ServerPerfCounter::Count); ++i)
+ {
+ std::cout << c_serverPerfCounterNames[i] << ": "
+ << perfData.GetServerPerfData().Get(static_cast<ServerPerfCounter>(i)) << std::endl;
+ }
+
+ const auto& hashTablesPerfData = perfData.GetHashTablesPerfData();
+
+ for (const auto& entry : hashTablesPerfData)
+ {
+ std::cout << "Hash table '" << entry.first << "'" << std::endl;
+
+ for (auto j = 0; j < static_cast<int>(HashTablePerfCounter::Count); ++j)
+ {
+ std::cout << c_hashTablePerfCounterNames[j] << ": "
+ << entry.second.get().Get(static_cast<HashTablePerfCounter>(j)) << std::endl;
+ }
+ }
+
+ std::cout << std::endl;
+ }
+};
+
+
+
+int main(int argc, char** argv)
+{
+ (void)argc;
+ (void)argv;
+
+ LocalMemory::HashTableService service;
+
+ auto index = service.AddHashTable(
+ HashTableConfig("Table1", HashTableConfig::Setting{ 1000000 }));
+
+ static constexpr int keySize = 100;
+ static constexpr int valSize = 2000;
+
+ char bufKey[keySize];
+ char bufVal[valSize];
+
+ IWritableHashTable::Key key;
+ key.m_data = reinterpret_cast<const std::uint8_t*>(bufKey);
+ IWritableHashTable::Value val;
+ val.m_data = reinterpret_cast<const std::uint8_t*>(bufVal);
+
+ std::ifstream file;
+ file.open(argv[1], std::ifstream::in);
+ std::cout << "Opening " << argv[1] << std::endl;
+ static const int BufferLength = 4096;
+
+ char buffer[BufferLength];
+
+ auto totalTime = 0U;
+ int numLines = 0;
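+ // Each line of the input file is expected to be a tab-separated key/value pair.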
+ while (file.getline(buffer, BufferLength))
+ {
+ auto context = service.GetContext();
+
+ auto& hashTable = context[index];
+
+ char* nextToken = nullptr;
+ const char* keyStr = strtok_s(buffer, "\t", &nextToken);
+ const char* valStr = strtok_s(nullptr, "\t", &nextToken);
+
+ key.m_data = reinterpret_cast<const std::uint8_t*>(keyStr);
+ key.m_size = static_cast<std::uint16_t>(strlen(keyStr));
+
+ val.m_data = reinterpret_cast<const std::uint8_t*>(valStr);
+ val.m_size = static_cast<std::uint32_t>(strlen(valStr));
+
+ hashTable.Add(key, val);
+
+ ++numLines;
+ }
+
+ std::cout<< "Total Add() time" << totalTime << std::endl;
+ std::cout << "Added " << numLines << " lines." << std::endl;
+
+ return 0;
+}
+
diff --git a/Examples/packages.config b/Examples/packages.config
new file mode 100644
index 0000000..1e79042
--- /dev/null
+++ b/Examples/packages.config
@@ -0,0 +1,5 @@
+
+
+
+
+
\ No newline at end of file
diff --git a/L4.sln b/L4.sln
new file mode 100644
index 0000000..8424867
--- /dev/null
+++ b/L4.sln
@@ -0,0 +1,40 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 14
+VisualStudioVersion = 14.0.25420.1
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "L4", "Build\L4.vcxproj", "{B7846115-88F1-470B-A625-9DE0C29229BB}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Unittests", "Unittests\Unittests.vcxproj", "{8122529E-61CB-430B-A089-B12E63FC361B}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Examples", "Examples\Examples.vcxproj", "{9672B9F5-84A6-4063-972C-A4DC23200B42}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Benchmark", "Benchmark\Benchmark.vcxproj", "{B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|x64 = Debug|x64
+ Release|x64 = Release|x64
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {B7846115-88F1-470B-A625-9DE0C29229BB}.Debug|x64.ActiveCfg = Debug|x64
+ {B7846115-88F1-470B-A625-9DE0C29229BB}.Debug|x64.Build.0 = Debug|x64
+ {B7846115-88F1-470B-A625-9DE0C29229BB}.Release|x64.ActiveCfg = Release|x64
+ {B7846115-88F1-470B-A625-9DE0C29229BB}.Release|x64.Build.0 = Release|x64
+ {8122529E-61CB-430B-A089-B12E63FC361B}.Debug|x64.ActiveCfg = Debug|x64
+ {8122529E-61CB-430B-A089-B12E63FC361B}.Debug|x64.Build.0 = Debug|x64
+ {8122529E-61CB-430B-A089-B12E63FC361B}.Release|x64.ActiveCfg = Release|x64
+ {8122529E-61CB-430B-A089-B12E63FC361B}.Release|x64.Build.0 = Release|x64
+ {9672B9F5-84A6-4063-972C-A4DC23200B42}.Debug|x64.ActiveCfg = Debug|x64
+ {9672B9F5-84A6-4063-972C-A4DC23200B42}.Debug|x64.Build.0 = Debug|x64
+ {9672B9F5-84A6-4063-972C-A4DC23200B42}.Release|x64.ActiveCfg = Release|x64
+ {9672B9F5-84A6-4063-972C-A4DC23200B42}.Release|x64.Build.0 = Release|x64
+ {B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}.Debug|x64.ActiveCfg = Debug|x64
+ {B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}.Debug|x64.Build.0 = Debug|x64
+ {B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}.Release|x64.ActiveCfg = Release|x64
+ {B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}.Release|x64.Build.0 = Release|x64
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/Unittests/CacheHashTableTest.cpp b/Unittests/CacheHashTableTest.cpp
new file mode 100644
index 0000000..5c6bb1d
--- /dev/null
+++ b/Unittests/CacheHashTableTest.cpp
@@ -0,0 +1,615 @@
+#include "stdafx.h"
+#include "Utils.h"
+#include "Mocks.h"
+#include "CheckedAllocator.h"
+#include "L4/HashTable/Common/Record.h"
+#include "L4/HashTable/Cache/Metadata.h"
+#include "L4/HashTable/Cache/HashTable.h"
+
+#include <algorithm>
+#include <chrono>
+#include <string>
+#include <vector>
+
+namespace L4
+{
+namespace UnitTests
+{
+
+using namespace HashTable::Cache;
+using namespace std::chrono;
+
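+// Clock whose epoch time is advanced manually by the tests, substituted for the
+// real clock so record expiration can be exercised deterministically.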
+class MockClock
+{
+public:
+ MockClock() = default;
+
+ seconds GetCurrentEpochTime() const
+ {
+ return s_currentEpochTime;
+ }
+
+ static void SetEpochTime(seconds time)
+ {
+ s_currentEpochTime = time;
+ }
+
+ static void IncrementEpochTime(seconds increment)
+ {
+ s_currentEpochTime += increment;
+ }
+
+private:
+ static seconds s_currentEpochTime;
+};
+
+seconds MockClock::s_currentEpochTime{ 0U };
+
+
+class CacheHashTableTestFixture
+{
+public:
+ using Allocator = CheckedAllocator<>;
+ using CacheHashTable = WritableHashTable<Allocator, MockClock>;
+ using ReadOnlyCacheHashTable = ReadOnlyHashTable<Allocator, MockClock>;
+ using HashTable = CacheHashTable::HashTable;
+
+ CacheHashTableTestFixture()
+ : m_allocator{}
+ , m_hashTable { HashTable::Setting{ 100U }, m_allocator }
+ , m_epochManager{}
+ {
+ MockClock::SetEpochTime(seconds{ 0U });
+ }
+
+ CacheHashTableTestFixture(const CacheHashTableTestFixture&) = delete;
+ CacheHashTableTestFixture& operator=(const CacheHashTableTestFixture&) = delete;
+
+protected:
+ template <typename TCacheHashTable>
+ bool Get(TCacheHashTable& hashTable, const std::string& key, IReadOnlyHashTable::Value& value)
+ {
+ return hashTable.Get(
+ Utils::ConvertFromString(key.c_str()),
+ value);
+ }
+
+ void Add(CacheHashTable& hashTable, const std::string& key, const std::string& value)
+ {
+ hashTable.Add(
+ Utils::ConvertFromString(key.c_str()),
+ Utils::ConvertFromString(value.c_str()));
+ }
+
+ void Remove(CacheHashTable& hashTable, const std::string& key)
+ {
+ hashTable.Remove(Utils::ConvertFromString(key.c_str()));
+ }
+
+ template <typename TCacheHashTable>
+ bool CheckRecord(TCacheHashTable& hashTable, const std::string& key, const std::string& expectedValue)
+ {
+ IReadOnlyHashTable::Value value;
+ return Get(hashTable, key, value) && AreTheSame(value, expectedValue);
+ }
+
+ bool AreTheSame(const IReadOnlyHashTable::Value& actual, const std::string& expected)
+ {
+ return (actual.m_size == expected.size())
+ && !memcmp(actual.m_data, expected.c_str(), actual.m_size);
+ }
+
+ template <typename Blob>
+ bool Exist(const Blob& actual, const std::vector<std::string>& expectedSet)
+ {
+ const std::string actualStr(
+ reinterpret_cast(actual.m_data),
+ actual.m_size);
+
+ return std::find(expectedSet.cbegin(), expectedSet.cend(), actualStr) != expectedSet.cend();
+ }
+
+ Allocator m_allocator;
+ HashTable m_hashTable;
+ MockEpochManager m_epochManager;
+ MockClock m_clock;
+};
+
+
+BOOST_AUTO_TEST_SUITE(CacheHashTableTests)
+
+
+BOOST_AUTO_TEST_CASE(MetadataTest)
+{
+ std::vector<std::uint8_t> buffer(20);
+
+ // The following will test with 1..8 byte alignments.
+ for (std::uint16_t i = 0U; i < 8U; ++i)
+ {
+ std::uint32_t* metadataBuffer = reinterpret_cast<std::uint32_t*>(buffer.data() + i);
+ seconds currentEpochTime{ 0x7FABCDEF };
+
+ Metadata metadata{ metadataBuffer, currentEpochTime };
+
+ BOOST_CHECK(currentEpochTime == metadata.GetEpochTime());
+
+ // 10 seconds have elapsed.
+ currentEpochTime += seconds{ 10U };
+
+ // Check the expiration based on the time to live value.
+ BOOST_CHECK(!metadata.IsExpired(currentEpochTime, seconds{ 15 }));
+ BOOST_CHECK(!metadata.IsExpired(currentEpochTime, seconds{ 10 }));
+ BOOST_CHECK(metadata.IsExpired(currentEpochTime, seconds{ 5U }));
+
+ // Test access state.
+ BOOST_CHECK(!metadata.IsAccessed());
+
+ metadata.UpdateAccessStatus(true);
+ BOOST_CHECK(metadata.IsAccessed());
+
+ metadata.UpdateAccessStatus(false);
+ BOOST_CHECK(!metadata.IsAccessed());
+ }
+}
+
+
+BOOST_FIXTURE_TEST_CASE(ExpirationTest, CacheHashTableTestFixture)
+{
+ // Don't care about evict in this test case, so make the cache size big.
+ constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF;
+ constexpr seconds c_recordTimeToLive{ 20U };
+
+ CacheHashTable hashTable(
+ m_hashTable,
+ m_epochManager,
+ c_maxCacheSizeInBytes,
+ c_recordTimeToLive,
+ false);
+
+ const std::vector<std::pair<std::string, std::string>> c_keyValuePairs =
+ {
+ { "key1", "value1" },
+ { "key2", "value2" },
+ { "key3", "value3" },
+ { "key4", "value4" },
+ { "key5", "value5" }
+ };
+
+ // Add 5 records at a different epoch time (10 seconds increment).
+ for (const auto& pair : c_keyValuePairs)
+ {
+ MockClock::IncrementEpochTime(seconds{ 10 });
+ Add(hashTable, pair.first, pair.second);
+
+ // Make sure the records can be retrieved right away. The record has not been
+ // expired since the clock hasn't moved yet.
+ BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second));
+ }
+
+ const auto& perfData = hashTable.GetPerfData();
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::CacheHitCount, 5 }
+ });
+
+ // Now we have the following data sets:
+ // | Key | Value | Creation time |
+ // | key1 | value1 | 10 |
+ // | key2 | value2 | 20 |
+ // | key3 | value3 | 30 |
+ // | key4 | value4 | 40 |
+ // | key5 | value5 | 50 |
+ // And the current clock is at 50.
+
+ // Do look ups and check expired records.
+ for (const auto& pair : c_keyValuePairs)
+ {
+ IReadOnlyHashTable::Value value;
+ // Our time to live value is 20, so the key1 and key2 records should be expired.
+ if (pair.first == "key1" || pair.first == "key2")
+ {
+ BOOST_CHECK(!Get(hashTable, pair.first, value));
+ }
+ else
+ {
+ BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second));
+ }
+ }
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::CacheHitCount, 8 },
+ { HashTablePerfCounter::CacheMissCount, 2 }
+ });
+
+ MockClock::IncrementEpochTime(seconds{ 100 });
+
+ // All the records should be expired now.
+ for (const auto& pair : c_keyValuePairs)
+ {
+ IReadOnlyHashTable::Value value;
+ BOOST_CHECK(!Get(hashTable, pair.first, value));
+ }
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::CacheHitCount, 8 },
+ { HashTablePerfCounter::CacheMissCount, 7 }
+ });
+}
+
+
+BOOST_FIXTURE_TEST_CASE(CacheHashTableIteratorTest, CacheHashTableTestFixture)
+{
+ // Don't care about evict in this test case, so make the cache size big.
+ constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF;
+ constexpr seconds c_recordTimeToLive{ 20U };
+
+ CacheHashTable hashTable(
+ m_hashTable,
+ m_epochManager,
+ c_maxCacheSizeInBytes,
+ c_recordTimeToLive,
+ false);
+
+ const std::vector<std::string> c_keys = { "key1", "key2", "key3", "key4", "key5" };
+ const std::vector<std::string> c_vals = { "val1", "val2", "val3", "val4", "val5" };
+
+ // Add 5 records at a different epoch time (3 seconds increment).
+ for (std::size_t i = 0; i < c_keys.size(); ++i)
+ {
+ MockClock::IncrementEpochTime(seconds{ 3 });
+ Add(hashTable, c_keys[i], c_vals[i]);
+ }
+
+ // Now we have the following data sets:
+ // | Key | Value | Creation time |
+ // | key1 | value1 | 3 |
+ // | key2 | value2 | 6 |
+ // | key3 | value3 | 9 |
+ // | key4 | value4 | 12 |
+ // | key5 | value5 | 15 |
+ // And the current clock is at 15.
+
+ auto iterator = hashTable.GetIterator();
+ std::uint16_t numRecords = 0;
+ while (iterator->MoveNext())
+ {
+ ++numRecords;
+ BOOST_CHECK(Exist(iterator->GetKey(), c_keys));
+ BOOST_CHECK(Exist(iterator->GetValue(), c_vals));
+ }
+
+ BOOST_CHECK_EQUAL(numRecords, 5);
+
+ // The clock becomes 30 and key1, key2 and key3 should expire.
+ MockClock::IncrementEpochTime(seconds{ 15 });
+
+ iterator = hashTable.GetIterator();
+ numRecords = 0;
+ while (iterator->MoveNext())
+ {
+ ++numRecords;
+ BOOST_CHECK(
+ Exist(
+ iterator->GetKey(),
+ std::vector<std::string>{ c_keys.cbegin() + 2, c_keys.cend() }));
+ BOOST_CHECK(
+ Exist(
+ iterator->GetValue(),
+ std::vector<std::string>{ c_vals.cbegin() + 2, c_vals.cend() }));
+ }
+
+ BOOST_CHECK_EQUAL(numRecords, 2);
+
+ // The clock becomes 40 and all records should be expired now.
+ MockClock::IncrementEpochTime(seconds{ 10 });
+
+ iterator = hashTable.GetIterator();
+ while (iterator->MoveNext())
+ {
+ BOOST_CHECK(false);
+ }
+}
+
+
+BOOST_FIXTURE_TEST_CASE(TimeBasedEvictionTest, CacheHashTableTestFixture)
+{
+ // We only care about time-based eviction in this test, so make the cache size big.
+ constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF;
+ constexpr seconds c_recordTimeToLive{ 10U };
+
+ // Hash table with one bucket makes testing the time-based eviction easy.
+ HashTable internalHashTable{ HashTable::Setting{ 1 }, m_allocator };
+ CacheHashTable hashTable(
+ internalHashTable,
+ m_epochManager,
+ c_maxCacheSizeInBytes,
+ c_recordTimeToLive,
+ true);
+
+ const std::vector<std::pair<std::string, std::string>> c_keyValuePairs =
+ {
+ { "key1", "value1" },
+ { "key2", "value2" },
+ { "key3", "value3" },
+ { "key4", "value4" },
+ { "key5", "value5" }
+ };
+
+ for (const auto& pair : c_keyValuePairs)
+ {
+ Add(hashTable, pair.first, pair.second);
+ BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second));
+ }
+
+ const auto& perfData = hashTable.GetPerfData();
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::CacheHitCount, 5 },
+ { HashTablePerfCounter::RecordsCount, 5 },
+ { HashTablePerfCounter::EvictedRecordsCount, 0 },
+ });
+
+ MockClock::IncrementEpochTime(seconds{ 20 });
+
+ // All the records should be expired now.
+ for (const auto& pair : c_keyValuePairs)
+ {
+ IReadOnlyHashTable::Value value;
+ BOOST_CHECK(!Get(hashTable, pair.first, value));
+ }
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::CacheHitCount, 5 },
+ { HashTablePerfCounter::CacheMissCount, 5 },
+ { HashTablePerfCounter::RecordsCount, 5 },
+ { HashTablePerfCounter::EvictedRecordsCount, 0 },
+ });
+
+ // Now try to add one record and all the expired records should be evicted.
+ const auto& keyValuePair = c_keyValuePairs[0];
+ Add(hashTable, keyValuePair.first, keyValuePair.second);
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, 1 },
+ { HashTablePerfCounter::EvictedRecordsCount, 5 },
+ });
+}
+
+
+BOOST_FIXTURE_TEST_CASE(EvictAllRecordsTest, CacheHashTableTestFixture)
+{
+ const auto& perfData = m_hashTable.m_perfData;
+ const auto initialTotalIndexSize = perfData.Get(HashTablePerfCounter::TotalIndexSize);
+ const std::uint64_t c_maxCacheSizeInBytes = 500 + initialTotalIndexSize;
+ constexpr seconds c_recordTimeToLive{ 5 };
+
+ CacheHashTable hashTable{
+ m_hashTable,
+ m_epochManager,
+ c_maxCacheSizeInBytes,
+ c_recordTimeToLive,
+ false };
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::EvictedRecordsCount, 0 },
+ });
+
+ const std::vector<std::pair<std::string, std::string>> c_keyValuePairs =
+ {
+ { "key1", "value1" },
+ { "key2", "value2" },
+ { "key3", "value3" },
+ { "key4", "value4" },
+ { "key5", "value5" }
+ };
+
+ for (const auto& pair : c_keyValuePairs)
+ {
+ Add(hashTable, pair.first, pair.second);
+ }
+
+ using L4::HashTable::RecordSerializer;
+
+ // Variable key/value sizes.
+ const auto recordOverhead = RecordSerializer{ 0U, 0U }.CalculateRecordOverhead();
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, c_keyValuePairs.size() },
+ { HashTablePerfCounter::TotalIndexSize, initialTotalIndexSize + (c_keyValuePairs.size() * recordOverhead) },
+ { HashTablePerfCounter::EvictedRecordsCount, 0 },
+ });
+
+ // Make sure all data records added are present and update the access status for each
+ // record in order to test that accessed records are deleted when it's under memory constraint.
+ for (const auto& pair : c_keyValuePairs)
+ {
+ BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second));
+ }
+
+ // Now insert a record that will force all the records to be evicted due to size.
+ std::string bigRecordKeyStr(10, 'k');
+ std::string bigRecordValStr(500, 'v');
+
+ Add(hashTable, bigRecordKeyStr, bigRecordValStr);
+
+ // Make sure all the previously inserted records are evicted.
+ for (const auto& pair : c_keyValuePairs)
+ {
+ IReadOnlyHashTable::Value value;
+ BOOST_CHECK(!Get(hashTable, pair.first, value));
+ }
+
+ // Make sure the big record is inserted.
+ BOOST_CHECK(CheckRecord(hashTable, bigRecordKeyStr, bigRecordValStr));
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, 1 },
+ { HashTablePerfCounter::TotalIndexSize, initialTotalIndexSize + (1 * recordOverhead) },
+ { HashTablePerfCounter::EvictedRecordsCount, c_keyValuePairs.size() },
+ });
+}
+
+
+BOOST_FIXTURE_TEST_CASE(EvictRecordsBasedOnAccessStatusTest, CacheHashTableTestFixture)
+{
+ const std::uint64_t c_maxCacheSizeInBytes
+ = 2000 + m_hashTable.m_perfData.Get(HashTablePerfCounter::TotalIndexSize);
+ const seconds c_recordTimeToLive{ 5 };
+
+ CacheHashTable hashTable(
+ m_hashTable,
+ m_epochManager,
+ c_maxCacheSizeInBytes,
+ c_recordTimeToLive,
+ false);
+
+ constexpr std::uint32_t c_valueSize = 100;
+ const std::string c_valStr(c_valueSize, 'v');
+ const auto& perfData = hashTable.GetPerfData();
+ std::uint16_t key = 1;
+
+ while ((static_cast(perfData.Get(HashTablePerfCounter::TotalIndexSize))
+ + perfData.Get(HashTablePerfCounter::TotalKeySize)
+ + perfData.Get(HashTablePerfCounter::TotalValueSize)
+ + c_valueSize)
+ < c_maxCacheSizeInBytes)
+ {
+ std::stringstream ss;
+ ss << "key" << key;
+ Add(hashTable, ss.str(), c_valStr);
+ ++key;
+ }
+
+ // Make sure no eviction happened.
+ BOOST_CHECK_EQUAL(m_epochManager.m_numRegisterActionsCalled, 0U);
+
+ // Look up with the "key1" key to update the access state.
+ BOOST_CHECK(CheckRecord(hashTable, "key1", c_valStr));
+
+ // Now add a new key, which triggers an eviction, but deletes other records than the "key1" record.
+ Add(hashTable, "newkey", c_valStr);
+
+ // Now, eviction should have happened.
+ BOOST_CHECK_GE(m_epochManager.m_numRegisterActionsCalled, 1U);
+
+ // The "key1" record should not have been evicted.
+ BOOST_CHECK(CheckRecord(hashTable, "key1", c_valStr));
+
+ // Make sure the new key is actually added.
+ BOOST_CHECK(CheckRecord(hashTable, "newkey", c_valStr));
+}
+
+
+// This is similar to the one in ReadWriteHashTableTest, but necessary since cache store adds the meta values.
+BOOST_FIXTURE_TEST_CASE(FixedKeyValueHashTableTest, CacheHashTableTestFixture)
+{
+ // Fixed 4 byte keys and 6 byte values.
+ std::vector<HashTable::Setting> settings =
+ {
+ HashTable::Setting{ 100, 200, 4, 0 },
+ HashTable::Setting{ 100, 200, 0, 6 },
+ HashTable::Setting{ 100, 200, 4, 6 }
+ };
+
+ for (const auto& setting : settings)
+ {
+ // Don't care about evict in this test case, so make the cache size big.
+ constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF;
+ constexpr seconds c_recordTimeToLive{ 20U };
+
+ HashTable hashTable{ setting, m_allocator };
+ CacheHashTable writableHashTable{
+ hashTable,
+ m_epochManager,
+ c_maxCacheSizeInBytes,
+ c_recordTimeToLive,
+ false };
+
+ ReadOnlyCacheHashTable readOnlyHashTable{ hashTable, c_recordTimeToLive };
+
+ constexpr std::uint8_t c_numRecords = 10;
+
+ // Add records.
+ for (std::uint8_t i = 0; i < c_numRecords; ++i)
+ {
+ Add(writableHashTable, "key" + std::to_string(i), "value" + std::to_string(i));
+ }
+
+ Utils::ValidateCounters(
+ writableHashTable.GetPerfData(),
+ {
+ { HashTablePerfCounter::RecordsCount, 10 },
+ { HashTablePerfCounter::BucketsCount, 100 },
+ { HashTablePerfCounter::TotalKeySize, 40 },
+ { HashTablePerfCounter::TotalValueSize, 100 },
+ { HashTablePerfCounter::MinKeySize, 4 },
+ { HashTablePerfCounter::MaxKeySize, 4 },
+ { HashTablePerfCounter::MinValueSize, 10 },
+ { HashTablePerfCounter::MaxValueSize, 10 }
+ });
+
+ // Validate all the records added.
+ for (std::uint8_t i = 0; i < c_numRecords; ++i)
+ {
+ CheckRecord(readOnlyHashTable, "key" + std::to_string(i), "value" + std::to_string(i));
+ }
+
+ // Remove first half of the records.
+ for (std::uint8_t i = 0; i < c_numRecords / 2; ++i)
+ {
+ Remove(writableHashTable, "key" + std::to_string(i));
+ }
+
+ Utils::ValidateCounters(
+ writableHashTable.GetPerfData(),
+ {
+ { HashTablePerfCounter::RecordsCount, 5 },
+ { HashTablePerfCounter::BucketsCount, 100 },
+ { HashTablePerfCounter::TotalKeySize, 20 },
+ { HashTablePerfCounter::TotalValueSize, 50 }
+ });
+
+ // Verify the records.
+ for (std::uint8_t i = 0; i < c_numRecords; ++i)
+ {
+ if (i < (c_numRecords / 2))
+ {
+ IReadOnlyHashTable::Value value;
+ BOOST_CHECK(!Get(readOnlyHashTable, "key" + std::to_string(i), value));
+ }
+ else
+ {
+ CheckRecord(readOnlyHashTable, "key" + std::to_string(i), "value" + std::to_string(i));
+ }
+ }
+
+ // Expire all the records.
+ MockClock::IncrementEpochTime(seconds{ 100 });
+
+ // Verify the records.
+ for (std::uint8_t i = 0; i < c_numRecords; ++i)
+ {
+ IReadOnlyHashTable::Value value;
+ BOOST_CHECK(!Get(readOnlyHashTable, "key" + std::to_string(i), value));
+ }
+ }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+} // namespace UnitTests
+} // namespace L4
diff --git a/Unittests/CheckedAllocator.h b/Unittests/CheckedAllocator.h
new file mode 100644
index 0000000..7bb986f
--- /dev/null
+++ b/Unittests/CheckedAllocator.h
@@ -0,0 +1,67 @@
+#pragma once
+
+#include <boost/test/unit_test.hpp>
+#include <memory>
+#include <set>
+
+namespace L4
+{
+namespace UnitTests
+{
+
+struct AllocationAddressHolder : public std::set<void*>
+{
+ ~AllocationAddressHolder()
+ {
+ BOOST_REQUIRE(empty());
+ }
+};
+
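+// std::allocator wrapper used by the tests: it records every allocation in a shared
+// address set, and AllocationAddressHolder asserts that everything was deallocated.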
+template <typename T = void*>
+class CheckedAllocator : public std::allocator<T>
+{
+public:
+ using Base = std::allocator;
+
+ template <typename U>
+ struct rebind
+ {
+ typedef CheckedAllocator<U> other;
+ };
+
+ CheckedAllocator()
+ : m_allocationAddresses{ std::make_shared() }
+ {}
+
+ CheckedAllocator(const CheckedAllocator&) = default;
+
+ template <typename U>
+ CheckedAllocator(const CheckedAllocator<U>& other)
+ : m_allocationAddresses{ other.m_allocationAddresses }
+ {}
+
+ template <typename U>
+ CheckedAllocator& operator=(const CheckedAllocator<U>& other)
+ {
+ m_allocationAddresses = other.m_allocationAddresses;
+ return (*this);
+ }
+
+ pointer allocate(std::size_t count, std::allocator<void>::const_pointer hint = 0)
+ {
+ auto address = Base::allocate(count, hint);
+ BOOST_REQUIRE(m_allocationAddresses->insert(address).second);
+ return address;
+ }
+
+ void deallocate(pointer ptr, std::size_t count)
+ {
+ BOOST_REQUIRE(m_allocationAddresses->erase(ptr) == 1);
+ Base::deallocate(ptr, count);
+ }
+
+ std::shared_ptr<AllocationAddressHolder> m_allocationAddresses;
+};
+
+} // namespace UnitTests
+} // namespace L4
\ No newline at end of file
diff --git a/Unittests/EpochManagerTest.cpp b/Unittests/EpochManagerTest.cpp
new file mode 100644
index 0000000..6803b90
--- /dev/null
+++ b/Unittests/EpochManagerTest.cpp
@@ -0,0 +1,187 @@
+#include "stdafx.h"
+#include "Utils.h"
+#include "L4/Epoch/EpochQueue.h"
+#include "L4/Epoch/EpochActionManager.h"
+#include "L4/LocalMemory/EpochManager.h"
+#include "L4/Log/PerfCounter.h"
+#include "L4/Utils/Lock.h"
+#include <atomic>
+#include <thread>
+
+namespace L4
+{
+namespace UnitTests
+{
+
+BOOST_AUTO_TEST_SUITE(EpochManagerTests)
+
+BOOST_AUTO_TEST_CASE(EpochRefManagerTest)
+{
+ std::uint64_t currentEpochCounter = 5U;
+ const std::uint32_t c_epochQueueSize = 100U;
+
+ using EpochQueue = EpochQueue<
+ boost::shared_lock_guard,
+ std::lock_guard>;
+
+ EpochQueue epochQueue(currentEpochCounter, c_epochQueueSize);
+
+ // Initially the ref count at the current epoch counter should be 0.
+ BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 0U);
+
+ EpochRefManager epochManager(epochQueue);
+
+ BOOST_CHECK_EQUAL(epochManager.AddRef(), currentEpochCounter);
+
+ // Validate that a reference count is incremented at the current epoch counter.
+ BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 1U);
+
+ epochManager.RemoveRef(currentEpochCounter);
+
+ // Validate that a reference count is back to 0.
+ BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 0U);
+
+ // Decrementing a reference counter when it is already 0 will result in an exception.
+ CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
+ epochManager.RemoveRef(currentEpochCounter);,
+ "Reference counter is invalid.");
+}
+
+
+BOOST_AUTO_TEST_CASE(EpochCounterManagerTest)
+{
+ std::uint64_t currentEpochCounter = 0U;
+ const std::uint32_t c_epochQueueSize = 100U;
+
+ using EpochQueue = EpochQueue<
+ boost::shared_lock_guard,
+ std::lock_guard>;
+
+ EpochQueue epochQueue(currentEpochCounter, c_epochQueueSize);
+
+ EpochCounterManager epochCounterManager(epochQueue);
+
+ // If RemoveUnreferenceEpochCounters() is called when m_frontIndex and m_backIndex are
+ // the same, it will just return either value.
+ BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter);
+
+ // Add two epoch counts.
+ ++currentEpochCounter;
+ ++currentEpochCounter;
+ epochCounterManager.AddNewEpoch();
+ epochCounterManager.AddNewEpoch();
+
+ BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, 0U);
+ BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
+ BOOST_CHECK_EQUAL(epochQueue.m_refCounts[epochQueue.m_frontIndex], 0U);
+
+ // Since the m_frontIndex's reference count was zero, it will be incremented
+ // all the way to currentEpochCounter.
+ BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter);
+ BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter);
+ BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
+
+ EpochRefManager epochRefManager(epochQueue);
+
+ // Now add a reference at the currentEpochCounter;
+ const auto epochCounterReferenced = epochRefManager.AddRef();
+ BOOST_CHECK_EQUAL(epochCounterReferenced, currentEpochCounter);
+
+ // Calling RemoveUnreferenceEpochCounters() should just return currentEpochCounter
+ // since m_frontIndex and m_backIndex are the same. (Not affected by adding a reference yet.)
+ BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter);
+ BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter);
+ BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
+
+ // Add one epoch count.
+ ++currentEpochCounter;
+ epochCounterManager.AddNewEpoch();
+
+ // Now RemoveUnreferenceEpochCounters() should return epochCounterReferenced because
+ // of the reference count.
+ BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), epochCounterReferenced);
+ BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, epochCounterReferenced);
+ BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
+
+ // Remove the reference.
+ epochRefManager.RemoveRef(epochCounterReferenced);
+
+ // Now RemoveUnreferenceEpochCounters() should return currentEpochCounter and m_frontIndex
+ // should be in sync with m_backIndex.
+ BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter);
+ BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter);
+ BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
+}
+
+
+BOOST_AUTO_TEST_CASE(EpochActionManagerTest)
+{
+ EpochActionManager actionManager(2U);
+
+ bool isAction1Called = false;
+ bool isAction2Called = false;
+
+ auto action1 = [&]() { isAction1Called = true; };
+ auto action2 = [&]() { isAction2Called = true; };
+
+ // Register action1 and action2 at epoch count 5 and 6 respectively.
+ actionManager.RegisterAction(5U, action1);
+ actionManager.RegisterAction(6U, action2);
+
+ BOOST_CHECK(!isAction1Called && !isAction2Called);
+
+ actionManager.PerformActions(4);
+ BOOST_CHECK(!isAction1Called && !isAction2Called);
+
+ actionManager.PerformActions(5);
+ BOOST_CHECK(!isAction1Called && !isAction2Called);
+
+ actionManager.PerformActions(6);
+ BOOST_CHECK(isAction1Called && !isAction2Called);
+
+ actionManager.PerformActions(7);
+ BOOST_CHECK(isAction1Called && isAction2Called);
+}
+
+
+BOOST_AUTO_TEST_CASE(EpochManagerTest)
+{
+ ServerPerfData perfData;
+ LocalMemory::EpochManager epochManager(
+ EpochManagerConfig(100000U, std::chrono::milliseconds(5U), 1U),
+ perfData);
+
+ std::atomic<bool> isActionCalled{ false };
+ auto action = [&]() { isActionCalled = true; };
+
+ auto epochCounterReferenced = epochManager.GetEpochRefManager().AddRef();
+
+ epochManager.RegisterAction(action);
+
+ // Justification for using sleep_for in unit tests:
+ // - EpochManager already uses an internal thread which wakes up and perform a task
+ // in a given interval and when the class is destroyed, there is a mechanism for
+ // waiting for the thread anyway. It's more crucial to test the end to end scenario this way.
+ // - The overall execution time for this test is less than 50 milliseconds.
+ auto initialEpochCounter = perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue);
+ while (perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue) - initialEpochCounter < 2)
+ {
+ std::this_thread::sleep_for(std::chrono::milliseconds(5));
+ }
+
+ BOOST_CHECK(!isActionCalled);
+
+ epochManager.GetEpochRefManager().RemoveRef(epochCounterReferenced);
+
+ initialEpochCounter = perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue);
+ while (perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue) - initialEpochCounter < 2)
+ {
+ std::this_thread::sleep_for(std::chrono::milliseconds(5));
+ }
+
+ BOOST_CHECK(isActionCalled);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+} // namespace UnitTests
+} // namespace L4
diff --git a/Unittests/HashTableManagerTest.cpp b/Unittests/HashTableManagerTest.cpp
new file mode 100644
index 0000000..35c126c
--- /dev/null
+++ b/Unittests/HashTableManagerTest.cpp
@@ -0,0 +1,65 @@
+#include "stdafx.h"
+#include "Utils.h"
+#include "Mocks.h"
+#include "L4/HashTable/Config.h"
+#include "L4/HashTable/IHashTable.h"
+#include "L4/LocalMemory/HashTableManager.h"
+
+namespace L4
+{
+namespace UnitTests
+{
+
+template <typename Store>
+static void ValidateRecord(
+ const Store& store,
+ const char* expectedKeyStr,
+ const char* expectedValueStr)
+{
+ IReadOnlyHashTable::Value actualValue;
+ auto expectedValue = Utils::ConvertFromString(expectedValueStr);
+ BOOST_CHECK(store.Get(Utils::ConvertFromString(expectedKeyStr), actualValue));
+ BOOST_CHECK(actualValue.m_size == expectedValue.m_size);
+ BOOST_CHECK(!memcmp(actualValue.m_data, expectedValue.m_data, expectedValue.m_size));
+}
+
+BOOST_AUTO_TEST_CASE(HashTableManagerTest)
+{
+ MockEpochManager epochManager;
+ PerfData perfData;
+
+ LocalMemory::HashTableManager htManager;
+ const auto ht1Index = htManager.Add(
+ HashTableConfig("HashTable1", HashTableConfig::Setting(100U)),
+ epochManager,
+ std::allocator());
+ const auto ht2Index = htManager.Add(
+ HashTableConfig("HashTable2", HashTableConfig::Setting(200U)),
+ epochManager,
+ std::allocator());
+
+ {
+ auto& hashTable1 = htManager.GetHashTable("HashTable1");
+ hashTable1.Add(
+ Utils::ConvertFromString("HashTable1Key"),
+ Utils::ConvertFromString("HashTable1Value"));
+
+ auto& hashTable2 = htManager.GetHashTable("HashTable2");
+ hashTable2.Add(
+ Utils::ConvertFromString("HashTable2Key"),
+ Utils::ConvertFromString("HashTable2Value"));
+ }
+
+ ValidateRecord(
+ htManager.GetHashTable(ht1Index),
+ "HashTable1Key",
+ "HashTable1Value");
+
+ ValidateRecord(
+ htManager.GetHashTable(ht2Index),
+ "HashTable2Key",
+ "HashTable2Value");
+}
+
+} // namespace UnitTests
+} // namespace L4
\ No newline at end of file
diff --git a/Unittests/HashTableRecordTest.cpp b/Unittests/HashTableRecordTest.cpp
new file mode 100644
index 0000000..3f678c1
--- /dev/null
+++ b/Unittests/HashTableRecordTest.cpp
@@ -0,0 +1,163 @@
+#include "stdafx.h"
+#include "L4/HashTable/Common/Record.h"
+#include "Utils.h"
+#include <boost/optional.hpp>
+#include <string>
+#include <vector>
+
+namespace L4
+{
+namespace UnitTests
+{
+
+using namespace HashTable;
+
+class HashTableRecordTestFixture
+{
+protected:
+ void Run(bool isFixedKey, bool isFixedValue, bool useMetaValue)
+ {
+ BOOST_TEST_MESSAGE(
+ "Running with isFixedKey=" << isFixedKey
+ << ", isFixedValue=" << isFixedValue
+ << ", useMetatValue=" << useMetaValue);
+
+ const std::string key = "TestKey";
+ const std::string value = "TestValue";
+ const std::string metaValue = "TestMetavalue";
+
+ const auto recordOverhead = (isFixedKey ? 0U : c_keyTypeSize) + (isFixedValue ? 0U : c_valueTypeSize);
+
+ Validate(
+ RecordSerializer{
+ isFixedKey ? static_cast<Record::Key::size_type>(key.size()) : 0U,
+ isFixedValue ? static_cast<Record::Value::size_type>(value.size()) : 0U,
+ useMetaValue ? static_cast<Record::Value::size_type>(metaValue.size()) : 0U },
+ key,
+ value,
+ recordOverhead + key.size() + value.size() + (useMetaValue ? metaValue.size() : 0U),
+ recordOverhead,
+ useMetaValue ? boost::optional<std::string>{ metaValue } : boost::none);
+ }
+
+private:
+ void Validate(
+ const RecordSerializer& serializer,
+ const std::string& keyStr,
+ const std::string& valueStr,
+ std::size_t expectedBufferSize,
+ std::size_t expectedRecordOverheadSize,
+ boost::optional<std::string> metadataStr = boost::none)
+ {
+ BOOST_CHECK_EQUAL(serializer.CalculateRecordOverhead(), expectedRecordOverheadSize);
+
+ const auto key = Utils::ConvertFromString(keyStr.c_str());
+ const auto value = Utils::ConvertFromString(valueStr.c_str());
+
+ const auto bufferSize = serializer.CalculateBufferSize(key, value);
+
+ BOOST_REQUIRE_EQUAL(bufferSize, expectedBufferSize);
+ std::vector<std::uint8_t> buffer(bufferSize);
+
+ RecordBuffer* recordBuffer = nullptr;
+
+ if (metadataStr)
+ {
+ auto metaValue = Utils::ConvertFromString<Record::Value>(metadataStr->c_str());
+ recordBuffer = serializer.Serialize(key, value, metaValue, buffer.data(), bufferSize);
+ }
+ else
+ {
+ recordBuffer = serializer.Serialize(key, value, buffer.data(), bufferSize);
+ }
+
+ const auto record = serializer.Deserialize(*recordBuffer);
+
+ // Make sure the serialized data is in a different memory location.
+ BOOST_CHECK(record.m_key.m_data != key.m_data);
+ BOOST_CHECK(record.m_value.m_data != value.m_data);
+
+ BOOST_CHECK(record.m_key == key);
+ if (metadataStr)
+ {
+ const std::string newValueStr = *metadataStr + valueStr;
+ const auto newValue = Utils::ConvertFromString<Record::Value>(newValueStr.c_str());
+ BOOST_CHECK(record.m_value == newValue);
+ }
+ else
+ {
+ BOOST_CHECK(record.m_value == value);
+ }
+ }
+
+ static constexpr std::size_t c_keyTypeSize = sizeof(Record::Key::size_type);
+ static constexpr std::size_t c_valueTypeSize = sizeof(Record::Value::size_type);
+};
+
+BOOST_FIXTURE_TEST_SUITE(HashTableRecordTests, HashTableRecordTestFixture)
+
+BOOST_AUTO_TEST_CASE(RunAll)
+{
+ // Run all permutations for Run(), which takes three booleans.
+ for (int i = 0; i < 8; ++i)
+ {
+ Run(
+ !!((i >> 2) & 1),
+ !!((i >> 1) & 1),
+ !!((i) & 1));
+ }
+}
+
+
+BOOST_AUTO_TEST_CASE(InvalidSizeTest)
+{
+ std::vector<std::uint8_t> buffer(100U);
+
+ RecordSerializer serializer{ 4, 5 };
+
+ const std::string keyStr = "1234";
+ const std::string invalidStr = "999999";
+ const std::string valueStr = "12345";
+
+ const auto key = Utils::ConvertFromString<Record::Key>(keyStr.c_str());
+ const auto value = Utils::ConvertFromString<Record::Value>(valueStr.c_str());
+
+ const auto invalidKey = Utils::ConvertFromString<Record::Key>(invalidStr.c_str());
+ const auto invalidValue = Utils::ConvertFromString<Record::Value>(invalidStr.c_str());
+
+ CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
+ serializer.Serialize(invalidKey, value, buffer.data(), buffer.size()),
+ "Invalid key or value sizes are given.");
+
+ CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
+ serializer.Serialize(key, invalidValue, buffer.data(), buffer.size()),
+ "Invalid key or value sizes are given.");
+
+ CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
+ serializer.Serialize(invalidKey, invalidValue, buffer.data(), buffer.size()),
+ "Invalid key or value sizes are given.");
+
+ // Normal case shouldn't throw an exception.
+ serializer.Serialize(key, value, buffer.data(), buffer.size());
+
+ RecordSerializer serializerWithMetaValue{ 4, 5, 2 };
+ std::uint16_t metadata = 0;
+
+ Record::Value metaValue{
+ reinterpret_cast<const std::uint8_t*>(&metadata),
+ sizeof(metadata) };
+
+ // Normal case shouldn't throw an exception.
+ serializerWithMetaValue.Serialize(key, value, metaValue, buffer.data(), buffer.size());
+
+ // Mismatching size is given.
+ metaValue.m_size = 1;
+ CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
+ serializerWithMetaValue.Serialize(key, value, metaValue, buffer.data(), buffer.size()),
+ "Invalid meta value size is given.");
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+} // namespace UnitTests
+} // namespace L4
\ No newline at end of file
diff --git a/Unittests/HashTableServiceTest.cpp b/Unittests/HashTableServiceTest.cpp
new file mode 100644
index 0000000..2d15008
--- /dev/null
+++ b/Unittests/HashTableServiceTest.cpp
@@ -0,0 +1,52 @@
+#include "stdafx.h"
+#include "Mocks.h"
+#include "Utils.h"
+#include "L4/LocalMemory/HashTableService.h"
+#include <string>
+#include <vector>
+
+namespace L4
+{
+namespace UnitTests
+{
+
+BOOST_AUTO_TEST_CASE(HashTableServiceTest)
+{
+ std::vector<std::pair<std::string, std::string>> dataSet;
+ for (std::uint16_t i = 0U; i < 100; ++i)
+ {
+ dataSet.emplace_back("key" + std::to_string(i), "value" + std::to_string(i));
+ }
+
+ LocalMemory::HashTableService htService;
+ htService.AddHashTable(
+ HashTableConfig("Table1", HashTableConfig::Setting{ 100U }));
+ htService.AddHashTable(
+ HashTableConfig(
+ "Table2",
+ HashTableConfig::Setting{ 1000U },
+ HashTableConfig::Cache{ 1024, std::chrono::seconds{ 1U }, false }));
+
+ for (const auto& data : dataSet)
+ {
+ htService.GetContext()["Table1"].Add(
+ Utils::ConvertFromString<IReadOnlyHashTable::Key>(data.first.c_str()),
+ Utils::ConvertFromString<IReadOnlyHashTable::Value>(data.second.c_str()));
+ }
+
+ // Smoke test for looking up the data.
+ {
+ auto context = htService.GetContext();
+ for (const auto& data : dataSet)
+ {
+ IReadOnlyHashTable::Value val;
+ BOOST_CHECK(context["Table1"].Get(
+ Utils::ConvertFromString<IReadOnlyHashTable::Key>(data.first.c_str()),
+ val));
+ BOOST_CHECK(Utils::ConvertToString(val) == data.second);
+ }
+ }
+}
+
+} // namespace UnitTests
+} // namespace L4
\ No newline at end of file
diff --git a/Unittests/Mocks.h b/Unittests/Mocks.h
new file mode 100644
index 0000000..efd1fe7
--- /dev/null
+++ b/Unittests/Mocks.h
@@ -0,0 +1,164 @@
+#pragma once
+
+#include "stdafx.h"
+#include "L4/Epoch/IEpochActionManager.h"
+#include "L4/Log/PerfLogger.h"
+#include "L4/Serialization/IStream.h"
+
+namespace L4
+{
+namespace UnitTests
+{
+
+class MockPerfLogger : public IPerfLogger
+{
+ virtual void Log(const IData& data) override
+ {
+ (void)data;
+ }
+};
+
+struct MockEpochManager : public IEpochActionManager
+{
+ MockEpochManager()
+ : m_numRegisterActionsCalled(0)
+ {
+ }
+
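+ // The mock runs each registered action immediately instead of deferring it to an epoch boundary,
+ // so tests can observe the number of invocations via m_numRegisterActionsCalled.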
+ virtual void RegisterAction(Action&& action) override
+ {
+ ++m_numRegisterActionsCalled;
+ action();
+ };
+
+ std::uint16_t m_numRegisterActionsCalled;
+};
+
+class StreamBase
+{
+public:
+ using StreamBuffer = std::vector<std::uint8_t>;
+
+protected:
+ StreamBase() = default;
+
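+ // Begin()/End() toggle boolean flags, so calling either of them twice is reported as a test failure.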
+ void Begin()
+ {
+ m_isBeginCalled = !m_isBeginCalled;
+ if (!m_isBeginCalled)
+ {
+ BOOST_FAIL("Begin() is called multiple times.");
+ }
+ }
+
+ void End()
+ {
+ if (!m_isBeginCalled)
+ {
+ BOOST_FAIL("Begin() is not called yet.");
+ }
+
+ m_isEndCalled = !m_isEndCalled;
+ if (!m_isEndCalled)
+ {
+ BOOST_FAIL("End() is called multiple times.");
+ }
+ }
+
+ void Validate()
+ {
+ if (!m_isBeginCalled)
+ {
+ BOOST_FAIL("Begin() is not called yet.");
+ }
+
+ if (m_isEndCalled)
+ {
+ BOOST_FAIL("End() is already called.");
+ }
+ }
+
+ bool IsValid() const
+ {
+ return m_isBeginCalled && m_isEndCalled;
+ }
+
+ bool m_isBeginCalled = false;
+ bool m_isEndCalled = false;
+};
+
+
+class MockStreamWriter : public IStreamWriter, private StreamBase
+{
+public:
+ virtual void Begin() override
+ {
+ StreamBase::Begin();
+ }
+
+ virtual void End() override
+ {
+ StreamBase::End();
+ }
+
+ virtual void Write(const std::uint8_t buffer[], std::size_t bufferSize) override
+ {
+ StreamBase::Validate();
+ m_buffer.insert(m_buffer.end(), buffer, buffer + bufferSize);
+ }
+
+ bool IsValid() const
+ {
+ return StreamBase::IsValid();
+ }
+
+ const StreamBuffer& GetStreamBuffer() const
+ {
+ return m_buffer;
+ }
+
+private:
+ StreamBuffer m_buffer;
+};
+
+
+class MockStreamReader : public IStreamReader, private StreamBase
+{
+public:
+ explicit MockStreamReader(const StreamBuffer& buffer)
+ : m_buffer(buffer),
+ m_bufferIter(m_buffer.cbegin())
+ {
+ }
+
+ virtual void Begin() override
+ {
+ StreamBase::Begin();
+ }
+
+ virtual void End() override
+ {
+ // Make sure everything has been read from the stream.
+ BOOST_REQUIRE(m_bufferIter == m_buffer.end());
+ StreamBase::End();
+ }
+
+ virtual void Read(std::uint8_t buffer[], std::size_t bufferSize) override
+ {
+ StreamBase::Validate();
+ std::copy(m_bufferIter, m_bufferIter + bufferSize, buffer);
+ m_bufferIter += bufferSize;
+ }
+
+ bool IsValid() const
+ {
+ return StreamBase::IsValid();
+ }
+
+private:
+ StreamBuffer m_buffer;
+ StreamBuffer::const_iterator m_bufferIter;
+};
+
+} // namespace UnitTests
+} // namespace L4
\ No newline at end of file
diff --git a/Unittests/PerfInfoTest.cpp b/Unittests/PerfInfoTest.cpp
new file mode 100644
index 0000000..afc3cbc
--- /dev/null
+++ b/Unittests/PerfInfoTest.cpp
@@ -0,0 +1,104 @@
+#include "stdafx.h"
+#include "L4/Log/PerfLogger.h"
+#include <limits>
+
+namespace L4
+{
+namespace LocalMemory
+{
+
+void CheckMinCounters(const HashTablePerfData& htPerfData)
+{
+ const auto maxValue = (std::numeric_limits<HashTablePerfData::TValue>::max)();
+ // Check if the min counter values are correctly initialized to the max value.
+ BOOST_CHECK_EQUAL(htPerfData.Get(HashTablePerfCounter::MinValueSize), maxValue);
+ BOOST_CHECK_EQUAL(htPerfData.Get(HashTablePerfCounter::MinKeySize), maxValue);
+}
+
+BOOST_AUTO_TEST_CASE(PerfCountersTest)
+{
+ enum class TestCounter
+ {
+ Counter = 0,
+ Count
+ };
+
+ PerfCounters<TestCounter> perfCounters;
+
+ BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 0);
+
+ perfCounters.Set(TestCounter::Counter, 10);
+ BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10);
+
+ perfCounters.Increment(TestCounter::Counter);
+ BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 11);
+
+ perfCounters.Decrement(TestCounter::Counter);
+ BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10);
+
+ perfCounters.Add(TestCounter::Counter, 5);
+ BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 15);
+
+ perfCounters.Subtract(TestCounter::Counter, 10);
+ BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 5);
+
+ perfCounters.Max(TestCounter::Counter, 10);
+ BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10);
+
+ perfCounters.Max(TestCounter::Counter, 9);
+ BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10);
+
+ perfCounters.Min(TestCounter::Counter, 1);
+ BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 1);
+
+ perfCounters.Min(TestCounter::Counter, 10);
+ BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 1);
+}
+
+
+BOOST_AUTO_TEST_CASE(PerfDataTest)
+{
+ PerfData testPerfData;
+
+ BOOST_CHECK(testPerfData.GetHashTablesPerfData().empty());
+
+ HashTablePerfData htPerfData1;
+ HashTablePerfData htPerfData2;
+ HashTablePerfData htPerfData3;
+
+ CheckMinCounters(htPerfData1);
+ CheckMinCounters(htPerfData2);
+ CheckMinCounters(htPerfData3);
+
+ testPerfData.AddHashTablePerfData("HT1", htPerfData1);
+ testPerfData.AddHashTablePerfData("HT2", htPerfData2);
+ testPerfData.AddHashTablePerfData("HT3", htPerfData3);
+
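+ // AddHashTablePerfData() stores references to the per-table data, so the Set() calls below are
+ // observable through testPerfData.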
+ // Update counters and check if they are correctly updated.
+ htPerfData1.Set(HashTablePerfCounter::TotalKeySize, 10);
+ htPerfData2.Set(HashTablePerfCounter::TotalKeySize, 20);
+ htPerfData3.Set(HashTablePerfCounter::TotalKeySize, 30);
+
+ // Check if the hash table perf data is correctly registered.
+ const auto& hashTablesPerfData = testPerfData.GetHashTablesPerfData();
+ BOOST_CHECK_EQUAL(hashTablesPerfData.size(), 3U);
+
+ {
+ auto htPerfDataIt = hashTablesPerfData.find("HT1");
+ BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end());
+ BOOST_CHECK_EQUAL(htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 10);
+ }
+ {
+ auto htPerfDataIt = hashTablesPerfData.find("HT2");
+ BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end());
+ BOOST_CHECK_EQUAL(htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 20);
+ }
+ {
+ auto htPerfDataIt = hashTablesPerfData.find("HT3");
+ BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end());
+ BOOST_CHECK_EQUAL(htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 30);
+ }
+}
+
+} // namespace LocalMemory
+} // namespace L4
\ No newline at end of file
diff --git a/Unittests/ReadWriteHashTableSerializerTest.cpp b/Unittests/ReadWriteHashTableSerializerTest.cpp
new file mode 100644
index 0000000..d5c787c
--- /dev/null
+++ b/Unittests/ReadWriteHashTableSerializerTest.cpp
@@ -0,0 +1,220 @@
+#include "stdafx.h"
+#include "Utils.h"
+#include "Mocks.h"
+#include "L4/HashTable/ReadWrite/HashTable.h"
+#include "L4/HashTable/ReadWrite/Serializer.h"
+#include "L4/Log/PerfCounter.h"
+#include <utility>
+#include <vector>
+
+namespace L4
+{
+namespace UnitTests
+{
+
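+// Minimal local-memory model used by these tests: it provides only the allocator, deleter and
+// unique-pointer aliases plus the MakeUnique()/GetAllocator() helpers that the deserializer needs here.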
+class LocalMemory
+{
+public:
+ template <typename T = void>
+ using Allocator = typename std::allocator<T>;
+
+ template <typename T>
+ using Deleter = typename std::default_delete<T>;
+
+ template <typename T>
+ using UniquePtr = std::unique_ptr<T, Deleter<T>>;
+
+ LocalMemory() = default;
+
+ template <typename T, typename... Args>
+ auto MakeUnique(Args&&... args)
+ {
+ return std::make_unique<T>(std::forward<Args>(args)...);
+ }
+
+ template <typename T = void>
+ auto GetAllocator()
+ {
+ return Allocator<T>();
+ }
+
+ template <typename T>
+ auto GetDeleter()
+ {
+ return Deleter<T>();
+ }
+
+ LocalMemory(const LocalMemory&) = delete;
+ LocalMemory& operator=(const LocalMemory&) = delete;
+};
+
+using namespace HashTable::ReadWrite;
+
+BOOST_AUTO_TEST_SUITE(HashTableSerializerTests)
+
+using KeyValuePair = std::pair<std::string, std::string>;
+using KeyValuePairs = std::vector<KeyValuePair>;
+using Memory = LocalMemory;
+using Allocator = typename Memory::template Allocator<>;
+using HashTable = WritableHashTable<Allocator>::HashTable;
+
+template <typename Serializer, typename Deserializer>
+void ValidateSerializer(
+ const Serializer& serializer,
+ const Deserializer& deserializer,
+ std::uint8_t serializerVersion,
+ const KeyValuePairs& keyValuePairs,
+ const Utils::ExpectedCounterValues& expectedCounterValuesAfterLoad,
+ const Utils::ExpectedCounterValues& expectedCounterValuesAfterSerialization,
+ const Utils::ExpectedCounterValues& expectedCounterValuesAfterDeserialization)
+{
+ LocalMemory memory;
+ MockEpochManager epochManager;
+
+ auto hashTableHolder{
+ memory.MakeUnique<HashTable>(
+ HashTable::Setting{ 5 }, memory.GetAllocator()) };
+ BOOST_CHECK(hashTableHolder != nullptr);
+
+ WritableHashTable<Allocator> writableHashTable(*hashTableHolder, epochManager);
+
+ // Insert the given key/value pairs to the hash table.
+ for (const auto& pair : keyValuePairs)
+ {
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(pair.first.c_str());
+ auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(pair.second.c_str());
+
+ writableHashTable.Add(key, val);
+ }
+
+ const auto& perfData = writableHashTable.GetPerfData();
+
+ Utils::ValidateCounters(perfData, expectedCounterValuesAfterLoad);
+
+ // Now write the hash table to the stream.
+ MockStreamWriter writer;
+ BOOST_CHECK(!writer.IsValid());
+ serializer.Serialize(*hashTableHolder, writer);
+ BOOST_CHECK(writer.IsValid());
+ Utils::ValidateCounters(perfData, expectedCounterValuesAfterSerialization);
+
+ // Read in the hash table from the stream and validate it.
+ MockStreamReader reader(writer.GetStreamBuffer());
+
+ // version == 0 means that it's run through the HashTableSerializer, thus the following can be skipped.
+ if (serializerVersion != 0)
+ {
+ std::uint8_t actualSerializerVersion = 0;
+ reader.Begin();
+ reader.Read(&actualSerializerVersion, sizeof(actualSerializerVersion));
+ BOOST_CHECK(actualSerializerVersion == serializerVersion);
+ }
+ else
+ {
+ BOOST_REQUIRE(typeid(L4::HashTable::ReadWrite::Serializer) == typeid(Serializer));
+ }
+
+ BOOST_CHECK(!reader.IsValid());
+
+ auto newHashTableHolder = deserializer.Deserialize(memory, reader);
+
+ BOOST_CHECK(reader.IsValid());
+ BOOST_CHECK(newHashTableHolder != nullptr);
+
+ WritableHashTable<Allocator> newWritableHashTable(*newHashTableHolder, epochManager);
+
+ const auto& newPerfData = newWritableHashTable.GetPerfData();
+
+ Utils::ValidateCounters(newPerfData, expectedCounterValuesAfterDeserialization);
+
+ // Make sure all the key/value pairs exist after deserialization.
+ for (const auto& pair : keyValuePairs)
+ {
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(pair.first.c_str());
+ IReadOnlyHashTable::Value val;
+ BOOST_CHECK(newWritableHashTable.Get(key, val));
+ BOOST_CHECK(Utils::ConvertToString(val) == pair.second);
+ }
+}
+
+
+BOOST_AUTO_TEST_CASE(CurrentSerializerTest)
+{
+ ValidateSerializer(
+ Current::Serializer{},
+ Current::Deserializer{ L4::Utils::Properties{} },
+ Current::c_version,
+ {
+ { "hello1", " world1" },
+ { "hello2", " world2" },
+ { "hello3", " world3" }
+ },
+ {
+ { HashTablePerfCounter::RecordsCount, 3 },
+ { HashTablePerfCounter::BucketsCount, 5 },
+ { HashTablePerfCounter::TotalKeySize, 18 },
+ { HashTablePerfCounter::TotalValueSize, 21 },
+ { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 },
+ { HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 }
+ },
+ {
+ { HashTablePerfCounter::RecordsCount, 3 },
+ { HashTablePerfCounter::BucketsCount, 5 },
+ { HashTablePerfCounter::TotalKeySize, 18 },
+ { HashTablePerfCounter::TotalValueSize, 21 },
+ { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 },
+ { HashTablePerfCounter::RecordsCountSavedFromSerializer, 3 }
+ },
+ {
+ { HashTablePerfCounter::RecordsCount, 3 },
+ { HashTablePerfCounter::BucketsCount, 5 },
+ { HashTablePerfCounter::TotalKeySize, 18 },
+ { HashTablePerfCounter::TotalValueSize, 21 },
+ { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 3 },
+ { HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 }
+ });
+}
+
+
+BOOST_AUTO_TEST_CASE(HashTableSerializeTest)
+{
+ // This test case tests the end-to-end scenario using the HashTableSerializer.
+ ValidateSerializer(
+ Serializer{},
+ Deserializer{ L4::Utils::Properties{} },
+ 0U,
+ {
+ { "hello1", " world1" },
+ { "hello2", " world2" },
+ { "hello3", " world3" }
+ },
+ {
+ { HashTablePerfCounter::RecordsCount, 3 },
+ { HashTablePerfCounter::BucketsCount, 5 },
+ { HashTablePerfCounter::TotalKeySize, 18 },
+ { HashTablePerfCounter::TotalValueSize, 21 },
+ { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 },
+ { HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 }
+ },
+ {
+ { HashTablePerfCounter::RecordsCount, 3 },
+ { HashTablePerfCounter::BucketsCount, 5 },
+ { HashTablePerfCounter::TotalKeySize, 18 },
+ { HashTablePerfCounter::TotalValueSize, 21 },
+ { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 },
+ { HashTablePerfCounter::RecordsCountSavedFromSerializer, 3 }
+ },
+ {
+ { HashTablePerfCounter::RecordsCount, 3 },
+ { HashTablePerfCounter::BucketsCount, 5 },
+ { HashTablePerfCounter::TotalKeySize, 18 },
+ { HashTablePerfCounter::TotalValueSize, 21 },
+ { HashTablePerfCounter::RecordsCountLoadedFromSerializer, 3 },
+ { HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 }
+ });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+} // namespace UnitTests
+} // namespace L4
\ No newline at end of file
diff --git a/Unittests/ReadWriteHashTableTest.cpp b/Unittests/ReadWriteHashTableTest.cpp
new file mode 100644
index 0000000..84636e7
--- /dev/null
+++ b/Unittests/ReadWriteHashTableTest.cpp
@@ -0,0 +1,676 @@
+#include "stdafx.h"
+#include "Utils.h"
+#include "Mocks.h"
+#include "CheckedAllocator.h"
+#include "L4/Log/PerfCounter.h"
+#include "L4/HashTable/ReadWrite/HashTable.h"
+
+namespace L4
+{
+namespace UnitTests
+{
+
+using namespace HashTable::ReadWrite;
+
+class ReadWriteHashTableTestFixture
+{
+protected:
+ using Allocator = CheckedAllocator<>;
+ using HashTable = WritableHashTable<Allocator>::HashTable;
+
+ ReadWriteHashTableTestFixture()
+ : m_allocator{}
+ , m_epochManager{}
+ {}
+
+ Allocator m_allocator;
+ MockEpochManager m_epochManager;
+};
+
+
+BOOST_FIXTURE_TEST_SUITE(ReadWriteHashTableTests, ReadWriteHashTableTestFixture)
+
+
+BOOST_AUTO_TEST_CASE(HashTableTest)
+{
+ HashTable hashTable{ HashTable::Setting{ 100, 5 }, m_allocator };
+ WritableHashTable<Allocator> writableHashTable(hashTable, m_epochManager);
+ ReadOnlyHashTable<Allocator> readOnlyHashTable(hashTable);
+
+ const auto& perfData = writableHashTable.GetPerfData();
+
+ {
+ // Check empty data.
+
+ std::string keyStr = "hello";
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
+
+ IReadOnlyHashTable::Value data;
+ BOOST_CHECK(!readOnlyHashTable.Get(key, data));
+
+ const auto c_counterMaxValue = (std::numeric_limits<HashTablePerfData::TValue>::max)();
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, 0 },
+ { HashTablePerfCounter::BucketsCount, 100 },
+ { HashTablePerfCounter::ChainingEntriesCount, 0 },
+ { HashTablePerfCounter::TotalKeySize, 0 },
+ { HashTablePerfCounter::TotalValueSize, 0 },
+ { HashTablePerfCounter::MinKeySize, c_counterMaxValue },
+ { HashTablePerfCounter::MaxKeySize, 0 },
+ { HashTablePerfCounter::MinValueSize, c_counterMaxValue },
+ { HashTablePerfCounter::MaxValueSize, 0 }
+ });
+ }
+
+
+ {
+ // First record added.
+ std::string keyStr = "hello";
+ std::string valStr = "world";
+
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
+ auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
+
+ writableHashTable.Add(key, val);
+
+ IReadOnlyHashTable::Value value;
+ BOOST_CHECK(readOnlyHashTable.Get(key, value));
+ BOOST_CHECK(value.m_size == valStr.size());
+ BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, 1 },
+ { HashTablePerfCounter::BucketsCount, 100 },
+ { HashTablePerfCounter::ChainingEntriesCount, 0 },
+ { HashTablePerfCounter::TotalKeySize, 5 },
+ { HashTablePerfCounter::TotalValueSize, 5 },
+ { HashTablePerfCounter::MinKeySize, 5 },
+ { HashTablePerfCounter::MaxKeySize, 5 },
+ { HashTablePerfCounter::MinValueSize, 5 },
+ { HashTablePerfCounter::MaxValueSize, 5 }
+ });
+ }
+
+ {
+ // Second record added.
+ std::string keyStr = "hello2";
+ std::string valStr = "world2";
+
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
+ auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
+
+ writableHashTable.Add(key, val);
+
+ IReadOnlyHashTable::Value value;
+ BOOST_CHECK(readOnlyHashTable.Get(key, value));
+ BOOST_CHECK(value.m_size == valStr.size());
+ BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, 2 },
+ { HashTablePerfCounter::BucketsCount, 100 },
+ { HashTablePerfCounter::ChainingEntriesCount, 0 },
+ { HashTablePerfCounter::TotalKeySize, 11 },
+ { HashTablePerfCounter::TotalValueSize, 11 },
+ { HashTablePerfCounter::MinKeySize, 5 },
+ { HashTablePerfCounter::MaxKeySize, 6 },
+ { HashTablePerfCounter::MinValueSize, 5 },
+ { HashTablePerfCounter::MaxValueSize, 6 }
+ });
+ }
+
+ {
+ // Update the key with value bigger than the existing values.
+ std::string keyStr = "hello";
+ std::string valStr = "world long string";
+
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
+ auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
+
+ writableHashTable.Add(key, val);
+
+ IReadOnlyHashTable::Value value;
+ BOOST_CHECK(readOnlyHashTable.Get(key, value));
+ BOOST_CHECK(value.m_size == valStr.size());
+ BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
+ BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 1);
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, 2 },
+ { HashTablePerfCounter::BucketsCount, 100 },
+ { HashTablePerfCounter::ChainingEntriesCount, 0 },
+ { HashTablePerfCounter::TotalKeySize, 11 },
+ { HashTablePerfCounter::TotalValueSize, 23 },
+ { HashTablePerfCounter::MinKeySize, 5 },
+ { HashTablePerfCounter::MaxKeySize, 6 },
+ { HashTablePerfCounter::MinValueSize, 5 },
+ { HashTablePerfCounter::MaxValueSize, 17 }
+ });
+ }
+
+ {
+ // Update the key with value smaller than the existing values.
+ std::string keyStr = "hello2";
+ std::string valStr = "wo";
+
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
+ auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
+
+ writableHashTable.Add(key, val);
+
+ IReadOnlyHashTable::Value value;
+ BOOST_CHECK(readOnlyHashTable.Get(key, value));
+ BOOST_CHECK(value.m_size == valStr.size());
+ BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
+ BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 2);
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, 2 },
+ { HashTablePerfCounter::BucketsCount, 100 },
+ { HashTablePerfCounter::ChainingEntriesCount, 0 },
+ { HashTablePerfCounter::TotalKeySize, 11 },
+ { HashTablePerfCounter::TotalValueSize, 19 },
+ { HashTablePerfCounter::MinKeySize, 5 },
+ { HashTablePerfCounter::MaxKeySize, 6 },
+ { HashTablePerfCounter::MinValueSize, 2 },
+ { HashTablePerfCounter::MaxValueSize, 17 }
+ });
+ }
+
+ {
+ // Remove the first key.
+ std::string keyStr = "hello";
+ std::string valStr = "";
+
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
+ auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
+
+ BOOST_CHECK(writableHashTable.Remove(key));
+ BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 3);
+
+ // Note that the Remove() doesn't change Min/Max counters by design.
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, 1 },
+ { HashTablePerfCounter::BucketsCount, 100 },
+ { HashTablePerfCounter::ChainingEntriesCount, 0 },
+ { HashTablePerfCounter::TotalKeySize, 6 },
+ { HashTablePerfCounter::TotalValueSize, 2 },
+ { HashTablePerfCounter::MinKeySize, 5 },
+ { HashTablePerfCounter::MaxKeySize, 6 },
+ { HashTablePerfCounter::MinValueSize, 2 },
+ { HashTablePerfCounter::MaxValueSize, 17 }
+ });
+
+ // Remove the second key.
+ keyStr = "hello2";
+ key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
+
+ BOOST_CHECK(writableHashTable.Remove(key));
+ BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 4);
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, 0 },
+ { HashTablePerfCounter::BucketsCount, 100 },
+ { HashTablePerfCounter::ChainingEntriesCount, 0 },
+ { HashTablePerfCounter::TotalKeySize, 0 },
+ { HashTablePerfCounter::TotalValueSize, 0 },
+ { HashTablePerfCounter::MinKeySize, 5 },
+ { HashTablePerfCounter::MaxKeySize, 6 },
+ { HashTablePerfCounter::MinValueSize, 2 },
+ { HashTablePerfCounter::MaxValueSize, 17 }
+ });
+
+ // Removing the key that doesn't exist.
+ BOOST_CHECK(!writableHashTable.Remove(key));
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, 0 },
+ { HashTablePerfCounter::BucketsCount, 100 },
+ { HashTablePerfCounter::ChainingEntriesCount, 0 },
+ { HashTablePerfCounter::TotalKeySize, 0 },
+ { HashTablePerfCounter::TotalValueSize, 0 },
+ { HashTablePerfCounter::MinKeySize, 5 },
+ { HashTablePerfCounter::MaxKeySize, 6 },
+ { HashTablePerfCounter::MinValueSize, 2 },
+ { HashTablePerfCounter::MaxValueSize, 17 }
+ });
+ }
+}
+
+
+BOOST_AUTO_TEST_CASE(HashTableWithOneBucketTest)
+{
+ Allocator allocator;
+ HashTable hashTable{ HashTable::Setting{ 1 }, allocator };
+ WritableHashTable<Allocator> writableHashTable(hashTable, m_epochManager);
+ ReadOnlyHashTable<Allocator> readOnlyHashTable(hashTable);
+
+ const auto& perfData = writableHashTable.GetPerfData();
+
+ Utils::ValidateCounters(perfData, { { HashTablePerfCounter::ChainingEntriesCount, 0 } });
+
+ const auto initialTotalIndexSize = perfData.Get(HashTablePerfCounter::TotalIndexSize);
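+ // Use more records than a single entry can hold so that the lone bucket is forced to chain to a second entry.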
+ const std::size_t c_dataSetSize = HashTable::Entry::c_numDataPerEntry + 5U;
+
+ std::size_t expectedTotalKeySize = 0U;
+ std::size_t expectedTotalValueSize = 0U;
+
+ for (auto i = 0U; i < c_dataSetSize; ++i)
+ {
+ std::stringstream keyStream;
+ keyStream << "key" << i;
+
+ std::stringstream valStream;
+ valStream << "value" << i;
+
+ std::string keyStr = keyStream.str();
+ std::string valStr = valStream.str();
+
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
+ auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
+
+ expectedTotalKeySize += key.m_size;
+ expectedTotalValueSize += val.m_size;
+
+ writableHashTable.Add(key, val);
+
+ IReadOnlyHashTable::Value value;
+ BOOST_CHECK(readOnlyHashTable.Get(key, value));
+ BOOST_CHECK(value.m_size == valStr.size());
+ BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
+ }
+
+ using L4::HashTable::RecordSerializer;
+
+ // Variable key/value sizes.
+ const auto recordOverhead = RecordSerializer{ 0U, 0U }.CalculateRecordOverhead();
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, c_dataSetSize },
+ { HashTablePerfCounter::BucketsCount, 1 },
+ { HashTablePerfCounter::MaxBucketChainLength, 2 },
+ { HashTablePerfCounter::ChainingEntriesCount, 1 },
+ { HashTablePerfCounter::TotalKeySize, expectedTotalKeySize },
+ { HashTablePerfCounter::TotalValueSize, expectedTotalValueSize },
+ {
+ HashTablePerfCounter::TotalIndexSize,
+ initialTotalIndexSize + sizeof(HashTable::Entry) + (c_dataSetSize * recordOverhead)
+ }
+ });
+
+ // Now replace with new values.
+ expectedTotalValueSize = 0U;
+
+ for (auto i = 0U; i < c_dataSetSize; ++i)
+ {
+ std::stringstream keyStream;
+ keyStream << "key" << i;
+
+ std::stringstream valStream;
+ valStream << "val" << i;
+
+ std::string keyStr = keyStream.str();
+ std::string valStr = valStream.str();
+
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
+ auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
+
+ expectedTotalValueSize += val.m_size;
+
+ writableHashTable.Add(key, val);
+
+ IReadOnlyHashTable::Value value;
+ BOOST_CHECK(readOnlyHashTable.Get(key, value));
+ BOOST_CHECK(value.m_size == valStr.size());
+ BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
+ }
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, c_dataSetSize },
+ { HashTablePerfCounter::BucketsCount, 1 },
+ { HashTablePerfCounter::MaxBucketChainLength, 2 },
+ { HashTablePerfCounter::ChainingEntriesCount, 1 },
+ { HashTablePerfCounter::TotalKeySize, expectedTotalKeySize },
+ { HashTablePerfCounter::TotalValueSize, expectedTotalValueSize },
+ {
+ HashTablePerfCounter::TotalIndexSize,
+ initialTotalIndexSize + sizeof(HashTable::Entry) + (c_dataSetSize * recordOverhead)
+ }
+ });
+
+ // Now remove all key-value.
+ for (auto i = 0U; i < c_dataSetSize; ++i)
+ {
+ std::stringstream keyStream;
+ keyStream << "key" << i;
+
+ std::string keyStr = keyStream.str();
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
+
+ BOOST_CHECK(writableHashTable.Remove(key));
+
+ IReadOnlyHashTable::Value value;
+ BOOST_CHECK(!readOnlyHashTable.Get(key, value));
+ }
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, 0 },
+ { HashTablePerfCounter::BucketsCount, 1 },
+ { HashTablePerfCounter::MaxBucketChainLength, 2 },
+ { HashTablePerfCounter::ChainingEntriesCount, 1 },
+ { HashTablePerfCounter::TotalKeySize, 0 },
+ { HashTablePerfCounter::TotalValueSize, 0 },
+ {
+ HashTablePerfCounter::TotalIndexSize,
+ initialTotalIndexSize + sizeof(HashTable::Entry)
+ }
+ });
+
+ // Try to add back to the same bucket (reusing existing entries)
+ expectedTotalKeySize = 0U;
+ expectedTotalValueSize = 0U;
+
+ for (auto i = 0U; i < c_dataSetSize; ++i)
+ {
+ std::stringstream keyStream;
+ keyStream << "key" << i;
+
+ std::stringstream valStream;
+ valStream << "value" << i;
+
+ std::string keyStr = keyStream.str();
+ std::string valStr = valStream.str();
+
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
+ auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
+
+ expectedTotalKeySize += key.m_size;
+ expectedTotalValueSize += val.m_size;
+
+ writableHashTable.Add(key, val);
+
+ IReadOnlyHashTable::Value value;
+ BOOST_CHECK(readOnlyHashTable.Get(key, value));
+ BOOST_CHECK(value.m_size == valStr.size());
+ BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
+ }
+
+ Utils::ValidateCounters(
+ perfData,
+ {
+ { HashTablePerfCounter::RecordsCount, c_dataSetSize },
+ { HashTablePerfCounter::BucketsCount, 1 },
+ { HashTablePerfCounter::MaxBucketChainLength, 2 },
+ { HashTablePerfCounter::ChainingEntriesCount, 1 },
+ { HashTablePerfCounter::TotalKeySize, expectedTotalKeySize },
+ { HashTablePerfCounter::TotalValueSize, expectedTotalValueSize },
+ {
+ HashTablePerfCounter::TotalIndexSize,
+ initialTotalIndexSize + sizeof(HashTable::Entry) + (c_dataSetSize * recordOverhead)
+ }
+ });
+}
+
+
+BOOST_AUTO_TEST_CASE(AddRemoveSameKeyTest)
+{
+ HashTable hashTable{ HashTable::Setting{ 100, 5 }, m_allocator };
+ WritableHashTable<Allocator> writableHashTable(hashTable, m_epochManager);
+ ReadOnlyHashTable<Allocator> readOnlyHashTable(hashTable);
+
+ // Add two key/value pairs.
+ auto key1 = Utils::ConvertFromString("key1");
+ auto val1 = Utils::ConvertFromString("val1");
+ writableHashTable.Add(key1, val1);
+
+ IReadOnlyHashTable::Value valueRetrieved;
+ BOOST_CHECK(readOnlyHashTable.Get(key1, valueRetrieved));
+ BOOST_CHECK(valueRetrieved.m_size == val1.m_size);
+ BOOST_CHECK(!memcmp(valueRetrieved.m_data, val1.m_data, val1.m_size));
+
+ auto key2 = Utils::ConvertFromString("key2");
+ auto val2 = Utils::ConvertFromString("val2");
+ writableHashTable.Add(key2, val2);
+
+ BOOST_CHECK(readOnlyHashTable.Get(key2, valueRetrieved));
+ BOOST_CHECK(valueRetrieved.m_size == val2.m_size);
+ BOOST_CHECK(!memcmp(valueRetrieved.m_data, val2.m_data, val2.m_size));
+
+ const auto& perfData = writableHashTable.GetPerfData();
+
+ // Now remove the first record with key = "key1", which is at the head of the chain.
+ BOOST_CHECK(writableHashTable.Remove(key1));
+ BOOST_CHECK(!readOnlyHashTable.Get(key1, valueRetrieved));
+ Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 1);
+
+ // Now try to update the record with key = "key2". This should correctly update the existing record
+ // instead of using the empty slot created by removing the record with key = "key1".
+ auto newVal2 = Utils::ConvertFromString("newVal2");
+ writableHashTable.Add(key2, newVal2);
+
+ BOOST_CHECK(readOnlyHashTable.Get(key2, valueRetrieved));
+ BOOST_CHECK(valueRetrieved.m_size == newVal2.m_size);
+ BOOST_CHECK(!memcmp(valueRetrieved.m_data, newVal2.m_data, newVal2.m_size));
+ Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 1);
+
+ // Remove the record with key = "key2".
+ BOOST_CHECK(writableHashTable.Remove(key2));
+ BOOST_CHECK(!writableHashTable.Remove(key2));
+ Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 0);
+}
+
+
+BOOST_AUTO_TEST_CASE(FixedKeyValueHashTableTest)
+{
+ // Fixed 4 byte keys and 6 byte values.
+ std::vector<HashTable::Setting> settings =
+ {
+ HashTable::Setting{ 100, 200, 4, 0 },
+ HashTable::Setting{ 100, 200, 0, 6 },
+ HashTable::Setting{ 100, 200, 4, 6 }
+ };
+
+ for (const auto& setting : settings)
+ {
+ HashTable hashTable{ setting, m_allocator };
+ WritableHashTable<Allocator> writableHashTable(hashTable, m_epochManager);
+ ReadOnlyHashTable<Allocator> readOnlyHashTable(hashTable);
+
+ constexpr std::uint8_t c_numRecords = 10;
+
+ for (std::uint8_t i = 0; i < c_numRecords; ++i)
+ {
+ const std::string keyStr = "key" + std::to_string(i);
+ const std::string valueStr = "value" + std::to_string(i);
+
+ writableHashTable.Add(
+ Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str()),
+ Utils::ConvertFromString<IReadOnlyHashTable::Value>(valueStr.c_str()));
+ }
+
+ Utils::ValidateCounters(
+ writableHashTable.GetPerfData(),
+ {
+ { HashTablePerfCounter::RecordsCount, 10 },
+ { HashTablePerfCounter::BucketsCount, 100 },
+ { HashTablePerfCounter::TotalKeySize, 40 },
+ { HashTablePerfCounter::TotalValueSize, 60 },
+ { HashTablePerfCounter::MinKeySize, 4 },
+ { HashTablePerfCounter::MaxKeySize, 4 },
+ { HashTablePerfCounter::MinValueSize, 6 },
+ { HashTablePerfCounter::MaxValueSize, 6 }
+ });
+
+ for (std::uint8_t i = 0; i < c_numRecords; ++i)
+ {
+ const std::string keyStr = "key" + std::to_string(i);
+ const std::string valueStr = "value" + std::to_string(i);
+ const auto expectedValue = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valueStr.c_str());
+
+ IReadOnlyHashTable::Value actualValue;
+ BOOST_CHECK(readOnlyHashTable.Get(
+ Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str()),
+ actualValue));
+ BOOST_CHECK(expectedValue == actualValue);
+ }
+
+ for (std::uint8_t i = 0; i < c_numRecords; ++i)
+ {
+ const std::string keyStr = "key" + std::to_string(i);
+ writableHashTable.Remove(
+ Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str()));
+ }
+
+ Utils::ValidateCounters(
+ writableHashTable.GetPerfData(),
+ {
+ { HashTablePerfCounter::RecordsCount, 0 },
+ { HashTablePerfCounter::BucketsCount, 100 },
+ { HashTablePerfCounter::TotalKeySize, 0 },
+ { HashTablePerfCounter::TotalValueSize, 0 }
+ });
+ }
+}
+
+
+BOOST_AUTO_TEST_CASE(HashTableIteratorTest)
+{
+ Allocator allocator;
+ constexpr std::uint32_t c_numBuckets = 10;
+ HashTable hashTable{ HashTable::Setting{ c_numBuckets }, allocator };
+ WritableHashTable<Allocator> writableHashTable(hashTable, m_epochManager);
+
+ {
+ // Empty data set, thus iterator cannot move.
+ auto iter = writableHashTable.GetIterator();
+ BOOST_CHECK(!iter->MoveNext());
+
+ CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
+ iter->GetKey(),
+ "HashTableIterator is not correctly used.");
+
+ CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
+ iter->GetValue(),
+ "HashTableIterator is not correctly used.");
+ }
+
+ using Buffer = std::vector<std::uint8_t>;
+ using BufferMap = std::map<Buffer, Buffer>;
+
+ BufferMap keyValueMap;
+
+ // The number of records should be such that it will create chained entries
+ // for at least one bucket. So it should be greater than HashTable::Entry::c_numDataPerEntry * number of buckets.
+ constexpr std::uint32_t c_numRecords = (HashTable::Entry::c_numDataPerEntry * c_numBuckets) + 1;
+
+ for (auto i = 0U; i < c_numRecords; ++i)
+ {
+ std::stringstream keyStream;
+ keyStream << "key" << i;
+
+ std::stringstream valStream;
+ valStream << "value" << i;
+
+ std::string keyStr = keyStream.str();
+ std::string valStr = valStream.str();
+
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
+ auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
+
+ writableHashTable.Add(key, val);
+
+ keyValueMap[Buffer(key.m_data, key.m_data + key.m_size)] = Buffer(val.m_data, val.m_data + val.m_size);
+ }
+
+ BOOST_REQUIRE(writableHashTable.GetPerfData().Get(HashTablePerfCounter::MaxBucketChainLength) >= 2);
+ BOOST_CHECK_EQUAL(keyValueMap.size(), c_numRecords);
+
+ {
+ BufferMap keyValueMapFromIterator;
+
+ // Validate the data using the iterator.
+ auto iter = writableHashTable.GetIterator();
+ for (auto i = 0U; i < c_numRecords; ++i)
+ {
+ BOOST_CHECK(iter->MoveNext());
+
+ const auto& key = iter->GetKey();
+ const auto& val = iter->GetValue();
+
+ keyValueMapFromIterator[Buffer(key.m_data, key.m_data + key.m_size)] = Buffer(val.m_data, val.m_data + val.m_size);
+ }
+ BOOST_CHECK(!iter->MoveNext());
+ BOOST_CHECK(keyValueMap == keyValueMapFromIterator);
+
+ // Reset should move the iterator to the beginning.
+ iter->Reset();
+ for (auto i = 0U; i < c_numRecords; ++i)
+ {
+ BOOST_CHECK(iter->MoveNext());
+ }
+ BOOST_CHECK(!iter->MoveNext());
+ }
+
+ // Remove half of the keys.
+ for (auto i = 0U; i < c_numRecords; ++i)
+ {
+ if (i % 2 == 0U)
+ {
+ std::stringstream keyStream;
+ keyStream << "key" << i;
+
+ std::string keyStr = keyStream.str();
+ auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
+
+ BOOST_CHECK(writableHashTable.Remove(key));
+
+ keyValueMap.erase(Buffer(key.m_data, key.m_data + key.m_size));
+ }
+ }
+
+ BOOST_CHECK_EQUAL(keyValueMap.size(), c_numRecords / 2U);
+
+ // Validate only the existing keys are iterated.
+ {
+ BufferMap keyValueMapFromIterator;
+ auto iter = writableHashTable.GetIterator();
+ for (auto i = 0U; i < c_numRecords / 2U; ++i)
+ {
+ BOOST_CHECK(iter->MoveNext());
+
+ const auto& key = iter->GetKey();
+ const auto& val = iter->GetValue();
+
+ keyValueMapFromIterator[Buffer(key.m_data, key.m_data + key.m_size)] =
+ Buffer(val.m_data, val.m_data + val.m_size);
+ }
+ BOOST_CHECK(!iter->MoveNext());
+ BOOST_CHECK(keyValueMap == keyValueMapFromIterator);
+ }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+} // namespace UnitTests
+} // namespace L4
\ No newline at end of file
diff --git a/Unittests/SettingAdapterTest.cpp b/Unittests/SettingAdapterTest.cpp
new file mode 100644
index 0000000..1412ff6
--- /dev/null
+++ b/Unittests/SettingAdapterTest.cpp
@@ -0,0 +1,41 @@
+#include "stdafx.h"
+#include "L4/HashTable/Common/SettingAdapter.h"
+#include "L4/HashTable/Common/Record.h"
+#include "CheckedAllocator.h"
+
+namespace L4
+{
+namespace UnitTests
+{
+
+using SharedHashTable = HashTable::SharedHashTable<HashTable::RecordBuffer, CheckedAllocator<>>;
+
+BOOST_AUTO_TEST_SUITE(SettingAdapterTests)
+
+BOOST_AUTO_TEST_CASE(SettingAdapterTestWithDefaultValues)
+{
+ HashTableConfig::Setting from{ 100U };
+ const auto to = HashTable::SettingAdapter{}.Convert<SharedHashTable>(from);
+
+ BOOST_CHECK_EQUAL(to.m_numBuckets, 100U);
+ BOOST_CHECK_EQUAL(to.m_numBucketsPerMutex, 1U);
+ BOOST_CHECK_EQUAL(to.m_fixedKeySize, 0U);
+ BOOST_CHECK_EQUAL(to.m_fixedValueSize, 0U);
+}
+
+
+BOOST_AUTO_TEST_CASE(SettingAdapterTestWithNonDefaultValues)
+{
+ HashTableConfig::Setting from{ 100U, 10U, 5U, 20U };
+ const auto to = HashTable::SettingAdapter{}.Convert<SharedHashTable>(from);
+
+ BOOST_CHECK_EQUAL(to.m_numBuckets, 100U);
+ BOOST_CHECK_EQUAL(to.m_numBucketsPerMutex, 10U);
+ BOOST_CHECK_EQUAL(to.m_fixedKeySize, 5U);
+ BOOST_CHECK_EQUAL(to.m_fixedValueSize, 20U);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
+} // namespace UnitTests
+} // namespace L4
\ No newline at end of file
diff --git a/Unittests/Unittests.vcxproj b/Unittests/Unittests.vcxproj
new file mode 100644
index 0000000..2224010
--- /dev/null
+++ b/Unittests/Unittests.vcxproj
@@ -0,0 +1,94 @@
+
+
+
+
+ Debug
+ x64
+
+
+ Release
+ x64
+
+
+
+ {8122529E-61CB-430B-A089-B12E63FC361B}
+
+
+
+ Application
+
+
+ true
+ v140
+
+
+ false
+ v140
+
+
+
+ L4.UnitTests
+
+
+
+ Console
+ netapi32.lib;%(AdditionalDependencies)
+ true
+
+
+ MachineX64
+
+
+ $(SolutionDir)Unittests;$(SolutionDir)inc;$(SolutionDir)inc/L4;%(AdditionalIncludeDirectories)
+ Use
+ /Zm136 %(AdditionalOptions)
+ _SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)
+ MaxSpeed
+ AnySuitable
+ true
+ 4482;%(DisableSpecificWarnings)
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Create
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {b7846115-88f1-470b-a625-9de0c29229bb}
+
+
+
+
+
+
+ This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Unittests/Unittests.vcxproj.filters b/Unittests/Unittests.vcxproj.filters
new file mode 100644
index 0000000..e1af83e
--- /dev/null
+++ b/Unittests/Unittests.vcxproj.filters
@@ -0,0 +1,72 @@
+
+
+
+
+ {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
+ cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
+
+
+ {93995380-89BD-4b04-88EB-625FBE52EBFB}
+ h;hpp;hxx;hm;inl;inc;xsd
+
+
+ {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
+ rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
+
+
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+ Source Files
+
+
+
+
+ Header Files
+
+
+ Header Files
+
+
+ Header Files
+
+
+ Header Files
+
+
+
+
+
+
\ No newline at end of file
diff --git a/Unittests/Utils.cpp b/Unittests/Utils.cpp
new file mode 100644
index 0000000..607e367
--- /dev/null
+++ b/Unittests/Utils.cpp
@@ -0,0 +1,37 @@
+#include "stdafx.h"
+#include "Utils.h"
+
+namespace L4
+{
+namespace UnitTests
+{
+namespace Utils
+{
+
+
+void ValidateCounter(
+ const HashTablePerfData& actual,
+ HashTablePerfCounter perfCounter,
+ PerfCounters<HashTablePerfCounter>::TValue expectedValue)
+{
+ BOOST_CHECK_MESSAGE(
+ actual.Get(perfCounter) == expectedValue,
+ c_hashTablePerfCounterNames[static_cast<std::size_t>(perfCounter)]
+ << " counter: "
+ << actual.Get(perfCounter)
+ << " (actual) != " << expectedValue << " (expected).");
+}
+
+void ValidateCounters(
+ const HashTablePerfData& actual,
+ const ExpectedCounterValues& expected)
+{
+ for (const auto& expectedCounter : expected)
+ {
+ ValidateCounter(actual, expectedCounter.first, expectedCounter.second);
+ }
+}
+
+} // namespace Utils
+} // namespace UnitTests
+} // namespace L4
\ No newline at end of file
diff --git a/Unittests/Utils.h b/Unittests/Utils.h
new file mode 100644
index 0000000..15a3047
--- /dev/null
+++ b/Unittests/Utils.h
@@ -0,0 +1,105 @@
+#pragma once
+
+#include <boost/test/unit_test.hpp>
+#include <cstring>
+#include <string>
+#include "L4/Log/PerfCounter.h"
+#include "L4/Utils/Exception.h"
+
+namespace L4
+{
+namespace UnitTests
+{
+
+// Macro CHECK_EXCEPTION_THROWN
+
+#define CHECK_EXCEPTION_THROWN(statement) \
+do { \
+ bool isExceptionThrown = false;\
+ try \
+ { \
+ statement; \
+ } \
+ catch (const RuntimeException&) \
+ { \
+ isExceptionThrown = true; \
+ } \
+ BOOST_CHECK(isExceptionThrown); \
+} while (0)
+
+
+#define CHECK_EXCEPTION_THROWN_WITH_MESSAGE(statement, message) \
+do { \
+ bool isExceptionThrown = false; \
+ std::string exceptionMsg; \
+ try \
+ { \
+ statement; \
+ } \
+ catch (const RuntimeException& ex) \
+ { \
+ isExceptionThrown = true; \
+ exceptionMsg = ex.what(); \
+ BOOST_TEST_MESSAGE("Exception Message: " << exceptionMsg); \
+ } \
+ BOOST_CHECK(isExceptionThrown); \
+ BOOST_CHECK(strcmp((message), exceptionMsg.c_str()) == 0); \
+} while (0)
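+// Example usage (mirrors the tests in this project):
+//   CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
+//       serializer.Serialize(invalidKey, value, buffer.data(), buffer.size()),
+//       "Invalid key or value sizes are given.");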
+
+
+// This will validate the given message is a prefix of the exception message.
+#define CHECK_EXCEPTION_THROWN_WITH_PREFIX_MESSAGE(statement, message) \
+do { \
+ bool isExceptionThrown = false; \
+ std::string exceptionMsg; \
+ try \
+ { \
+ statement; \
+ } \
+ catch (const RuntimeException& ex) \
+ { \
+ isExceptionThrown = true; \
+ exceptionMsg = ex.what(); \
+ BOOST_TEST_MESSAGE("Exception Message: " << exceptionMsg); \
+ } \
+ BOOST_CHECK(isExceptionThrown); \
+ BOOST_CHECK(exceptionMsg.compare(0, strlen(message), message) == 0); \
+} while (0)
+
+
+namespace Utils
+{
+
+template <typename T>
+T ConvertFromString(const char* str)
+{
+ return T(
+ reinterpret_cast<const std::uint8_t*>(str),
+ static_cast<typename T::size_type>(strlen(str)));
+}
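+// Typical usage in these tests (T is one of the hash table's Key/Value blob types):
+//   auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>("hello");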
+
+template <typename T>
+std::string ConvertToString(const T& t)
+{
+ return std::string(reinterpret_cast