This commit is contained in:
Terry Kim 2017-02-02 08:54:53 -08:00
Parent e636e64131
Commit 16f3d98905
70 changed files with 8020 additions and 0 deletions

View file

@@ -0,0 +1,71 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}</ProjectGuid>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<PropertyGroup>
<TargetName>L4.Benchmark</TargetName>
</PropertyGroup>
<ItemDefinitionGroup>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
<Lib>
<TargetMachine>MachineX64</TargetMachine>
</Lib>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)inc;$(SolutionDir)inc/L4;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<Optimization Condition="'$(Configuration)|$(Platform)'=='Release|x64'">MaxSpeed</Optimization>
<InlineFunctionExpansion Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AnySuitable</InlineFunctionExpansion>
<IntrinsicFunctions Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</IntrinsicFunctions>
</ClCompile>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="main.cpp" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\Build\L4.vcxproj">
<Project>{b7846115-88f1-470b-a625-9de0c29229bb}</Project>
</ProjectReference>
</ItemGroup>
<ItemGroup>
<None Include="packages.config" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<Import Project="..\packages\boost.1.63.0.0\build\native\boost.targets" Condition="Exists('..\packages\boost.1.63.0.0\build\native\boost.targets')" />
<Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
<PropertyGroup>
<ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
</PropertyGroup>
<Error Condition="!Exists('..\packages\boost.1.63.0.0\build\native\boost.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\boost.1.63.0.0\build\native\boost.targets'))" />
<Error Condition="!Exists('..\packages\boost_thread-vc140.1.63.0.0\build\native\boost_thread-vc140.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\boost_thread-vc140.1.63.0.0\build\native\boost_thread-vc140.targets'))" />
<Error Condition="!Exists('..\packages\boost_program_options-vc140.1.63.0.0\build\native\boost_program_options-vc140.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\boost_program_options-vc140.1.63.0.0\build\native\boost_program_options-vc140.targets'))" />
</Target>
<Import Project="..\packages\boost_thread-vc140.1.63.0.0\build\native\boost_thread-vc140.targets" Condition="Exists('..\packages\boost_thread-vc140.1.63.0.0\build\native\boost_thread-vc140.targets')" />
<Import Project="..\packages\boost_program_options-vc140.1.63.0.0\build\native\boost_program_options-vc140.targets" Condition="Exists('..\packages\boost_program_options-vc140.1.63.0.0\build\native\boost_program_options-vc140.targets')" />
</Project>

View file

@@ -0,0 +1,25 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
<Filter Include="Resource Files">
<UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
<Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="main.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<None Include="packages.config" />
</ItemGroup>
</Project>

710  Benchmark/main.cpp  Normal file
View file

@@ -0,0 +1,710 @@
#include "L4/LocalMemory/HashTableService.h"
#include "L4/Log/PerfCounter.h"
#include <algorithm>
#include <array>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
#include <boost/any.hpp>
#include <boost/program_options.hpp>
class Timer
{
public:
Timer()
: m_start{ std::chrono::high_resolution_clock::now() }
{}
void Reset()
{
m_start = std::chrono::high_resolution_clock::now();
}
std::chrono::microseconds GetElapsedTime()
{
return std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::high_resolution_clock::now() - m_start);
}
private:
std::chrono::time_point<std::chrono::steady_clock> m_start;
};
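// Note: SynchronizedTimer below is shared across the benchmark threads; the m_isStarted flag is
// intended to let only the first Start() call record the start time, while each End() call
// overwrites the end time, so the last thread to finish determines the overall wall-clock
// duration reported in the "Overall" row of the results.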
class SynchronizedTimer
{
public:
SynchronizedTimer() = default;
void Start()
{
if (m_isStarted)
{
return;
}
m_isStarted = true;
m_startCount = std::chrono::high_resolution_clock::now().time_since_epoch().count();
}
void End()
{
m_endCount = std::chrono::high_resolution_clock::now().time_since_epoch().count();
}
std::chrono::microseconds GetElapsedTime()
{
std::chrono::nanoseconds start{ m_startCount };
std::chrono::nanoseconds end{ m_endCount };
return std::chrono::duration_cast<std::chrono::microseconds>(end - start);
}
private:
std::atomic_bool m_isStarted = false;
std::atomic_uint64_t m_startCount;
std::atomic_uint64_t m_endCount;
};
struct PerThreadInfoForWriteTest
{
std::thread m_thread;
std::size_t m_dataSetSize = 0;
std::chrono::microseconds m_totalTime;
};
struct PerThreadInfoForReadTest
{
std::thread m_thread;
std::size_t m_dataSetSize = 0;
std::chrono::microseconds m_totalTime;
};
struct CommandLineOptions
{
static constexpr std::size_t c_defaultDataSetSize = 1000000;
static constexpr std::uint32_t c_defaultNumBuckets = 1000000;
static constexpr std::uint16_t c_defaultKeySize = 16;
static constexpr std::uint32_t c_defaultValueSize = 100;
static constexpr bool c_defaultRandomizeValueSize = false;
static constexpr std::uint32_t c_defaultNumIterationsPerGetContext = 1;
static constexpr std::uint16_t c_defaultNumThreads = 1;
static constexpr std::uint32_t c_defaultEpochProcessingIntervalInMilli = 10;
static constexpr std::uint16_t c_defaultNumActionsQueue = 1;
static constexpr std::uint32_t c_defaultRecordTimeToLiveInSeconds = 300;
static constexpr std::uint64_t c_defaultCacheSizeInBytes = 1024 * 1024 * 1024;
static constexpr bool c_defaultForceTimeBasedEviction = false;
std::string m_module;
std::size_t m_dataSetSize = 0;
std::uint32_t m_numBuckets = 0;
std::uint16_t m_keySize = 0;
std::uint32_t m_valueSize = 0;
bool m_randomizeValueSize = false;
std::uint32_t m_numIterationsPerGetContext = 0;
std::uint16_t m_numThreads = 0;
std::uint32_t m_epochProcessingIntervalInMilli;
std::uint8_t m_numActionsQueue = 0;
// The following options are specific to cache hash tables.
std::uint32_t m_recordTimeToLiveInSeconds = 0U;
std::uint64_t m_cacheSizeInBytes = 0U;
bool m_forceTimeBasedEviction = false;
bool IsCachingModule() const
{
static const std::string c_cachingModulePrefix{ "cache" };
return m_module.substr(0, c_cachingModulePrefix.size()) == c_cachingModulePrefix;
}
};
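// DataGenerator pre-builds all keys and a shared value buffer up front so that the measured loops
// pay only for hash-table operations; GetKey()/GetValue() take the index modulo the data set size,
// so any index maps back into the generated data.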
class DataGenerator
{
public:
DataGenerator(
std::size_t dataSetSize,
std::uint16_t keySize,
std::uint32_t valueSize,
bool randomizeValueSize,
bool isDebugMode = false)
: m_dataSetSize{ dataSetSize }
, m_keySize{ keySize }
{
if (isDebugMode)
{
std::cout << "Generating data set with size = " << dataSetSize << std::endl;
}
Timer timer;
// Populate keys.
m_keys.resize(m_dataSetSize);
m_keysBuffer.resize(m_dataSetSize);
for (std::size_t i = 0; i < m_dataSetSize; ++i)
{
m_keysBuffer[i].resize(keySize);
std::generate(m_keysBuffer[i].begin(), m_keysBuffer[i].end(), std::rand);
std::snprintf(reinterpret_cast<char*>(m_keysBuffer[i].data()), keySize, "%llu", i);
m_keys[i].m_data = m_keysBuffer[i].data();
m_keys[i].m_size = m_keySize;
}
// Populate values buffer. Assumes srand() is already called.
std::generate(m_valuesBuffer.begin(), m_valuesBuffer.end(), std::rand);
// Populate values.
m_values.resize(m_dataSetSize);
std::size_t currentIndex = 0;
for (std::size_t i = 0; i < m_dataSetSize; ++i)
{
m_values[i].m_data = &m_valuesBuffer[currentIndex % c_valuesBufferSize];
m_values[i].m_size = randomizeValueSize ? rand() % valueSize : valueSize;
currentIndex += valueSize;
}
if (isDebugMode)
{
std::cout << "Finished generating data in "
<< timer.GetElapsedTime().count() << " microseconds" << std::endl;
}
}
L4::IReadOnlyHashTable::Key GetKey(std::size_t index) const
{
return m_keys[index % m_dataSetSize];
}
L4::IReadOnlyHashTable::Value GetValue(std::size_t index) const
{
return m_values[index % m_dataSetSize];
}
private:
std::size_t m_dataSetSize;
std::uint16_t m_keySize;
std::vector<std::vector<std::uint8_t>> m_keysBuffer;
std::vector<L4::IReadOnlyHashTable::Key> m_keys;
std::vector<L4::IReadOnlyHashTable::Value> m_values;
static const std::size_t c_valuesBufferSize = 64 * 1024;
std::array<std::uint8_t, c_valuesBufferSize> m_valuesBuffer;
};
void PrintHardwareInfo()
{
SYSTEM_INFO sysInfo;
GetSystemInfo(&sysInfo);
printf("\n");
printf("Hardware information: \n");
printf("-------------------------------------\n");
printf("%22s | %10u |\n", "OEM ID", sysInfo.dwOemId);
printf("%22s | %10u |\n", "Number of processors", sysInfo.dwNumberOfProcessors);
printf("%22s | %10u |\n", "Page size", sysInfo.dwPageSize);
printf("%22s | %10u |\n", "Processor type", sysInfo.dwProcessorType);
printf("-------------------------------------\n");
printf("\n");
}
void PrintOptions(const CommandLineOptions& options)
{
printf("------------------------------------------------------\n");
printf("%39s | %10llu |\n", "Data set size", options.m_dataSetSize);
printf("%39s | %10lu |\n", "Number of hash table buckets", options.m_numBuckets);
printf("%39s | %10lu |\n", "Key size", options.m_keySize);
printf("%39s | %10lu |\n", "Value type", options.m_valueSize);
printf("%39s | %10lu |\n", "Number of iterations per GetContext()", options.m_numIterationsPerGetContext);
printf("%39s | %10lu |\n", "Epoch processing interval (ms)", options.m_epochProcessingIntervalInMilli);
printf("%39s | %10lu |\n", "Number of actions queue", options.m_numActionsQueue);
if (options.IsCachingModule())
{
printf("%39s | %10lu |\n", "Record time to live (s)", options.m_recordTimeToLiveInSeconds);
printf("%39s | %10llu |\n", "Cache size in bytes", options.m_cacheSizeInBytes);
printf("%39s | %10lu |\n", "Force time-based eviction", options.m_forceTimeBasedEviction);
}
printf("------------------------------------------------------\n\n");
}
void PrintHashTableCounters(const L4::HashTablePerfData& perfData)
{
printf("HashTableCounter:\n");
printf("----------------------------------------------------\n");
for (auto i = 0; i < static_cast<std::uint16_t>(L4::HashTablePerfCounter::Count); ++i)
{
printf("%35s | %12llu |\n",
L4::c_hashTablePerfCounterNames[i],
perfData.Get(static_cast<L4::HashTablePerfCounter>(i)));
}
printf("----------------------------------------------------\n\n");
}
L4::HashTableConfig CreateHashTableConfig(const CommandLineOptions& options)
{
return L4::HashTableConfig(
"Table1",
L4::HashTableConfig::Setting{ options.m_numBuckets },
options.IsCachingModule()
? boost::optional<L4::HashTableConfig::Cache>{
L4::HashTableConfig::Cache{
options.m_cacheSizeInBytes,
std::chrono::seconds{ options.m_recordTimeToLiveInSeconds },
options.m_forceTimeBasedEviction }}
: boost::none);
}
L4::EpochManagerConfig CreateEpochManagerConfig(const CommandLineOptions& options)
{
return L4::EpochManagerConfig(
10000U,
std::chrono::milliseconds(options.m_epochProcessingIntervalInMilli),
options.m_numActionsQueue);
}
void ReadPerfTest(const CommandLineOptions& options)
{
printf("Performing read-perf which reads all the records inserted:\n");
PrintOptions(options);
auto dataGenerator = std::make_unique<DataGenerator>(
options.m_dataSetSize,
options.m_keySize,
options.m_valueSize,
options.m_randomizeValueSize);
L4::LocalMemory::HashTableService service(CreateEpochManagerConfig(options));
const auto hashTableIndex = service.AddHashTable(CreateHashTableConfig(options));
// Insert data set.
auto context = service.GetContext();
auto& hashTable = context[hashTableIndex];
std::vector<std::uint32_t> randomIndices(options.m_dataSetSize);
for (std::uint32_t i = 0U; i < options.m_dataSetSize; ++i)
{
randomIndices[i] = i;
}
if (options.m_numThreads > 0)
{
// Randomize the indices when multiple threads are running so as not to
// skew the results.
std::random_shuffle(randomIndices.begin(), randomIndices.end());
}
for (int i = 0; i < options.m_dataSetSize; ++i)
{
auto key = dataGenerator->GetKey(randomIndices[i]);
auto val = dataGenerator->GetValue(randomIndices[i]);
hashTable.Add(key, val);
}
std::vector<PerThreadInfoForReadTest> allInfo;
allInfo.resize(options.m_numThreads);
SynchronizedTimer overallTimer;
std::mutex mutex;
std::condition_variable cv;
const auto isCachingModule = options.IsCachingModule();
bool isReady = false;
const std::size_t dataSetSizePerThread = options.m_dataSetSize / options.m_numThreads;
for (std::uint16_t i = 0; i < options.m_numThreads; ++i)
{
auto& info = allInfo[i];
std::size_t startIndex = i * dataSetSizePerThread;
info.m_dataSetSize = (i + 1 == options.m_numThreads)
? options.m_dataSetSize - startIndex
: dataSetSizePerThread;
info.m_thread = std::thread([=, &service, &dataGenerator, &info, &mutex, &cv, &isReady, &overallTimer]
{
{
std::unique_lock<std::mutex> lock(mutex);
cv.wait(lock, [&] { return isReady == true; });
}
overallTimer.Start();
Timer totalTimer;
Timer getTimer;
std::size_t iteration = 0;
bool isDone = false;
while (!isDone)
{
auto context = service.GetContext();
auto& hashTable = context[hashTableIndex];
for (std::uint32_t j = 0; !isDone && j < options.m_numIterationsPerGetContext; ++j)
{
auto key = dataGenerator->GetKey(startIndex + iteration);
L4::IReadOnlyHashTable::Value val;
if (!hashTable.Get(key, val) && !isCachingModule)
{
throw std::runtime_error("Look up failure is not allowed in this test.");
}
isDone = (++iteration == info.m_dataSetSize);
}
}
overallTimer.End();
info.m_totalTime = totalTimer.GetElapsedTime();
});
}
{
std::unique_lock<std::mutex> lock(mutex);
isReady = true;
}
// Now, start the benchmarking for all threads.
cv.notify_all();
for (auto& info : allInfo)
{
info.m_thread.join();
}
PrintHashTableCounters(service.GetContext()[hashTableIndex].GetPerfData());
printf("Result:\n");
printf(" | Total | |\n");
printf(" | micros/op | microseconds | DataSetSize |\n");
printf(" -----------------------------------------------------------\n");
for (std::size_t i = 0; i < allInfo.size(); ++i)
{
const auto& info = allInfo[i];
printf(" Thread #%llu | %11.3f | %14llu | %13llu |\n",
(i + 1),
static_cast<double>(info.m_totalTime.count()) / info.m_dataSetSize,
info.m_totalTime.count(),
info.m_dataSetSize);
}
printf(" -----------------------------------------------------------\n");
printf(" Overall | %11.3f | %14llu | %13llu |\n",
static_cast<double>(overallTimer.GetElapsedTime().count()) / options.m_dataSetSize,
overallTimer.GetElapsedTime().count(),
options.m_dataSetSize);
}
void WritePerfTest(const CommandLineOptions& options)
{
if (options.m_module == "overwrite-perf")
{
printf("Performing overwrite-perf (writing data with unique keys, then overwrite data with same keys):\n");
}
else
{
printf("Performing write-perf (writing data with unique keys):\n");
}
PrintOptions(options);
auto dataGenerator = std::make_unique<DataGenerator>(
options.m_dataSetSize,
options.m_keySize,
options.m_valueSize,
options.m_randomizeValueSize);
L4::LocalMemory::HashTableService service(CreateEpochManagerConfig(options));
const auto hashTableIndex = service.AddHashTable(CreateHashTableConfig(options));
if (options.m_module == "overwrite-perf")
{
std::vector<std::uint32_t> randomIndices(options.m_dataSetSize);
for (std::uint32_t i = 0U; i < options.m_dataSetSize; ++i)
{
randomIndices[i] = i;
}
if (options.m_numThreads > 0)
{
// Randomize the indices when multiple threads are running so as not to
// skew the results.
std::random_shuffle(randomIndices.begin(), randomIndices.end());
}
auto context = service.GetContext();
auto& hashTable = context[hashTableIndex];
for (int i = 0; i < options.m_dataSetSize; ++i)
{
const auto index = randomIndices[i];
auto key = dataGenerator->GetKey(index);
auto val = dataGenerator->GetValue(index);
hashTable.Add(key, val);
}
}
std::vector<PerThreadInfoForWriteTest> allInfo;
allInfo.resize(options.m_numThreads);
SynchronizedTimer overallTimer;
std::mutex mutex;
std::condition_variable cv;
bool isReady = false;
const std::size_t dataSetSizePerThread = options.m_dataSetSize / options.m_numThreads;
for (std::uint16_t i = 0; i < options.m_numThreads; ++i)
{
auto& info = allInfo[i];
std::size_t startIndex = i * dataSetSizePerThread;
info.m_dataSetSize = (i + 1 == options.m_numThreads)
? options.m_dataSetSize - startIndex
: dataSetSizePerThread;
info.m_thread = std::thread([=, &service, &dataGenerator, &info, &mutex, &cv, &isReady, &overallTimer]
{
{
std::unique_lock<std::mutex> lock(mutex);
cv.wait(lock, [&] { return isReady == true; });
}
overallTimer.Start();
Timer totalTimer;
Timer addTimer;
std::size_t iteration = 0;
bool isDone = false;
while (!isDone)
{
auto context = service.GetContext();
auto& hashTable = context[hashTableIndex];
for (std::uint32_t j = 0; !isDone && j < options.m_numIterationsPerGetContext; ++j)
{
const auto index = startIndex + iteration;
auto key = dataGenerator->GetKey(index);
auto val = dataGenerator->GetValue(index);
hashTable.Add(key, val);
isDone = (++iteration == info.m_dataSetSize);
}
}
info.m_totalTime = totalTimer.GetElapsedTime();
overallTimer.End();
});
}
{
std::unique_lock<std::mutex> lock(mutex);
isReady = true;
}
// Now, start the benchmarking for all threads.
cv.notify_all();
for (auto& info : allInfo)
{
info.m_thread.join();
}
PrintHashTableCounters(service.GetContext()[hashTableIndex].GetPerfData());
printf("Result:\n");
printf(" | Total | |\n");
printf(" | micros/op | microseconds | DataSetSize |\n");
printf(" -----------------------------------------------------------\n");
for (std::size_t i = 0; i < allInfo.size(); ++i)
{
const auto& info = allInfo[i];
printf(" Thread #%llu | %11.3f | %14llu | %13llu |\n",
(i + 1),
static_cast<double>(info.m_totalTime.count()) / info.m_dataSetSize,
info.m_totalTime.count(),
info.m_dataSetSize);
}
printf(" -----------------------------------------------------------\n");
printf(" Overall | %11.3f | %14llu | %13llu |\n",
static_cast<double>(overallTimer.GetElapsedTime().count()) / options.m_dataSetSize,
overallTimer.GetElapsedTime().count(),
options.m_dataSetSize);
if (options.m_numThreads == 1)
{
auto& perfData = service.GetContext()[hashTableIndex].GetPerfData();
std::uint64_t totalBytes = perfData.Get(L4::HashTablePerfCounter::TotalKeySize)
+ perfData.Get(L4::HashTablePerfCounter::TotalValueSize);
auto& info = allInfo[0];
double opsPerSec = static_cast<double>(info.m_dataSetSize) / info.m_totalTime.count() * 1000000.0;
double MBPerSec = static_cast<double>(totalBytes) / info.m_totalTime.count();
printf(" %10.3f ops/sec %10.3f MB/sec\n", opsPerSec, MBPerSec);
}
}
CommandLineOptions Parse(int argc, char** argv)
{
namespace po = boost::program_options;
po::options_description general("General options");
general.add_options()
("help", "produce a help message")
("help-module", po::value<std::string>(),
"produce a help for the following modules:\n"
" write-perf\n"
" overwrite-perf\n"
" read-perf\n"
" cache-read-perf\n"
" cache-write-perf\n")
("module", po::value<std::string>(),
"Runs the given module");
po::options_description benchmarkOptions("Benchmark options.");
benchmarkOptions.add_options()
("dataSetSize", po::value<std::size_t>()->default_value(CommandLineOptions::c_defaultDataSetSize), "data set size")
("numBuckets", po::value<std::uint32_t>()->default_value(CommandLineOptions::c_defaultNumBuckets), "number of buckets")
("keySize", po::value<std::uint16_t>()->default_value(CommandLineOptions::c_defaultKeySize), "key size in bytes")
("valueSize", po::value<std::uint32_t>()->default_value(CommandLineOptions::c_defaultValueSize), "value size in bytes")
("randomizeValueSize", "randomize value size")
("numIterationsPerGetContext", po::value<std::uint32_t>()->default_value(CommandLineOptions::c_defaultNumIterationsPerGetContext), "number of iterations per GetContext()")
("numThreads", po::value<std::uint16_t>()->default_value(CommandLineOptions::c_defaultNumThreads), "number of threads to create")
("epochProcessingInterval", po::value<std::uint32_t>()->default_value(CommandLineOptions::c_defaultEpochProcessingIntervalInMilli), "epoch processing interval (ms)")
("numActionsQueue", po::value<std::uint8_t>()->default_value(CommandLineOptions::c_defaultNumActionsQueue), "number of actions queue")
("recordTimeToLive", po::value<std::uint32_t>()->default_value(CommandLineOptions::c_defaultRecordTimeToLiveInSeconds), "record time to live (s)")
("cacheSize", po::value<std::uint64_t>()->default_value(CommandLineOptions::c_defaultCacheSizeInBytes), "cache size in bytes")
("forceTimeBasedEviction", po::value<bool>()->default_value(CommandLineOptions::c_defaultForceTimeBasedEviction), "force time based eviction");
po::options_description all("Allowed options");
all.add(general).add(benchmarkOptions);
po::variables_map vm;
po::store(po::parse_command_line(argc, argv, all), vm);
po::notify(vm);
CommandLineOptions options;
if (vm.count("help"))
{
std::cout << all;
}
else if (vm.count("module"))
{
options.m_module = vm["module"].as<std::string>();
if (vm.count("dataSetSize"))
{
options.m_dataSetSize = vm["dataSetSize"].as<std::size_t>();
}
if (vm.count("numBuckets"))
{
options.m_numBuckets = vm["numBuckets"].as<std::uint32_t>();
}
if (vm.count("keySize"))
{
options.m_keySize = vm["keySize"].as<std::uint16_t>();
}
if (vm.count("valueSize"))
{
options.m_valueSize = vm["valueSize"].as<std::uint32_t>();
}
if (vm.count("randomizeValueSize"))
{
options.m_randomizeValueSize = true;
}
if (vm.count("numIterationsPerGetContext"))
{
options.m_numIterationsPerGetContext = vm["numIterationsPerGetContext"].as<std::uint32_t>();
}
if (vm.count("numThreads"))
{
options.m_numThreads = vm["numThreads"].as<std::uint16_t>();
}
if (vm.count("epochProcessingInterval"))
{
options.m_epochProcessingIntervalInMilli = vm["epochProcessingInterval"].as<std::uint32_t>();
}
if (vm.count("numActionsQueue"))
{
options.m_numActionsQueue = vm["numActionsQueue"].as<std::uint8_t>();
}
if (vm.count("recordTimeToLive"))
{
options.m_recordTimeToLiveInSeconds = vm["recordTimeToLive"].as<std::uint32_t>();
}
if (vm.count("cacheSize"))
{
options.m_cacheSizeInBytes = vm["cacheSize"].as<std::uint64_t>();
}
if (vm.count("forceTimeBasedEviction"))
{
options.m_forceTimeBasedEviction = vm["forceTimeBasedEviction"].as<bool>();
}
}
else
{
std::cout << all;
}
return options;
}
int main(int argc, char** argv)
{
auto options = Parse(argc, argv);
if (options.m_module.empty())
{
return 0;
}
std::srand(static_cast<unsigned int>(time(NULL)));
PrintHardwareInfo();
if (options.m_module == "write-perf"
|| options.m_module == "overwrite-perf"
|| options.m_module == "cache-write-perf")
{
WritePerfTest(options);
}
else if (options.m_module == "read-perf"
|| options.m_module == "cache-read-perf")
{
ReadPerfTest(options);
}
else
{
std::cout << "Unknown module: " << options.m_module << std::endl;
}
return 0;
}
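For reference, a typical invocation of the resulting L4.Benchmark binary (the name comes from the project's TargetName; the exact output path depends on the build configuration) might look like "L4.Benchmark.exe --module write-perf --dataSetSize 1000000 --numThreads 4" or "L4.Benchmark.exe --module cache-read-perf --cacheSize 1073741824 --recordTimeToLive 300". The option names correspond to the boost::program_options definitions in Parse() above, and any omitted option falls back to the c_default* constants in CommandLineOptions.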

View file

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<packages>
<package id="boost" version="1.63.0.0" targetFramework="native" />
<package id="boost_program_options-vc140" version="1.63.0.0" targetFramework="native" />
<package id="boost_thread-vc140" version="1.63.0.0" targetFramework="native" />
</packages>

84  Examples/Examples.vcxproj  Normal file
View file

@@ -0,0 +1,84 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{9672B9F5-84A6-4063-972C-A4DC23200B42}</ProjectGuid>
<ProjectName>Examples</ProjectName>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)inc\L4;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PreprocessorDefinitions>_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<Optimization Condition="'$(Configuration)|$(Platform)'=='Release|x64'">MaxSpeed</Optimization>
<InlineFunctionExpansion Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AnySuitable</InlineFunctionExpansion>
<IntrinsicFunctions Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</IntrinsicFunctions>
<ProgramDataBaseFileName>
</ProgramDataBaseFileName>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="main.cpp" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\Build\L4.vcxproj">
<Project>{b7846115-88f1-470b-a625-9de0c29229bb}</Project>
</ProjectReference>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Context.h" />
<ClInclude Include="EpochManager.h" />
<ClInclude Include="HashTableManager.h" />
<ClInclude Include="HashTableService.h" />
</ItemGroup>
<ItemGroup>
<None Include="packages.config" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
<Import Project="..\packages\boost.1.63.0.0\build\native\boost.targets" Condition="Exists('..\packages\boost.1.63.0.0\build\native\boost.targets')" />
<Import Project="..\packages\boost_thread-vc140.1.63.0.0\build\native\boost_thread-vc140.targets" Condition="Exists('..\packages\boost_thread-vc140.1.63.0.0\build\native\boost_thread-vc140.targets')" />
</ImportGroup>
<Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
<PropertyGroup>
<ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
</PropertyGroup>
<Error Condition="!Exists('..\packages\boost.1.63.0.0\build\native\boost.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\boost.1.63.0.0\build\native\boost.targets'))" />
<Error Condition="!Exists('..\packages\boost_thread-vc140.1.63.0.0\build\native\boost_thread-vc140.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\boost_thread-vc140.1.63.0.0\build\native\boost_thread-vc140.targets'))" />
</Target>
</Project>

View file

@@ -0,0 +1,39 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
<Filter Include="Resource Files">
<UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
<Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="main.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="HashTableService.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="EpochManager.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Context.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="HashTableManager.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<None Include="packages.config" />
</ItemGroup>
</Project>

98  Examples/main.cpp  Normal file
View file

@@ -0,0 +1,98 @@
#include "Log/IPerfLogger.h"
#include "LocalMemory/HashTableService.h"
#include <string>
#include <cstring>
#include <iostream>
#include <fstream>
#include <tchar.h>
#include <stdarg.h>
#include <memory>
using namespace L4;
class ConsolePerfLogger : public IPerfLogger
{
virtual void Log(const IData& perfData) override
{
for (auto i = 0; i < static_cast<std::uint16_t>(ServerPerfCounter::Count); ++i)
{
std::cout << c_serverPerfCounterNames[i] << ": "
<< perfData.GetServerPerfData().Get(static_cast<ServerPerfCounter>(i)) << std::endl;
}
const auto& hashTablesPerfData = perfData.GetHashTablesPerfData();
for (const auto& entry : hashTablesPerfData)
{
std::cout << "Hash table '" << entry.first << "'" << std::endl;
for (auto j = 0; j < static_cast<std::uint16_t>(HashTablePerfCounter::Count); ++j)
{
std::cout << c_hashTablePerfCounterNames[j] << ": "
<< entry.second.get().Get(static_cast<HashTablePerfCounter>(j)) << std::endl;
}
}
std::cout << std::endl;
}
};
int main(int argc, char** argv)
{
(void)argc;
(void)argv;
LocalMemory::HashTableService service;
auto index = service.AddHashTable(
HashTableConfig("Table1", HashTableConfig::Setting{ 1000000 }));
static constexpr int keySize = 100;
static constexpr int valSize = 2000;
char bufKey[keySize];
char bufVal[valSize];
IWritableHashTable::Key key;
key.m_data = reinterpret_cast<std::uint8_t*>(bufKey);
IWritableHashTable::Value val;
val.m_data = reinterpret_cast<std::uint8_t*>(bufVal);
std::ifstream file;
file.open(argv[1], std::ifstream::in);
std::cout << "Opening " << argv[1] << std::endl;
static const int BufferLength = 4096;
char buffer[BufferLength];
auto totalTime = 0U;
int numLines = 0;
while (file.getline(buffer, BufferLength))
{
auto context = service.GetContext();
auto& hashTable = context[index];
char* nextToken = nullptr;
const char* keyStr = strtok_s(buffer, "\t", &nextToken);
const char* valStr = strtok_s(nullptr, "\t", &nextToken);
key.m_data = reinterpret_cast<const std::uint8_t*>(keyStr);
key.m_size = static_cast<std::uint16_t>(strlen(keyStr));
val.m_data = reinterpret_cast<const std::uint8_t*>(valStr);
val.m_size = static_cast<std::uint32_t>(strlen(valStr));
hashTable.Add(key, val);
++numLines;
}
std::cout<< "Total Add() time" << totalTime << std::endl;
std::cout << "Added " << numLines << " lines." << std::endl;
return 0;
}
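Note: the example expects the path of a tab-separated input file as its first command-line argument, with one record per line in the form key<TAB>value (as parsed by the strtok_s calls above); for instance, a hypothetical line "hello<TAB>world" would add the key "hello" with the value "world".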

5  Examples/packages.config  Normal file
View file

@@ -0,0 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<packages>
<package id="boost" version="1.63.0.0" targetFramework="native" />
<package id="boost_thread-vc140" version="1.63.0.0" targetFramework="native" />
</packages>

40  L4.sln  Normal file
View file

@@ -0,0 +1,40 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 14
VisualStudioVersion = 14.0.25420.1
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "L4", "Build\L4.vcxproj", "{B7846115-88F1-470B-A625-9DE0C29229BB}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Unittests", "Unittests\Unittests.vcxproj", "{8122529E-61CB-430B-A089-B12E63FC361B}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Examples", "Examples\Examples.vcxproj", "{9672B9F5-84A6-4063-972C-A4DC23200B42}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Benchmark", "Benchmark\Benchmark.vcxproj", "{B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|x64 = Debug|x64
Release|x64 = Release|x64
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{B7846115-88F1-470B-A625-9DE0C29229BB}.Debug|x64.ActiveCfg = Debug|x64
{B7846115-88F1-470B-A625-9DE0C29229BB}.Debug|x64.Build.0 = Debug|x64
{B7846115-88F1-470B-A625-9DE0C29229BB}.Release|x64.ActiveCfg = Release|x64
{B7846115-88F1-470B-A625-9DE0C29229BB}.Release|x64.Build.0 = Release|x64
{8122529E-61CB-430B-A089-B12E63FC361B}.Debug|x64.ActiveCfg = Debug|x64
{8122529E-61CB-430B-A089-B12E63FC361B}.Debug|x64.Build.0 = Debug|x64
{8122529E-61CB-430B-A089-B12E63FC361B}.Release|x64.ActiveCfg = Release|x64
{8122529E-61CB-430B-A089-B12E63FC361B}.Release|x64.Build.0 = Release|x64
{9672B9F5-84A6-4063-972C-A4DC23200B42}.Debug|x64.ActiveCfg = Debug|x64
{9672B9F5-84A6-4063-972C-A4DC23200B42}.Debug|x64.Build.0 = Debug|x64
{9672B9F5-84A6-4063-972C-A4DC23200B42}.Release|x64.ActiveCfg = Release|x64
{9672B9F5-84A6-4063-972C-A4DC23200B42}.Release|x64.Build.0 = Release|x64
{B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}.Debug|x64.ActiveCfg = Debug|x64
{B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}.Debug|x64.Build.0 = Debug|x64
{B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}.Release|x64.ActiveCfg = Release|x64
{B8FBA54E-04F3-4CC2-BBB8-22B35EA00F33}.Release|x64.Build.0 = Release|x64
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
EndGlobal

View file

@@ -0,0 +1,615 @@
#include "stdafx.h"
#include "Utils.h"
#include "Mocks.h"
#include "CheckedAllocator.h"
#include "L4/HashTable/Common/Record.h"
#include "L4/HashTable/Cache/Metadata.h"
#include "L4/HashTable/Cache/HashTable.h"
#include <algorithm>
#include <sstream>
namespace L4
{
namespace UnitTests
{
using namespace HashTable::Cache;
using namespace std::chrono;
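// MockClock gives the tests explicit control over the epoch time (via SetEpochTime and
// IncrementEpochTime), so record expiration and time-based eviction can be exercised
// deterministically without real waits.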
class MockClock
{
public:
MockClock() = default;
seconds GetCurrentEpochTime() const
{
return s_currentEpochTime;
}
static void SetEpochTime(seconds time)
{
s_currentEpochTime = time;
}
static void IncrementEpochTime(seconds increment)
{
s_currentEpochTime += increment;
}
private:
static seconds s_currentEpochTime;
};
seconds MockClock::s_currentEpochTime{ 0U };
class CacheHashTableTestFixture
{
public:
using Allocator = CheckedAllocator<>;
using CacheHashTable = WritableHashTable<Allocator, MockClock>;
using ReadOnlyCacheHashTable = ReadOnlyHashTable<Allocator, MockClock>;
using HashTable = CacheHashTable::HashTable;
CacheHashTableTestFixture()
: m_allocator{}
, m_hashTable { HashTable::Setting{ 100U }, m_allocator }
, m_epochManager{}
{
MockClock::SetEpochTime(seconds{ 0U });
}
CacheHashTableTestFixture(const CacheHashTableTestFixture&) = delete;
CacheHashTableTestFixture& operator=(const CacheHashTableTestFixture&) = delete;
protected:
template <typename TCacheHashTable>
bool Get(TCacheHashTable& hashTable, const std::string& key, IReadOnlyHashTable::Value& value)
{
return hashTable.Get(
Utils::ConvertFromString<IReadOnlyHashTable::Key>(key.c_str()),
value);
}
void Add(CacheHashTable& hashTable, const std::string& key, const std::string& value)
{
hashTable.Add(
Utils::ConvertFromString<IReadOnlyHashTable::Key>(key.c_str()),
Utils::ConvertFromString<IReadOnlyHashTable::Value>(value.c_str()));
}
void Remove(CacheHashTable& hashTable, const std::string& key)
{
hashTable.Remove(Utils::ConvertFromString<IReadOnlyHashTable::Key>(key.c_str()));
}
template <typename TCacheHashTable>
bool CheckRecord(TCacheHashTable& hashTable, const std::string& key, const std::string& expectedValue)
{
IReadOnlyHashTable::Value value;
return Get(hashTable, key, value) && AreTheSame(value, expectedValue);
}
bool AreTheSame(const IReadOnlyHashTable::Value& actual, const std::string& expected)
{
return (actual.m_size == expected.size())
&& !memcmp(actual.m_data, expected.c_str(), actual.m_size);
}
template <typename Blob>
bool Exist(const Blob& actual, const std::vector<std::string>& expectedSet)
{
const std::string actualStr(
reinterpret_cast<const char*>(actual.m_data),
actual.m_size);
return std::find(expectedSet.cbegin(), expectedSet.cend(), actualStr) != expectedSet.cend();
}
Allocator m_allocator;
HashTable m_hashTable;
MockEpochManager m_epochManager;
MockClock m_clock;
};
BOOST_AUTO_TEST_SUITE(CacheHashTableTests)
BOOST_AUTO_TEST_CASE(MetadataTest)
{
std::vector<std::uint8_t> buffer(20);
// The following will test with 1..8 byte alignments.
for (std::uint16_t i = 0U; i < 8U; ++i)
{
std::uint32_t* metadataBuffer = reinterpret_cast<std::uint32_t*>(buffer.data() + i);
seconds currentEpochTime{ 0x7FABCDEF };
Metadata metadata{ metadataBuffer, currentEpochTime };
BOOST_CHECK(currentEpochTime == metadata.GetEpochTime());
// 10 seconds have elapsed.
currentEpochTime += seconds{ 10U };
// Check the expiration based on the time to live value.
BOOST_CHECK(!metadata.IsExpired(currentEpochTime, seconds{ 15 }));
BOOST_CHECK(!metadata.IsExpired(currentEpochTime, seconds{ 10 }));
BOOST_CHECK(metadata.IsExpired(currentEpochTime, seconds{ 5U }));
// Test access state.
BOOST_CHECK(!metadata.IsAccessed());
metadata.UpdateAccessStatus(true);
BOOST_CHECK(metadata.IsAccessed());
metadata.UpdateAccessStatus(false);
BOOST_CHECK(!metadata.IsAccessed());
}
}
BOOST_FIXTURE_TEST_CASE(ExpirationTest, CacheHashTableTestFixture)
{
// Don't care about eviction in this test case, so make the cache size big.
constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF;
constexpr seconds c_recordTimeToLive{ 20U };
CacheHashTable hashTable(
m_hashTable,
m_epochManager,
c_maxCacheSizeInBytes,
c_recordTimeToLive,
false);
const std::vector<std::pair<std::string, std::string>> c_keyValuePairs =
{
{ "key1", "value1" },
{ "key2", "value2" },
{ "key3", "value3" },
{ "key4", "value4" },
{ "key5", "value5" }
};
// Add 5 records at a different epoch time (10 seconds increment).
for (const auto& pair : c_keyValuePairs)
{
MockClock::IncrementEpochTime(seconds{ 10 });
Add(hashTable, pair.first, pair.second);
// Make sure the records can be retrieved right away. The record has not
// expired since the clock hasn't moved yet.
BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second));
}
const auto& perfData = hashTable.GetPerfData();
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::CacheHitCount, 5 }
});
// Now we have the following data sets:
// | Key | Value | Creation time |
// | key1 | value1 | 10 |
// | key2 | value2 | 20 |
// | key3 | value3 | 30 |
// | key4 | value4 | 40 |
// | key5 | value5 | 50 |
// And the current clock is at 50.
// Do look ups and check expired records.
for (const auto& pair : c_keyValuePairs)
{
IReadOnlyHashTable::Value value;
// Our time to live value is 20, so the key1 and key2 records should be expired.
if (pair.first == "key1" || pair.first == "key2")
{
BOOST_CHECK(!Get(hashTable, pair.first, value));
}
else
{
BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second));
}
}
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::CacheHitCount, 8 },
{ HashTablePerfCounter::CacheMissCount, 2 }
});
MockClock::IncrementEpochTime(seconds{ 100 });
// All the records should be expired now.
for (const auto& pair : c_keyValuePairs)
{
IReadOnlyHashTable::Value value;
BOOST_CHECK(!Get(hashTable, pair.first, value));
}
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::CacheHitCount, 8 },
{ HashTablePerfCounter::CacheMissCount, 7 }
});
}
BOOST_FIXTURE_TEST_CASE(CacheHashTableIteratorTest, CacheHashTableTestFixture)
{
// Don't care about eviction in this test case, so make the cache size big.
constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF;
constexpr seconds c_recordTimeToLive{ 20U };
CacheHashTable hashTable(
m_hashTable,
m_epochManager,
c_maxCacheSizeInBytes,
c_recordTimeToLive,
false);
const std::vector<std::string> c_keys = { "key1", "key2", "key3", "key4", "key5" };
const std::vector<std::string> c_vals = { "val1", "val2", "val3", "val4", "val5" };
// Add 5 records at a different epoch time (3 seconds increment).
for (std::size_t i = 0; i < c_keys.size(); ++i)
{
MockClock::IncrementEpochTime(seconds{ 3 });
Add(hashTable, c_keys[i], c_vals[i]);
}
// Now we have the following data sets:
// | Key | Value | Creation time |
// | key1 | val1 | 3 |
// | key2 | val2 | 6 |
// | key3 | val3 | 9 |
// | key4 | val4 | 12 |
// | key5 | val5 | 15 |
// And the current clock is at 15.
auto iterator = hashTable.GetIterator();
std::uint16_t numRecords = 0;
while (iterator->MoveNext())
{
++numRecords;
BOOST_CHECK(Exist(iterator->GetKey(), c_keys));
BOOST_CHECK(Exist(iterator->GetValue(), c_vals));
}
BOOST_CHECK_EQUAL(numRecords, 5);
// The clock becomes 30 and key1, key2 and key3 should expire.
MockClock::IncrementEpochTime(seconds{ 15 });
iterator = hashTable.GetIterator();
numRecords = 0;
while (iterator->MoveNext())
{
++numRecords;
BOOST_CHECK(
Exist(
iterator->GetKey(),
std::vector<std::string>{ c_keys.cbegin() + 2, c_keys.cend() }));
BOOST_CHECK(
Exist(
iterator->GetValue(),
std::vector<std::string>{ c_vals.cbegin() + 2, c_vals.cend() }));
}
BOOST_CHECK_EQUAL(numRecords, 2);
// The clock becomes 40 and all records should be expired now.
MockClock::IncrementEpochTime(seconds{ 10 });
iterator = hashTable.GetIterator();
while (iterator->MoveNext())
{
BOOST_CHECK(false);
}
}
BOOST_FIXTURE_TEST_CASE(TimeBasedEvictionTest, CacheHashTableTestFixture)
{
// We only care about time-based eviction in this test, so make the cache size big.
constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF;
constexpr seconds c_recordTimeToLive{ 10U };
// Hash table with one bucket makes testing the time-based eviction easy.
HashTable internalHashTable{ HashTable::Setting{ 1 }, m_allocator };
CacheHashTable hashTable(
internalHashTable,
m_epochManager,
c_maxCacheSizeInBytes,
c_recordTimeToLive,
true);
const std::vector<std::pair<std::string, std::string>> c_keyValuePairs =
{
{ "key1", "value1" },
{ "key2", "value2" },
{ "key3", "value3" },
{ "key4", "value4" },
{ "key5", "value5" }
};
for (const auto& pair : c_keyValuePairs)
{
Add(hashTable, pair.first, pair.second);
BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second));
}
const auto& perfData = hashTable.GetPerfData();
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::CacheHitCount, 5 },
{ HashTablePerfCounter::RecordsCount, 5 },
{ HashTablePerfCounter::EvictedRecordsCount, 0 },
});
MockClock::IncrementEpochTime(seconds{ 20 });
// All the records should be expired now.
for (const auto& pair : c_keyValuePairs)
{
IReadOnlyHashTable::Value value;
BOOST_CHECK(!Get(hashTable, pair.first, value));
}
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::CacheHitCount, 5 },
{ HashTablePerfCounter::CacheMissCount, 5 },
{ HashTablePerfCounter::RecordsCount, 5 },
{ HashTablePerfCounter::EvictedRecordsCount, 0 },
});
// Now try to add one record and all the expired records should be evicted.
const auto& keyValuePair = c_keyValuePairs[0];
Add(hashTable, keyValuePair.first, keyValuePair.second);
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, 1 },
{ HashTablePerfCounter::EvictedRecordsCount, 5 },
});
}
BOOST_FIXTURE_TEST_CASE(EvictAllRecordsTest, CacheHashTableTestFixture)
{
const auto& perfData = m_hashTable.m_perfData;
const auto initialTotalIndexSize = perfData.Get(HashTablePerfCounter::TotalIndexSize);
const std::uint64_t c_maxCacheSizeInBytes = 500 + initialTotalIndexSize;
constexpr seconds c_recordTimeToLive{ 5 };
CacheHashTable hashTable{
m_hashTable,
m_epochManager,
c_maxCacheSizeInBytes,
c_recordTimeToLive,
false };
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::EvictedRecordsCount, 0 },
});
const std::vector<std::pair<std::string, std::string>> c_keyValuePairs =
{
{ "key1", "value1" },
{ "key2", "value2" },
{ "key3", "value3" },
{ "key4", "value4" },
{ "key5", "value5" }
};
for (const auto& pair : c_keyValuePairs)
{
Add(hashTable, pair.first, pair.second);
}
using L4::HashTable::RecordSerializer;
// Variable key/value sizes.
const auto recordOverhead = RecordSerializer{ 0U, 0U }.CalculateRecordOverhead();
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, c_keyValuePairs.size() },
{ HashTablePerfCounter::TotalIndexSize, initialTotalIndexSize + (c_keyValuePairs.size() * recordOverhead) },
{ HashTablePerfCounter::EvictedRecordsCount, 0 },
});
// Make sure all the records added are present, and update the access status for each
// record in order to test that even accessed records are evicted when the cache is under memory pressure.
for (const auto& pair : c_keyValuePairs)
{
BOOST_CHECK(CheckRecord(hashTable, pair.first, pair.second));
}
// Now insert a record that will force all the records to be evicted due to size.
std::string bigRecordKeyStr(10, 'k');
std::string bigRecordValStr(500, 'v');
Add(hashTable, bigRecordKeyStr, bigRecordValStr);
// Make sure all the previously inserted records are evicted.
for (const auto& pair : c_keyValuePairs)
{
IReadOnlyHashTable::Value value;
BOOST_CHECK(!Get(hashTable, pair.first, value));
}
// Make sure the big record is inserted.
BOOST_CHECK(CheckRecord(hashTable, bigRecordKeyStr, bigRecordValStr));
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, 1 },
{ HashTablePerfCounter::TotalIndexSize, initialTotalIndexSize + (1 * recordOverhead) },
{ HashTablePerfCounter::EvictedRecordsCount, c_keyValuePairs.size() },
});
}
BOOST_FIXTURE_TEST_CASE(EvictRecordsBasedOnAccessStatusTest, CacheHashTableTestFixture)
{
const std::uint64_t c_maxCacheSizeInBytes
= 2000 + m_hashTable.m_perfData.Get(HashTablePerfCounter::TotalIndexSize);
const seconds c_recordTimeToLive{ 5 };
CacheHashTable hashTable(
m_hashTable,
m_epochManager,
c_maxCacheSizeInBytes,
c_recordTimeToLive,
false);
constexpr std::uint32_t c_valueSize = 100;
const std::string c_valStr(c_valueSize, 'v');
const auto& perfData = hashTable.GetPerfData();
std::uint16_t key = 1;
while ((static_cast<std::uint64_t>(perfData.Get(HashTablePerfCounter::TotalIndexSize))
+ perfData.Get(HashTablePerfCounter::TotalKeySize)
+ perfData.Get(HashTablePerfCounter::TotalValueSize)
+ c_valueSize)
< c_maxCacheSizeInBytes)
{
std::stringstream ss;
ss << "key" << key;
Add(hashTable, ss.str(), c_valStr);
++key;
}
// Make sure no eviction happened.
BOOST_CHECK_EQUAL(m_epochManager.m_numRegisterActionsCalled, 0U);
// Look up with the "key1" key to update the access state.
BOOST_CHECK(CheckRecord(hashTable, "key1", c_valStr));
// Now add a new key, which triggers an eviction that removes records other than the "key1" record.
Add(hashTable, "newkey", c_valStr);
// Now, eviction should have happened.
BOOST_CHECK_GE(m_epochManager.m_numRegisterActionsCalled, 1U);
// The "key1" record should not have been evicted.
BOOST_CHECK(CheckRecord(hashTable, "key1", c_valStr));
// Make sure the new key is actually added.
BOOST_CHECK(CheckRecord(hashTable, "newkey", c_valStr));
}
// This is similar to the one in ReadWriteHashTableTest, but is necessary since the cache store adds metadata values.
BOOST_FIXTURE_TEST_CASE(FixedKeyValueHashTableTest, CacheHashTableTestFixture)
{
// Fixed 4 byte keys and 6 byte values.
std::vector<HashTable::Setting> settings =
{
HashTable::Setting{ 100, 200, 4, 0 },
HashTable::Setting{ 100, 200, 0, 6 },
HashTable::Setting{ 100, 200, 4, 6 }
};
for (const auto& setting : settings)
{
// Don't care about eviction in this test case, so make the cache size big.
constexpr std::uint64_t c_maxCacheSizeInBytes = 0xFFFFFFFF;
constexpr seconds c_recordTimeToLive{ 20U };
HashTable hashTable{ setting, m_allocator };
CacheHashTable writableHashTable{
hashTable,
m_epochManager,
c_maxCacheSizeInBytes,
c_recordTimeToLive,
false };
ReadOnlyCacheHashTable readOnlyHashTable{ hashTable, c_recordTimeToLive };
constexpr std::uint8_t c_numRecords = 10;
// Add records.
for (std::uint8_t i = 0; i < c_numRecords; ++i)
{
Add(writableHashTable, "key" + std::to_string(i), "value" + std::to_string(i));
}
Utils::ValidateCounters(
writableHashTable.GetPerfData(),
{
{ HashTablePerfCounter::RecordsCount, 10 },
{ HashTablePerfCounter::BucketsCount, 100 },
{ HashTablePerfCounter::TotalKeySize, 40 },
{ HashTablePerfCounter::TotalValueSize, 100 },
{ HashTablePerfCounter::MinKeySize, 4 },
{ HashTablePerfCounter::MaxKeySize, 4 },
{ HashTablePerfCounter::MinValueSize, 10 },
{ HashTablePerfCounter::MaxValueSize, 10 }
});
// Validate all the records added.
for (std::uint8_t i = 0; i < c_numRecords; ++i)
{
CheckRecord(readOnlyHashTable, "key" + std::to_string(i), "value" + std::to_string(i));
}
// Remove first half of the records.
for (std::uint8_t i = 0; i < c_numRecords / 2; ++i)
{
Remove(writableHashTable, "key" + std::to_string(i));
}
Utils::ValidateCounters(
writableHashTable.GetPerfData(),
{
{ HashTablePerfCounter::RecordsCount, 5 },
{ HashTablePerfCounter::BucketsCount, 100 },
{ HashTablePerfCounter::TotalKeySize, 20 },
{ HashTablePerfCounter::TotalValueSize, 50 }
});
// Verify the records.
for (std::uint8_t i = 0; i < c_numRecords; ++i)
{
if (i < (c_numRecords / 2))
{
IReadOnlyHashTable::Value value;
BOOST_CHECK(!Get(readOnlyHashTable, "key" + std::to_string(i), value));
}
else
{
CheckRecord(readOnlyHashTable, "key" + std::to_string(i), "value" + std::to_string(i));
}
}
// Expire all the records.
MockClock::IncrementEpochTime(seconds{ 100 });
// Verify the records.
for (std::uint8_t i = 0; i < c_numRecords; ++i)
{
IReadOnlyHashTable::Value value;
BOOST_CHECK(!Get(readOnlyHashTable, "key" + std::to_string(i), value));
}
}
}
BOOST_AUTO_TEST_SUITE_END()
} // namespace UnitTests
} // namespace L4

View file

@@ -0,0 +1,67 @@
#pragma once
#include <memory>
#include <set>
#include <boost/test/unit_test.hpp>
namespace L4
{
namespace UnitTests
{
struct AllocationAddressHolder : public std::set<void*>
{
~AllocationAddressHolder()
{
BOOST_REQUIRE(empty());
}
};
template <typename T = void>
class CheckedAllocator : public std::allocator<T>
{
public:
using Base = std::allocator<T>;
template<class U>
struct rebind
{
typedef CheckedAllocator<U> other;
};
CheckedAllocator()
: m_allocationAddresses{ std::make_shared<AllocationAddressHolder>() }
{}
CheckedAllocator(const CheckedAllocator<T>&) = default;
template<class U>
CheckedAllocator(const CheckedAllocator<U>& other)
: m_allocationAddresses{ other.m_allocationAddresses }
{}
template<class U>
CheckedAllocator<T>& operator=(const CheckedAllocator<U>& other)
{
m_allocationAddresses = other.m_allocationAddresses;
return (*this);
}
pointer allocate(std::size_t count, std::allocator<void>::const_pointer hint = 0)
{
auto address = Base::allocate(count, hint);
BOOST_REQUIRE(m_allocationAddresses->insert(address).second);
return address;
}
void deallocate(pointer ptr, std::size_t count)
{
BOOST_REQUIRE(m_allocationAddresses->erase(ptr) == 1);
Base::deallocate(ptr, count);
}
std::shared_ptr<AllocationAddressHolder> m_allocationAddresses;
};
} // namespace UnitTests
} // namespace L4

View file

@@ -0,0 +1,187 @@
#include "stdafx.h"
#include "Utils.h"
#include "L4/Epoch/EpochQueue.h"
#include "L4/Epoch/EpochActionManager.h"
#include "L4/LocalMemory/EpochManager.h"
#include "L4/Log/PerfCounter.h"
#include "L4/Utils/Lock.h"
#include <atomic>
namespace L4
{
namespace UnitTests
{
BOOST_AUTO_TEST_SUITE(EpochManagerTests)
BOOST_AUTO_TEST_CASE(EpochRefManagerTest)
{
std::uint64_t currentEpochCounter = 5U;
const std::uint32_t c_epochQueueSize = 100U;
using EpochQueue = EpochQueue<
boost::shared_lock_guard<L4::Utils::ReaderWriterLockSlim>,
std::lock_guard<L4::Utils::ReaderWriterLockSlim>>;
EpochQueue epochQueue(currentEpochCounter, c_epochQueueSize);
// Initially the ref count at the current epoch counter should be 0.
BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 0U);
EpochRefManager<EpochQueue> epochManager(epochQueue);
BOOST_CHECK_EQUAL(epochManager.AddRef(), currentEpochCounter);
// Validate that a reference count is incremented at the current epoch counter.
BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 1U);
epochManager.RemoveRef(currentEpochCounter);
// Validate that a reference count is back to 0.
BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 0U);
// Decrementing a reference counter when it is already 0 will result in an exception.
CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
epochManager.RemoveRef(currentEpochCounter);,
"Reference counter is invalid.");
}
BOOST_AUTO_TEST_CASE(EpochCounterManagerTest)
{
std::uint64_t currentEpochCounter = 0U;
const std::uint32_t c_epochQueueSize = 100U;
using EpochQueue = EpochQueue<
boost::shared_lock_guard<L4::Utils::ReaderWriterLockSlim>,
std::lock_guard<L4::Utils::ReaderWriterLockSlim>>;
EpochQueue epochQueue(currentEpochCounter, c_epochQueueSize);
EpochCounterManager<EpochQueue> epochCounterManager(epochQueue);
// If RemoveUnreferenceEpochCounters() is called when m_frontIndex and m_backIndex are
// the same, it will just return either value.
BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter);
// Add two epoch counts.
++currentEpochCounter;
++currentEpochCounter;
epochCounterManager.AddNewEpoch();
epochCounterManager.AddNewEpoch();
BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, 0U);
BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
BOOST_CHECK_EQUAL(epochQueue.m_refCounts[epochQueue.m_frontIndex], 0U);
// Since the m_frontIndex's reference count was zero, it will be incremented
// all the way to currentEpochCounter.
BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter);
BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter);
BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
EpochRefManager<EpochQueue> epochRefManager(epochQueue);
// Now add a reference at the currentEpochCounter;
const auto epochCounterReferenced = epochRefManager.AddRef();
BOOST_CHECK_EQUAL(epochCounterReferenced, currentEpochCounter);
// Calling RemoveUnreferenceEpochCounters() should just return currentEpochCounter
// since m_frontIndex and m_backIndex are the same (not affected by adding a reference yet).
BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter);
BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter);
BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
// Add one epoch count.
++currentEpochCounter;
epochCounterManager.AddNewEpoch();
// Now RemoveUnreferenceEpochCounters() should return epochCounterReferenced because
// of the reference count.
BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), epochCounterReferenced);
BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, epochCounterReferenced);
BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
// Remove the reference.
epochRefManager.RemoveRef(epochCounterReferenced);
// Now RemoveUnreferenceEpochCounters() should return currentEpochCounter and m_frontIndex
// should be in sync with m_backIndex.
BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(), currentEpochCounter);
BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter);
BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
}
BOOST_AUTO_TEST_CASE(EpochActionManagerTest)
{
EpochActionManager actionManager(2U);
bool isAction1Called = false;
bool isAction2Called = false;
auto action1 = [&]() { isAction1Called = true; };
auto action2 = [&]() { isAction2Called = true; };
// Register action1 and action2 at epoch counts 5 and 6, respectively.
actionManager.RegisterAction(5U, action1);
actionManager.RegisterAction(6U, action2);
BOOST_CHECK(!isAction1Called && !isAction2Called);
actionManager.PerformActions(4);
BOOST_CHECK(!isAction1Called && !isAction2Called);
actionManager.PerformActions(5);
BOOST_CHECK(!isAction1Called && !isAction2Called);
actionManager.PerformActions(6);
BOOST_CHECK(isAction1Called && !isAction2Called);
actionManager.PerformActions(7);
BOOST_CHECK(isAction1Called && isAction2Called);
}
BOOST_AUTO_TEST_CASE(EpochManagerTest)
{
ServerPerfData perfData;
LocalMemory::EpochManager epochManager(
EpochManagerConfig(100000U, std::chrono::milliseconds(5U), 1U),
perfData);
std::atomic<bool> isActionCalled{ false };
auto action = [&]() { isActionCalled = true; };
auto epochCounterReferenced = epochManager.GetEpochRefManager().AddRef();
epochManager.RegisterAction(action);
// Justification for using sleep_for in unit tests:
// - EpochManager already uses an internal thread which wakes up and performs a task
// at a given interval, and when the class is destroyed, there is a mechanism for
// waiting for the thread anyway. It's more valuable to test the end-to-end scenario this way.
// - The overall execution time for this test is less than 50 milliseconds.
auto initialEpochCounter = perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue);
while (perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue) - initialEpochCounter < 2)
{
std::this_thread::sleep_for(std::chrono::milliseconds(5));
}
BOOST_CHECK(!isActionCalled);
epochManager.GetEpochRefManager().RemoveRef(epochCounterReferenced);
initialEpochCounter = perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue);
while (perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue) - initialEpochCounter < 2)
{
std::this_thread::sleep_for(std::chrono::milliseconds(5));
}
BOOST_CHECK(isActionCalled);
}
BOOST_AUTO_TEST_SUITE_END()
} // namespace UnitTests
} // namespace L4


@ -0,0 +1,65 @@
#include "stdafx.h"
#include "Utils.h"
#include "Mocks.h"
#include "L4/HashTable/Config.h"
#include "L4/HashTable/IHashTable.h"
#include "L4/LocalMemory/HashTableManager.h"
namespace L4
{
namespace UnitTests
{
template <typename Store>
static void ValidateRecord(
const Store& store,
const char* expectedKeyStr,
const char* expectedValueStr)
{
IReadOnlyHashTable::Value actualValue;
auto expectedValue = Utils::ConvertFromString<IReadOnlyHashTable::Value>(expectedValueStr);
BOOST_CHECK(store.Get(Utils::ConvertFromString<IReadOnlyHashTable::Key>(expectedKeyStr), actualValue));
BOOST_CHECK(actualValue.m_size == expectedValue.m_size);
BOOST_CHECK(!memcmp(actualValue.m_data, expectedValue.m_data, expectedValue.m_size));
}
BOOST_AUTO_TEST_CASE(HashTableManagerTest)
{
MockEpochManager epochManager;
PerfData perfData;
LocalMemory::HashTableManager htManager;
const auto ht1Index = htManager.Add(
HashTableConfig("HashTable1", HashTableConfig::Setting(100U)),
epochManager,
std::allocator<void>());
const auto ht2Index = htManager.Add(
HashTableConfig("HashTable2", HashTableConfig::Setting(200U)),
epochManager,
std::allocator<void>());
{
auto& hashTable1 = htManager.GetHashTable("HashTable1");
hashTable1.Add(
Utils::ConvertFromString<IReadOnlyHashTable::Key>("HashTable1Key"),
Utils::ConvertFromString<IReadOnlyHashTable::Value>("HashTable1Value"));
auto& hashTable2 = htManager.GetHashTable("HashTable2");
hashTable2.Add(
Utils::ConvertFromString<IReadOnlyHashTable::Key>("HashTable2Key"),
Utils::ConvertFromString<IReadOnlyHashTable::Value>("HashTable2Value"));
}
ValidateRecord(
htManager.GetHashTable(ht1Index),
"HashTable1Key",
"HashTable1Value");
ValidateRecord(
htManager.GetHashTable(ht2Index),
"HashTable2Key",
"HashTable2Value");
}
} // namespace UnitTests
} // namespace L4


@ -0,0 +1,163 @@
#include "stdafx.h"
#include "L4/HashTable/Common/Record.h"
#include "Utils.h"
#include <boost/optional.hpp>
#include <string>
#include <vector>
namespace L4
{
namespace UnitTests
{
using namespace HashTable;
class HashTableRecordTestFixture
{
protected:
void Run(bool isFixedKey, bool isFixedValue, bool useMetaValue)
{
BOOST_TEST_MESSAGE(
"Running with isFixedKey=" << isFixedKey
<< ", isFixedValue=" << isFixedValue
<< ", useMetatValue=" << useMetaValue);
const std::string key = "TestKey";
const std::string value = "TestValue";
const std::string metaValue = "TestMetavalue";
const auto recordOverhead = (isFixedKey ? 0U : c_keyTypeSize) + (isFixedValue ? 0U : c_valueTypeSize);
Validate(
RecordSerializer{
isFixedKey ? static_cast<RecordSerializer::KeySize>(key.size()) : 0U,
isFixedValue ? static_cast<RecordSerializer::ValueSize>(value.size()) : 0U,
useMetaValue ? static_cast<RecordSerializer::ValueSize>(metaValue.size()) : 0U },
key,
value,
recordOverhead + key.size() + value.size() + (useMetaValue ? metaValue.size() : 0U),
recordOverhead,
useMetaValue ? boost::optional<const std::string&>{ metaValue } : boost::none);
}
private:
void Validate(
const RecordSerializer& serializer,
const std::string& keyStr,
const std::string& valueStr,
std::size_t expectedBufferSize,
std::size_t expectedRecordOverheadSize,
boost::optional<const std::string&> metadataStr = boost::none)
{
BOOST_CHECK_EQUAL(serializer.CalculateRecordOverhead(), expectedRecordOverheadSize);
const auto key = Utils::ConvertFromString<Record::Key>(keyStr.c_str());
const auto value = Utils::ConvertFromString<Record::Value>(valueStr.c_str());
const auto bufferSize = serializer.CalculateBufferSize(key, value);
BOOST_REQUIRE_EQUAL(bufferSize, expectedBufferSize);
std::vector<std::uint8_t> buffer(bufferSize);
RecordBuffer* recordBuffer = nullptr;
if (metadataStr)
{
auto metaValue = Utils::ConvertFromString<Record::Value>(metadataStr->c_str());
recordBuffer = serializer.Serialize(key, value, metaValue, buffer.data(), bufferSize);
}
else
{
recordBuffer = serializer.Serialize(key, value, buffer.data(), bufferSize);
}
const auto record = serializer.Deserialize(*recordBuffer);
// Make sure the serialized data is in a different memory location.
BOOST_CHECK(record.m_key.m_data != key.m_data);
BOOST_CHECK(record.m_value.m_data != value.m_data);
BOOST_CHECK(record.m_key == key);
if (metadataStr)
{
const std::string newValueStr = *metadataStr + valueStr;
const auto newValue = Utils::ConvertFromString<Record::Value>(newValueStr.c_str());
BOOST_CHECK(record.m_value == newValue);
}
else
{
BOOST_CHECK(record.m_value == value);
}
}
static constexpr std::size_t c_keyTypeSize = sizeof(Record::Key::size_type);
static constexpr std::size_t c_valueTypeSize = sizeof(Record::Value::size_type);
};
BOOST_FIXTURE_TEST_SUITE(HashTableRecordTests, HashTableRecordTestFixture)
BOOST_AUTO_TEST_CASE(RunAll)
{
// Run all permutations for Run(), which takes three booleans.
for (int i = 0; i < 8; ++i)
{
Run(
!!((i >> 2) & 1),
!!((i >> 1) & 1),
!!((i) & 1));
}
}
BOOST_AUTO_TEST_CASE(InvalidSizeTest)
{
std::vector<std::uint8_t> buffer(100U);
RecordSerializer serializer{ 4, 5 };
const std::string keyStr = "1234";
const std::string invalidStr = "999999";
const std::string valueStr = "12345";
const auto key = Utils::ConvertFromString<Record::Key>(keyStr.c_str());
const auto value = Utils::ConvertFromString<Record::Value>(valueStr.c_str());
const auto invalidKey = Utils::ConvertFromString<Record::Key>(invalidStr.c_str());
const auto invalidValue = Utils::ConvertFromString<Record::Value>(invalidStr.c_str());
CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
serializer.Serialize(invalidKey, value, buffer.data(), buffer.size()),
"Invalid key or value sizes are given.");
CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
serializer.Serialize(key, invalidValue, buffer.data(), buffer.size()),
"Invalid key or value sizes are given.");
CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
serializer.Serialize(invalidKey, invalidValue, buffer.data(), buffer.size()),
"Invalid key or value sizes are given.");
// The normal case shouldn't throw an exception.
serializer.Serialize(key, value, buffer.data(), buffer.size());
RecordSerializer serializerWithMetaValue{ 4, 5, 2 };
std::uint16_t metadata = 0;
Record::Value metaValue{
reinterpret_cast<std::uint8_t*>(&metadata),
sizeof(metadata) };
// The normal case shouldn't throw an exception.
serializerWithMetaValue.Serialize(key, value, metaValue, buffer.data(), buffer.size());
// A mismatching meta value size is given.
metaValue.m_size = 1;
CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
serializerWithMetaValue.Serialize(key, value, metaValue, buffer.data(), buffer.size()),
"Invalid meta value size is given.");
}
BOOST_AUTO_TEST_SUITE_END()
} // namespace UnitTests
} // namespace L4


@ -0,0 +1,52 @@
#include "stdafx.h"
#include "Mocks.h"
#include "Utils.h"
#include "L4/LocalMemory/HashTableService.h"
#include <vector>
#include <utility>
namespace L4
{
namespace UnitTests
{
BOOST_AUTO_TEST_CASE(HashTableServiceTest)
{
std::vector<std::pair<std::string, std::string>> dataSet;
for (std::uint16_t i = 0U; i < 100; ++i)
{
dataSet.emplace_back("key" + std::to_string(i), "value" + std::to_string(i));
}
LocalMemory::HashTableService htService;
htService.AddHashTable(
HashTableConfig("Table1", HashTableConfig::Setting{ 100U }));
htService.AddHashTable(
HashTableConfig(
"Table2",
HashTableConfig::Setting{ 1000U },
HashTableConfig::Cache{ 1024, std::chrono::seconds{ 1U }, false }));
for (const auto& data : dataSet)
{
htService.GetContext()["Table1"].Add(
Utils::ConvertFromString<IReadOnlyHashTable::Key>(data.first.c_str()),
Utils::ConvertFromString<IReadOnlyHashTable::Value>(data.second.c_str()));
}
// Smoke tests for looking up the data.
{
auto context = htService.GetContext();
for (const auto& data : dataSet)
{
IReadOnlyHashTable::Value val;
BOOST_CHECK(context["Table1"].Get(
Utils::ConvertFromString<IReadOnlyHashTable::Key>(data.first.c_str()),
val));
BOOST_CHECK(Utils::ConvertToString(val) == data.second);
}
}
}
} // namespace UnitTests
} // namespace L4

Unittests/Mocks.h Normal file

@ -0,0 +1,164 @@
#pragma once
#include "stdafx.h"
#include "L4/Epoch/IEpochActionManager.h"
#include "L4/Log/PerfLogger.h"
#include "L4/Serialization/IStream.h"
namespace L4
{
namespace UnitTests
{
class MockPerfLogger : public IPerfLogger
{
virtual void Log(const IData& data) override
{
(void)data;
}
};
struct MockEpochManager : public IEpochActionManager
{
MockEpochManager()
: m_numRegisterActionsCalled(0)
{
}
virtual void RegisterAction(Action&& action) override
{
++m_numRegisterActionsCalled;
action();
};
std::uint16_t m_numRegisterActionsCalled;
};
class StreamBase
{
public:
using StreamBuffer = std::vector<std::uint8_t>;
protected:
StreamBase() = default;
void Begin()
{
m_isBeginCalled = !m_isBeginCalled;
if (!m_isBeginCalled)
{
BOOST_FAIL("Begin() is called multiple times.");
}
}
void End()
{
if (!m_isBeginCalled)
{
BOOST_FAIL("Begin() is not called yet.");
}
m_isEndCalled = !m_isEndCalled;
if (!m_isEndCalled)
{
BOOST_FAIL("End() is called multiple times.");
}
}
void Validate()
{
if (!m_isBeginCalled)
{
BOOST_FAIL("Begin() is not called yet.");
}
if (m_isEndCalled)
{
BOOST_FAIL("End() is already called.");
}
}
bool IsValid() const
{
return m_isBeginCalled && m_isEndCalled;
}
bool m_isBeginCalled = false;
bool m_isEndCalled = false;
};
class MockStreamWriter : public IStreamWriter, private StreamBase
{
public:
virtual void Begin() override
{
StreamBase::Begin();
}
virtual void End() override
{
StreamBase::End();
}
virtual void Write(const std::uint8_t buffer[], std::size_t bufferSize) override
{
StreamBase::Validate();
m_buffer.insert(m_buffer.end(), buffer, buffer + bufferSize);
}
bool IsValid() const
{
return StreamBase::IsValid();
}
const StreamBuffer& GetStreamBuffer() const
{
return m_buffer;
}
private:
StreamBuffer m_buffer;
};
class MockStreamReader : public IStreamReader, private StreamBase
{
public:
explicit MockStreamReader(const StreamBuffer& buffer)
: m_buffer(buffer),
m_bufferIter(m_buffer.cbegin())
{
}
virtual void Begin() override
{
StreamBase::Begin();
}
virtual void End() override
{
// Make sure everything has been read from the stream.
BOOST_REQUIRE(m_bufferIter == m_buffer.end());
StreamBase::End();
}
virtual void Read(std::uint8_t buffer[], std::size_t bufferSize) override
{
StreamBase::Validate();
std::copy(m_bufferIter, m_bufferIter + bufferSize, buffer);
m_bufferIter += bufferSize;
}
bool IsValid() const
{
return StreamBase::IsValid();
}
private:
StreamBuffer m_buffer;
StreamBuffer::const_iterator m_bufferIter;
};
} // namespace UnitTests
} // namespace L4
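For reference, a minimal sketch of how the two stream mocks above are meant to be paired: whatever MockStreamWriter collects via Write() is handed to MockStreamReader and must be read back in full before End() is called. The byte values and buffer size below are arbitrary, chosen only for illustration.
MockStreamWriter writer;
writer.Begin();
const std::uint8_t data[] = { 1U, 2U, 3U, 4U };
writer.Write(data, sizeof(data));
writer.End();
BOOST_CHECK(writer.IsValid());
MockStreamReader reader(writer.GetStreamBuffer());
reader.Begin();
std::uint8_t readBack[sizeof(data)] = {};
reader.Read(readBack, sizeof(readBack));
reader.End(); // Passes only because the entire buffer has been consumed.
BOOST_CHECK(reader.IsValid());
BOOST_CHECK(!memcmp(data, readBack, sizeof(data)));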

Unittests/PerfInfoTest.cpp Normal file

@ -0,0 +1,104 @@
#include "stdafx.h"
#include "L4/Log/PerfLogger.h"
#include <limits>
namespace L4
{
namespace LocalMemory
{
void CheckMinCounters(const HashTablePerfData& htPerfData)
{
const auto maxValue = (std::numeric_limits<std::int64_t>::max)();
// Check that the min counter values are correctly initialized to the max value.
BOOST_CHECK_EQUAL(htPerfData.Get(HashTablePerfCounter::MinValueSize), maxValue);
BOOST_CHECK_EQUAL(htPerfData.Get(HashTablePerfCounter::MinKeySize), maxValue);
}
BOOST_AUTO_TEST_CASE(PerfCountersTest)
{
enum class TestCounter
{
Counter = 0,
Count
};
PerfCounters<TestCounter> perfCounters;
BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 0);
perfCounters.Set(TestCounter::Counter, 10);
BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10);
perfCounters.Increment(TestCounter::Counter);
BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 11);
perfCounters.Decrement(TestCounter::Counter);
BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10);
perfCounters.Add(TestCounter::Counter, 5);
BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 15);
perfCounters.Subtract(TestCounter::Counter, 10);
BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 5);
perfCounters.Max(TestCounter::Counter, 10);
BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10);
perfCounters.Max(TestCounter::Counter, 9);
BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10);
perfCounters.Min(TestCounter::Counter, 1);
BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 1);
perfCounters.Min(TestCounter::Counter, 10);
BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 1);
}
BOOST_AUTO_TEST_CASE(PerfDataTest)
{
PerfData testPerfData;
BOOST_CHECK(testPerfData.GetHashTablesPerfData().empty());
HashTablePerfData htPerfData1;
HashTablePerfData htPerfData2;
HashTablePerfData htPerfData3;
CheckMinCounters(htPerfData1);
CheckMinCounters(htPerfData2);
CheckMinCounters(htPerfData3);
testPerfData.AddHashTablePerfData("HT1", htPerfData1);
testPerfData.AddHashTablePerfData("HT2", htPerfData2);
testPerfData.AddHashTablePerfData("HT3", htPerfData3);
// Update the counters and check that they are correctly updated.
htPerfData1.Set(HashTablePerfCounter::TotalKeySize, 10);
htPerfData2.Set(HashTablePerfCounter::TotalKeySize, 20);
htPerfData3.Set(HashTablePerfCounter::TotalKeySize, 30);
// Check if the hash table perf data is correctly registered.
const auto& hashTablesPerfData = testPerfData.GetHashTablesPerfData();
BOOST_CHECK_EQUAL(hashTablesPerfData.size(), 3U);
{
auto htPerfDataIt = hashTablesPerfData.find("HT1");
BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end());
BOOST_CHECK_EQUAL(htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 10);
}
{
auto htPerfDataIt = hashTablesPerfData.find("HT2");
BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end());
BOOST_CHECK_EQUAL(htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 20);
}
{
auto htPerfDataIt = hashTablesPerfData.find("HT3");
BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end());
BOOST_CHECK_EQUAL(htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 30);
}
}
} // namespace LocalMemory
} // namespace L4


@ -0,0 +1,220 @@
#include "stdafx.h"
#include "Utils.h"
#include "Mocks.h"
#include "L4/HashTable/ReadWrite/HashTable.h"
#include "L4/HashTable/ReadWrite/Serializer.h"
#include "L4/Log/PerfCounter.h"
#include <string>
#include <vector>
namespace L4
{
namespace UnitTests
{
class LocalMemory
{
public:
template <typename T = void>
using Allocator = typename std::allocator<T>;
template <typename T>
using Deleter = typename std::default_delete<T>;
template <typename T>
using UniquePtr = std::unique_ptr<T>;
LocalMemory() = default;
template <typename T, typename... Args>
auto MakeUnique(Args&&... args)
{
return std::make_unique<T>(std::forward<Args>(args)...);
}
template <typename T = void>
auto GetAllocator()
{
return Allocator<T>();
}
template <typename T>
auto GetDeleter()
{
return Deleter<T>();
}
LocalMemory(const LocalMemory&) = delete;
LocalMemory& operator=(const LocalMemory&) = delete;
};
using namespace HashTable::ReadWrite;
BOOST_AUTO_TEST_SUITE(HashTableSerializerTests)
using KeyValuePair = std::pair<std::string, std::string>;
using KeyValuePairs = std::vector<KeyValuePair>;
using Memory = LocalMemory;
using Allocator = typename Memory:: template Allocator<>;
using HashTable = WritableHashTable<Allocator>::HashTable;
template <typename Serializer, typename Deserializer>
void ValidateSerializer(
const Serializer& serializer,
const Deserializer& deserializer,
std::uint8_t serializerVersion,
const KeyValuePairs& keyValuePairs,
const Utils::ExpectedCounterValues& expectedCounterValuesAfterLoad,
const Utils::ExpectedCounterValues& expectedCounterValuesAfterSerialization,
const Utils::ExpectedCounterValues& expectedCounterValuesAfterDeserialization)
{
LocalMemory memory;
MockEpochManager epochManager;
auto hashTableHolder{
memory.MakeUnique<HashTable>(
HashTable::Setting{ 5 }, memory.GetAllocator()) };
BOOST_CHECK(hashTableHolder != nullptr);
WritableHashTable<Allocator> writableHashTable(*hashTableHolder, epochManager);
// Insert the given key/value pairs to the hash table.
for (const auto& pair : keyValuePairs)
{
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(pair.first.c_str());
auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(pair.second.c_str());
writableHashTable.Add(key, val);
}
const auto& perfData = writableHashTable.GetPerfData();
Utils::ValidateCounters(perfData, expectedCounterValuesAfterLoad);
// Now write the hash table to the stream.
MockStreamWriter writer;
BOOST_CHECK(!writer.IsValid());
serializer.Serialize(*hashTableHolder, writer);
BOOST_CHECK(writer.IsValid());
Utils::ValidateCounters(perfData, expectedCounterValuesAfterSerialization);
// Read in the hash table from the stream and validate it.
MockStreamReader reader(writer.GetStreamBuffer());
// version == 0 means it was run through the HashTableSerializer, so the version check below can be skipped.
if (serializerVersion != 0)
{
std::uint8_t actualSerializerVersion = 0;
reader.Begin();
reader.Read(&actualSerializerVersion, sizeof(actualSerializerVersion));
BOOST_CHECK(actualSerializerVersion == serializerVersion);
}
else
{
BOOST_REQUIRE(typeid(L4::HashTable::ReadWrite::Serializer<HashTable>) == typeid(Serializer));
}
BOOST_CHECK(!reader.IsValid());
auto newHashTableHolder = deserializer.Deserialize(memory, reader);
BOOST_CHECK(reader.IsValid());
BOOST_CHECK(newHashTableHolder != nullptr);
WritableHashTable<Allocator> newWritableHashTable(*newHashTableHolder, epochManager);
const auto& newPerfData = newWritableHashTable.GetPerfData();
Utils::ValidateCounters(newPerfData, expectedCounterValuesAfterDeserialization);
// Make sure all the key/value pairs exist after deserialization.
for (const auto& pair : keyValuePairs)
{
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(pair.first.c_str());
IReadOnlyHashTable::Value val;
BOOST_CHECK(newWritableHashTable.Get(key, val));
BOOST_CHECK(Utils::ConvertToString(val) == pair.second);
}
}
BOOST_AUTO_TEST_CASE(CurrentSerializerTest)
{
ValidateSerializer(
Current::Serializer<HashTable>{},
Current::Deserializer<Memory, HashTable>{ L4::Utils::Properties{} },
Current::c_version,
{
{ "hello1", " world1" },
{ "hello2", " world2" },
{ "hello3", " world3" }
},
{
{ HashTablePerfCounter::RecordsCount, 3 },
{ HashTablePerfCounter::BucketsCount, 5 },
{ HashTablePerfCounter::TotalKeySize, 18 },
{ HashTablePerfCounter::TotalValueSize, 21 },
{ HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 },
{ HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 }
},
{
{ HashTablePerfCounter::RecordsCount, 3 },
{ HashTablePerfCounter::BucketsCount, 5 },
{ HashTablePerfCounter::TotalKeySize, 18 },
{ HashTablePerfCounter::TotalValueSize, 21 },
{ HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 },
{ HashTablePerfCounter::RecordsCountSavedFromSerializer, 3 }
},
{
{ HashTablePerfCounter::RecordsCount, 3 },
{ HashTablePerfCounter::BucketsCount, 5 },
{ HashTablePerfCounter::TotalKeySize, 18 },
{ HashTablePerfCounter::TotalValueSize, 21 },
{ HashTablePerfCounter::RecordsCountLoadedFromSerializer, 3 },
{ HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 }
});
}
BOOST_AUTO_TEST_CASE(HashTableSerializeTest)
{
// This test case tests the end-to-end scenario using the HashTableSerializer.
ValidateSerializer(
Serializer<HashTable>{},
Deserializer<Memory, HashTable>{ L4::Utils::Properties{} },
0U,
{
{ "hello1", " world1" },
{ "hello2", " world2" },
{ "hello3", " world3" }
},
{
{ HashTablePerfCounter::RecordsCount, 3 },
{ HashTablePerfCounter::BucketsCount, 5 },
{ HashTablePerfCounter::TotalKeySize, 18 },
{ HashTablePerfCounter::TotalValueSize, 21 },
{ HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 },
{ HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 }
},
{
{ HashTablePerfCounter::RecordsCount, 3 },
{ HashTablePerfCounter::BucketsCount, 5 },
{ HashTablePerfCounter::TotalKeySize, 18 },
{ HashTablePerfCounter::TotalValueSize, 21 },
{ HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0 },
{ HashTablePerfCounter::RecordsCountSavedFromSerializer, 3 }
},
{
{ HashTablePerfCounter::RecordsCount, 3 },
{ HashTablePerfCounter::BucketsCount, 5 },
{ HashTablePerfCounter::TotalKeySize, 18 },
{ HashTablePerfCounter::TotalValueSize, 21 },
{ HashTablePerfCounter::RecordsCountLoadedFromSerializer, 3 },
{ HashTablePerfCounter::RecordsCountSavedFromSerializer, 0 }
});
}
BOOST_AUTO_TEST_SUITE_END()
} // namespace UnitTests
} // namespace L4


@ -0,0 +1,676 @@
#include "stdafx.h"
#include "Utils.h"
#include "Mocks.h"
#include "CheckedAllocator.h"
#include "L4/Log/PerfCounter.h"
#include "L4/HashTable/ReadWrite/HashTable.h"
namespace L4
{
namespace UnitTests
{
using namespace HashTable::ReadWrite;
class ReadWriteHashTableTestFixture
{
protected:
using Allocator = CheckedAllocator<>;
using HashTable = WritableHashTable<Allocator>::HashTable;
ReadWriteHashTableTestFixture()
: m_allocator{}
, m_epochManager{}
{}
Allocator m_allocator;
MockEpochManager m_epochManager;
};
BOOST_FIXTURE_TEST_SUITE(ReadWriteHashTableTests, ReadWriteHashTableTestFixture)
BOOST_AUTO_TEST_CASE(HashTableTest)
{
HashTable hashTable{ HashTable::Setting{ 100, 5 }, m_allocator };
WritableHashTable<Allocator> writableHashTable(hashTable, m_epochManager);
ReadOnlyHashTable<Allocator> readOnlyHashTable(hashTable);
const auto& perfData = writableHashTable.GetPerfData();
{
// Check empty data.
std::string keyStr = "hello";
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
IReadOnlyHashTable::Value data;
BOOST_CHECK(!readOnlyHashTable.Get(key, data));
const auto c_counterMaxValue = (std::numeric_limits<HashTablePerfData::TValue>::max)();
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, 0 },
{ HashTablePerfCounter::BucketsCount, 100 },
{ HashTablePerfCounter::ChainingEntriesCount, 0 },
{ HashTablePerfCounter::TotalKeySize, 0 },
{ HashTablePerfCounter::TotalValueSize, 0 },
{ HashTablePerfCounter::MinKeySize, c_counterMaxValue },
{ HashTablePerfCounter::MaxKeySize, 0 },
{ HashTablePerfCounter::MinValueSize, c_counterMaxValue },
{ HashTablePerfCounter::MaxValueSize, 0 }
});
}
{
// First record added.
std::string keyStr = "hello";
std::string valStr = "world";
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
writableHashTable.Add(key, val);
IReadOnlyHashTable::Value value;
BOOST_CHECK(readOnlyHashTable.Get(key, value));
BOOST_CHECK(value.m_size == valStr.size());
BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, 1 },
{ HashTablePerfCounter::BucketsCount, 100 },
{ HashTablePerfCounter::ChainingEntriesCount, 0 },
{ HashTablePerfCounter::TotalKeySize, 5 },
{ HashTablePerfCounter::TotalValueSize, 5 },
{ HashTablePerfCounter::MinKeySize, 5 },
{ HashTablePerfCounter::MaxKeySize, 5 },
{ HashTablePerfCounter::MinValueSize, 5 },
{ HashTablePerfCounter::MaxValueSize, 5 }
});
}
{
// Second record added.
std::string keyStr = "hello2";
std::string valStr = "world2";
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
writableHashTable.Add(key, val);
IReadOnlyHashTable::Value value;
BOOST_CHECK(readOnlyHashTable.Get(key, value));
BOOST_CHECK(value.m_size == valStr.size());
BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, 2 },
{ HashTablePerfCounter::BucketsCount, 100 },
{ HashTablePerfCounter::ChainingEntriesCount, 0 },
{ HashTablePerfCounter::TotalKeySize, 11 },
{ HashTablePerfCounter::TotalValueSize, 11 },
{ HashTablePerfCounter::MinKeySize, 5 },
{ HashTablePerfCounter::MaxKeySize, 6 },
{ HashTablePerfCounter::MinValueSize, 5 },
{ HashTablePerfCounter::MaxValueSize, 6 }
});
}
{
// Update the key with value bigger than the existing values.
std::string keyStr = "hello";
std::string valStr = "world long string";
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
writableHashTable.Add(key, val);
IReadOnlyHashTable::Value value;
BOOST_CHECK(readOnlyHashTable.Get(key, value));
BOOST_CHECK(value.m_size == valStr.size());
BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 1);
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, 2 },
{ HashTablePerfCounter::BucketsCount, 100 },
{ HashTablePerfCounter::ChainingEntriesCount, 0 },
{ HashTablePerfCounter::TotalKeySize, 11 },
{ HashTablePerfCounter::TotalValueSize, 23 },
{ HashTablePerfCounter::MinKeySize, 5 },
{ HashTablePerfCounter::MaxKeySize, 6 },
{ HashTablePerfCounter::MinValueSize, 5 },
{ HashTablePerfCounter::MaxValueSize, 17 }
});
}
{
// Update the key with value smaller than the existing values.
std::string keyStr = "hello2";
std::string valStr = "wo";
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
writableHashTable.Add(key, val);
IReadOnlyHashTable::Value value;
BOOST_CHECK(readOnlyHashTable.Get(key, value));
BOOST_CHECK(value.m_size == valStr.size());
BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 2);
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, 2 },
{ HashTablePerfCounter::BucketsCount, 100 },
{ HashTablePerfCounter::ChainingEntriesCount, 0 },
{ HashTablePerfCounter::TotalKeySize, 11 },
{ HashTablePerfCounter::TotalValueSize, 19 },
{ HashTablePerfCounter::MinKeySize, 5 },
{ HashTablePerfCounter::MaxKeySize, 6 },
{ HashTablePerfCounter::MinValueSize, 2 },
{ HashTablePerfCounter::MaxValueSize, 17 }
});
}
{
// Remove the first key.
std::string keyStr = "hello";
std::string valStr = "";
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
BOOST_CHECK(writableHashTable.Remove(key));
BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 3);
// Note that the Remove() doesn't change Min/Max counters by design.
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, 1 },
{ HashTablePerfCounter::BucketsCount, 100 },
{ HashTablePerfCounter::ChainingEntriesCount, 0 },
{ HashTablePerfCounter::TotalKeySize, 6 },
{ HashTablePerfCounter::TotalValueSize, 2 },
{ HashTablePerfCounter::MinKeySize, 5 },
{ HashTablePerfCounter::MaxKeySize, 6 },
{ HashTablePerfCounter::MinValueSize, 2 },
{ HashTablePerfCounter::MaxValueSize, 17 }
});
// Remove the second key.
keyStr = "hello2";
key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
BOOST_CHECK(writableHashTable.Remove(key));
BOOST_CHECK(m_epochManager.m_numRegisterActionsCalled == 4);
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, 0 },
{ HashTablePerfCounter::BucketsCount, 100 },
{ HashTablePerfCounter::ChainingEntriesCount, 0 },
{ HashTablePerfCounter::TotalKeySize, 0 },
{ HashTablePerfCounter::TotalValueSize, 0 },
{ HashTablePerfCounter::MinKeySize, 5 },
{ HashTablePerfCounter::MaxKeySize, 6 },
{ HashTablePerfCounter::MinValueSize, 2 },
{ HashTablePerfCounter::MaxValueSize, 17 }
});
// Removing a key that doesn't exist.
BOOST_CHECK(!writableHashTable.Remove(key));
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, 0 },
{ HashTablePerfCounter::BucketsCount, 100 },
{ HashTablePerfCounter::ChainingEntriesCount, 0 },
{ HashTablePerfCounter::TotalKeySize, 0 },
{ HashTablePerfCounter::TotalValueSize, 0 },
{ HashTablePerfCounter::MinKeySize, 5 },
{ HashTablePerfCounter::MaxKeySize, 6 },
{ HashTablePerfCounter::MinValueSize, 2 },
{ HashTablePerfCounter::MaxValueSize, 17 }
});
}
}
BOOST_AUTO_TEST_CASE(HashTableWithOneBucketTest)
{
Allocator allocator;
HashTable hashTable{ HashTable::Setting{ 1 }, allocator };
WritableHashTable<Allocator> writableHashTable(hashTable, m_epochManager);
ReadOnlyHashTable<Allocator> readOnlyHashTable(hashTable);
const auto& perfData = writableHashTable.GetPerfData();
Utils::ValidateCounters(perfData, { { HashTablePerfCounter::ChainingEntriesCount, 0 } });
const auto initialTotalIndexSize = perfData.Get(HashTablePerfCounter::TotalIndexSize);
const std::size_t c_dataSetSize = HashTable::Entry::c_numDataPerEntry + 5U;
std::size_t expectedTotalKeySize = 0U;
std::size_t expectedTotalValueSize = 0U;
for (auto i = 0U; i < c_dataSetSize; ++i)
{
std::stringstream keyStream;
keyStream << "key" << i;
std::stringstream valStream;
valStream << "value" << i;
std::string keyStr = keyStream.str();
std::string valStr = valStream.str();
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
expectedTotalKeySize += key.m_size;
expectedTotalValueSize += val.m_size;
writableHashTable.Add(key, val);
IReadOnlyHashTable::Value value;
BOOST_CHECK(readOnlyHashTable.Get(key, value));
BOOST_CHECK(value.m_size == valStr.size());
BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
}
using L4::HashTable::RecordSerializer;
// Variable key/value sizes.
const auto recordOverhead = RecordSerializer{ 0U, 0U }.CalculateRecordOverhead();
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, c_dataSetSize },
{ HashTablePerfCounter::BucketsCount, 1 },
{ HashTablePerfCounter::MaxBucketChainLength, 2 },
{ HashTablePerfCounter::ChainingEntriesCount, 1 },
{ HashTablePerfCounter::TotalKeySize, expectedTotalKeySize },
{ HashTablePerfCounter::TotalValueSize, expectedTotalValueSize },
{
HashTablePerfCounter::TotalIndexSize,
initialTotalIndexSize + sizeof(HashTable::Entry) + (c_dataSetSize * recordOverhead)
}
});
// Now replace with new values.
expectedTotalValueSize = 0U;
for (auto i = 0U; i < c_dataSetSize; ++i)
{
std::stringstream keyStream;
keyStream << "key" << i;
std::stringstream valStream;
valStream << "val" << i;
std::string keyStr = keyStream.str();
std::string valStr = valStream.str();
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
expectedTotalValueSize += val.m_size;
writableHashTable.Add(key, val);
IReadOnlyHashTable::Value value;
BOOST_CHECK(readOnlyHashTable.Get(key, value));
BOOST_CHECK(value.m_size == valStr.size());
BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
}
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, c_dataSetSize },
{ HashTablePerfCounter::BucketsCount, 1 },
{ HashTablePerfCounter::MaxBucketChainLength, 2 },
{ HashTablePerfCounter::ChainingEntriesCount, 1 },
{ HashTablePerfCounter::TotalKeySize, expectedTotalKeySize },
{ HashTablePerfCounter::TotalValueSize, expectedTotalValueSize },
{
HashTablePerfCounter::TotalIndexSize,
initialTotalIndexSize + sizeof(HashTable::Entry) + (c_dataSetSize * recordOverhead)
}
});
// Now remove all key-value.
for (auto i = 0U; i < c_dataSetSize; ++i)
{
std::stringstream keyStream;
keyStream << "key" << i;
std::string keyStr = keyStream.str();
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
BOOST_CHECK(writableHashTable.Remove(key));
IReadOnlyHashTable::Value value;
BOOST_CHECK(!readOnlyHashTable.Get(key, value));
}
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, 0 },
{ HashTablePerfCounter::BucketsCount, 1 },
{ HashTablePerfCounter::MaxBucketChainLength, 2 },
{ HashTablePerfCounter::ChainingEntriesCount, 1 },
{ HashTablePerfCounter::TotalKeySize, 0 },
{ HashTablePerfCounter::TotalValueSize, 0 },
{
HashTablePerfCounter::TotalIndexSize,
initialTotalIndexSize + sizeof(HashTable::Entry)
}
});
// Try to add the records back to the same bucket (reusing the existing entries).
expectedTotalKeySize = 0U;
expectedTotalValueSize = 0U;
for (auto i = 0U; i < c_dataSetSize; ++i)
{
std::stringstream keyStream;
keyStream << "key" << i;
std::stringstream valStream;
valStream << "value" << i;
std::string keyStr = keyStream.str();
std::string valStr = valStream.str();
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
expectedTotalKeySize += key.m_size;
expectedTotalValueSize += val.m_size;
writableHashTable.Add(key, val);
IReadOnlyHashTable::Value value;
BOOST_CHECK(readOnlyHashTable.Get(key, value));
BOOST_CHECK(value.m_size == valStr.size());
BOOST_CHECK(!memcmp(value.m_data, valStr.c_str(), valStr.size()));
}
Utils::ValidateCounters(
perfData,
{
{ HashTablePerfCounter::RecordsCount, c_dataSetSize },
{ HashTablePerfCounter::BucketsCount, 1 },
{ HashTablePerfCounter::MaxBucketChainLength, 2 },
{ HashTablePerfCounter::ChainingEntriesCount, 1 },
{ HashTablePerfCounter::TotalKeySize, expectedTotalKeySize },
{ HashTablePerfCounter::TotalValueSize, expectedTotalValueSize },
{
HashTablePerfCounter::TotalIndexSize,
initialTotalIndexSize + sizeof(HashTable::Entry) + (c_dataSetSize * recordOverhead)
}
});
}
BOOST_AUTO_TEST_CASE(AddRemoveSameKeyTest)
{
HashTable hashTable{ HashTable::Setting{ 100, 5 }, m_allocator };
WritableHashTable<Allocator> writableHashTable(hashTable, m_epochManager);
ReadOnlyHashTable<Allocator> readOnlyHashTable(hashTable);
// Add two key/value pairs.
auto key1 = Utils::ConvertFromString<IReadOnlyHashTable::Key>("key1");
auto val1 = Utils::ConvertFromString<IReadOnlyHashTable::Value>("val1");
writableHashTable.Add(key1, val1);
IReadOnlyHashTable::Value valueRetrieved;
BOOST_CHECK(readOnlyHashTable.Get(key1, valueRetrieved));
BOOST_CHECK(valueRetrieved.m_size == val1.m_size);
BOOST_CHECK(!memcmp(valueRetrieved.m_data, val1.m_data, val1.m_size));
auto key2 = Utils::ConvertFromString<IReadOnlyHashTable::Key>("key2");
auto val2 = Utils::ConvertFromString<IReadOnlyHashTable::Value>("val2");
writableHashTable.Add(key2, val2);
BOOST_CHECK(readOnlyHashTable.Get(key2, valueRetrieved));
BOOST_CHECK(valueRetrieved.m_size == val2.m_size);
BOOST_CHECK(!memcmp(valueRetrieved.m_data, val2.m_data, val2.m_size));
const auto& perfData = writableHashTable.GetPerfData();
// Now remove the first record with key = "key1", which is at the head of the chain.
BOOST_CHECK(writableHashTable.Remove(key1));
BOOST_CHECK(!readOnlyHashTable.Get(key1, valueRetrieved));
Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 1);
// Now try to update the record with key = "key2". This should correctly update the existing record
// instead of using the empty slot created by removing the record with key = "key1".
auto newVal2 = Utils::ConvertFromString<IReadOnlyHashTable::Value>("newVal2");
writableHashTable.Add(key2, newVal2);
BOOST_CHECK(readOnlyHashTable.Get(key2, valueRetrieved));
BOOST_CHECK(valueRetrieved.m_size == newVal2.m_size);
BOOST_CHECK(!memcmp(valueRetrieved.m_data, newVal2.m_data, newVal2.m_size));
Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 1);
// Remove the record with key = "key2".
BOOST_CHECK(writableHashTable.Remove(key2));
BOOST_CHECK(!writableHashTable.Remove(key2));
Utils::ValidateCounter(perfData, HashTablePerfCounter::RecordsCount, 0);
}
BOOST_AUTO_TEST_CASE(FixedKeyValueHashTableTest)
{
// Fixed 4 byte keys and 6 byte values.
std::vector<HashTable::Setting> settings =
{
HashTable::Setting{ 100, 200, 4, 0 },
HashTable::Setting{ 100, 200, 0, 6 },
HashTable::Setting{ 100, 200, 4, 6 }
};
for (const auto& setting : settings)
{
HashTable hashTable{ setting, m_allocator };
WritableHashTable<Allocator> writableHashTable(hashTable, m_epochManager);
ReadOnlyHashTable<Allocator> readOnlyHashTable(hashTable);
constexpr std::uint8_t c_numRecords = 10;
for (std::uint8_t i = 0; i < c_numRecords; ++i)
{
const std::string keyStr = "key" + std::to_string(i);
const std::string valueStr = "value" + std::to_string(i);
writableHashTable.Add(
Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str()),
Utils::ConvertFromString<IReadOnlyHashTable::Value>(valueStr.c_str()));
}
Utils::ValidateCounters(
writableHashTable.GetPerfData(),
{
{ HashTablePerfCounter::RecordsCount, 10 },
{ HashTablePerfCounter::BucketsCount, 100 },
{ HashTablePerfCounter::TotalKeySize, 40 },
{ HashTablePerfCounter::TotalValueSize, 60 },
{ HashTablePerfCounter::MinKeySize, 4 },
{ HashTablePerfCounter::MaxKeySize, 4 },
{ HashTablePerfCounter::MinValueSize, 6 },
{ HashTablePerfCounter::MaxValueSize, 6 }
});
for (std::uint8_t i = 0; i < c_numRecords; ++i)
{
const std::string keyStr = "key" + std::to_string(i);
const std::string valueStr = "value" + std::to_string(i);
const auto expectedValue = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valueStr.c_str());
IReadOnlyHashTable::Value actualValue;
BOOST_CHECK(readOnlyHashTable.Get(
Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str()),
actualValue));
BOOST_CHECK(expectedValue == actualValue);
}
for (std::uint8_t i = 0; i < c_numRecords; ++i)
{
const std::string keyStr = "key" + std::to_string(i);
writableHashTable.Remove(
Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str()));
}
Utils::ValidateCounters(
writableHashTable.GetPerfData(),
{
{ HashTablePerfCounter::RecordsCount, 0 },
{ HashTablePerfCounter::BucketsCount, 100 },
{ HashTablePerfCounter::TotalKeySize, 0 },
{ HashTablePerfCounter::TotalValueSize, 0 }
});
}
}
BOOST_AUTO_TEST_CASE(HashTableIteratorTest)
{
Allocator allocator;
constexpr std::uint32_t c_numBuckets = 10;
HashTable hashTable{ HashTable::Setting{ c_numBuckets }, allocator };
WritableHashTable<Allocator> writableHashTable(hashTable, m_epochManager);
{
// Empty data set, thus the iterator cannot move.
auto iter = writableHashTable.GetIterator();
BOOST_CHECK(!iter->MoveNext());
CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
iter->GetKey(),
"HashTableIterator is not correctly used.");
CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
iter->GetValue(),
"HashTableIterator is not correctly used.");
}
using Buffer = std::vector<std::uint8_t>;
using BufferMap = std::map<Buffer, Buffer>;
BufferMap keyValueMap;
// The number of records should be such that it will create chained entries
// for at least one bucket. So it should be greater than HashTable::Entry::c_numDataPerEntry * number of buckets.
constexpr std::uint32_t c_numRecords = (HashTable::Entry::c_numDataPerEntry * c_numBuckets) + 1;
for (auto i = 0U; i < c_numRecords; ++i)
{
std::stringstream keyStream;
keyStream << "key" << i;
std::stringstream valStream;
valStream << "value" << i;
std::string keyStr = keyStream.str();
std::string valStr = valStream.str();
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(valStr.c_str());
writableHashTable.Add(key, val);
keyValueMap[Buffer(key.m_data, key.m_data + key.m_size)] = Buffer(val.m_data, val.m_data + val.m_size);
}
BOOST_REQUIRE(writableHashTable.GetPerfData().Get(HashTablePerfCounter::MaxBucketChainLength) >= 2);
BOOST_CHECK_EQUAL(keyValueMap.size(), c_numRecords);
{
BufferMap keyValueMapFromIterator;
// Validate the data using the iterator.
auto iter = writableHashTable.GetIterator();
for (auto i = 0U; i < c_numRecords; ++i)
{
BOOST_CHECK(iter->MoveNext());
const auto& key = iter->GetKey();
const auto& val = iter->GetValue();
keyValueMapFromIterator[Buffer(key.m_data, key.m_data + key.m_size)] = Buffer(val.m_data, val.m_data + val.m_size);
}
BOOST_CHECK(!iter->MoveNext());
BOOST_CHECK(keyValueMap == keyValueMapFromIterator);
// Reset should move the iterator to the beginning.
iter->Reset();
for (auto i = 0U; i < c_numRecords; ++i)
{
BOOST_CHECK(iter->MoveNext());
}
BOOST_CHECK(!iter->MoveNext());
}
// Remove half of the keys.
for (auto i = 0U; i < c_numRecords; ++i)
{
if (i % 2 == 0U)
{
std::stringstream keyStream;
keyStream << "key" << i;
std::string keyStr = keyStream.str();
auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>(keyStr.c_str());
BOOST_CHECK(writableHashTable.Remove(key));
keyValueMap.erase(Buffer(key.m_data, key.m_data + key.m_size));
}
}
BOOST_CHECK_EQUAL(keyValueMap.size(), c_numRecords / 2U);
// Validate that only the existing keys are iterated.
{
BufferMap keyValueMapFromIterator;
auto iter = writableHashTable.GetIterator();
for (auto i = 0U; i < c_numRecords / 2U; ++i)
{
BOOST_CHECK(iter->MoveNext());
const auto& key = iter->GetKey();
const auto& val = iter->GetValue();
keyValueMapFromIterator[Buffer(key.m_data, key.m_data + key.m_size)] =
Buffer(val.m_data, val.m_data + val.m_size);
}
BOOST_CHECK(!iter->MoveNext());
BOOST_CHECK(keyValueMap == keyValueMapFromIterator);
}
}
BOOST_AUTO_TEST_SUITE_END()
} // namespace UnitTests
} // namespace L4


@ -0,0 +1,41 @@
#include "stdafx.h"
#include "L4/HashTable/Common/SettingAdapter.h"
#include "L4/HashTable/Common/Record.h"
#include "CheckedAllocator.h"
namespace L4
{
namespace UnitTests
{
using SharedHashTable = HashTable::SharedHashTable<HashTable::RecordBuffer, CheckedAllocator<>>;
BOOST_AUTO_TEST_SUITE(SettingAdapterTests)
BOOST_AUTO_TEST_CASE(SettingAdapterTestWithDefaultValues)
{
HashTableConfig::Setting from{ 100U };
const auto to = HashTable::SettingAdapter{}.Convert<SharedHashTable>(from);
BOOST_CHECK_EQUAL(to.m_numBuckets, 100U);
BOOST_CHECK_EQUAL(to.m_numBucketsPerMutex, 1U);
BOOST_CHECK_EQUAL(to.m_fixedKeySize, 0U);
BOOST_CHECK_EQUAL(to.m_fixedValueSize, 0U);
}
BOOST_AUTO_TEST_CASE(SettingAdapterTestWithNonDefaultValues)
{
HashTableConfig::Setting from{ 100U, 10U, 5U, 20U };
const auto to = HashTable::SettingAdapter{}.Convert<SharedHashTable>(from);
BOOST_CHECK_EQUAL(to.m_numBuckets, 100U);
BOOST_CHECK_EQUAL(to.m_numBucketsPerMutex, 10U);
BOOST_CHECK_EQUAL(to.m_fixedKeySize, 5U);
BOOST_CHECK_EQUAL(to.m_fixedValueSize, 20U);
}
BOOST_AUTO_TEST_SUITE_END()
} // namespace UnitTests
} // namespace L4


@ -0,0 +1,94 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{8122529E-61CB-430B-A089-B12E63FC361B}</ProjectGuid>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v140</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<PropertyGroup>
<TargetName>L4.UnitTests</TargetName>
</PropertyGroup>
<ItemDefinitionGroup>
<Link>
<SubSystem>Console</SubSystem>
<AdditionalDependencies>netapi32.lib;%(AdditionalDependencies)</AdditionalDependencies>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
<Lib>
<TargetMachine>MachineX64</TargetMachine>
</Lib>
<ClCompile>
<AdditionalIncludeDirectories>$(SolutionDir)Unittests;$(SolutionDir)inc;$(SolutionDir)inc/L4;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<PrecompiledHeader>Use</PrecompiledHeader>
<AdditionalOptions>/Zm136 %(AdditionalOptions)</AdditionalOptions>
<PreprocessorDefinitions>_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<Optimization Condition="'$(Configuration)|$(Platform)'=='Release|x64'">MaxSpeed</Optimization>
<InlineFunctionExpansion Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AnySuitable</InlineFunctionExpansion>
<IntrinsicFunctions Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</IntrinsicFunctions>
<DisableSpecificWarnings>4482;%(DisableSpecificWarnings)</DisableSpecificWarnings>
</ClCompile>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="CacheHashTableTest.cpp" />
<ClCompile Include="EpochManagerTest.cpp" />
<ClCompile Include="HashTableManagerTest.cpp" />
<ClCompile Include="HashTableRecordTest.cpp" />
<ClCompile Include="ReadWriteHashTableSerializerTest.cpp" />
<ClCompile Include="HashTableServiceTest.cpp" />
<ClCompile Include="PerfInfoTest.cpp" />
<ClCompile Include="ReadWriteHashTableTest.cpp" />
<ClCompile Include="SettingAdapterTest.cpp" />
<ClCompile Include="stdafx.cpp">
<PrecompiledHeader>Create</PrecompiledHeader>
</ClCompile>
<ClCompile Include="Utils.cpp" />
<ClCompile Include="UtilsTest.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="CheckedAllocator.h" />
<ClInclude Include="Mocks.h" />
<ClInclude Include="stdafx.h" />
<ClInclude Include="Utils.h" />
</ItemGroup>
<ItemGroup>
<None Include="packages.config" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\Build\L4.vcxproj">
<Project>{b7846115-88f1-470b-a625-9de0c29229bb}</Project>
</ProjectReference>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<Import Project="..\packages\boost.1.63.0.0\build\native\boost.targets" Condition="Exists('..\packages\boost.1.63.0.0\build\native\boost.targets')" />
<Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
<PropertyGroup>
<ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
</PropertyGroup>
<Error Condition="!Exists('..\packages\boost.1.63.0.0\build\native\boost.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\boost.1.63.0.0\build\native\boost.targets'))" />
<Error Condition="!Exists('..\packages\boost_unit_test_framework-vc140.1.63.0.0\build\native\boost_unit_test_framework-vc140.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\boost_unit_test_framework-vc140.1.63.0.0\build\native\boost_unit_test_framework-vc140.targets'))" />
<Error Condition="!Exists('..\packages\boost_thread-vc140.1.63.0.0\build\native\boost_thread-vc140.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\boost_thread-vc140.1.63.0.0\build\native\boost_thread-vc140.targets'))" />
</Target>
<Import Project="..\packages\boost_unit_test_framework-vc140.1.63.0.0\build\native\boost_unit_test_framework-vc140.targets" Condition="Exists('..\packages\boost_unit_test_framework-vc140.1.63.0.0\build\native\boost_unit_test_framework-vc140.targets')" />
<Import Project="..\packages\boost_thread-vc140.1.63.0.0\build\native\boost_thread-vc140.targets" Condition="Exists('..\packages\boost_thread-vc140.1.63.0.0\build\native\boost_thread-vc140.targets')" />
</Project>


@ -0,0 +1,72 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
<Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
<Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
</Filter>
<Filter Include="Resource Files">
<UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
<Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="stdafx.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="HashTableServiceTest.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="UtilsTest.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="PerfInfoTest.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="HashTableManagerTest.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="EpochManagerTest.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="ReadWriteHashTableTest.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="Utils.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="ReadWriteHashTableSerializerTest.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="CacheHashTableTest.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="SettingAdapterTest.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="HashTableRecordTest.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="stdafx.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Utils.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Mocks.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="CheckedAllocator.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<None Include="packages.config" />
</ItemGroup>
</Project>

Unittests/Utils.cpp Normal file

@ -0,0 +1,37 @@
#include "stdafx.h"
#include "Utils.h"
namespace L4
{
namespace UnitTests
{
namespace Utils
{
void ValidateCounter(
const HashTablePerfData& actual,
HashTablePerfCounter perfCounter,
PerfCounters<HashTablePerfCounter>::TValue expectedValue)
{
BOOST_CHECK_MESSAGE(
actual.Get(perfCounter) == expectedValue,
c_hashTablePerfCounterNames[static_cast<std::size_t>(perfCounter)]
<< " counter: "
<< actual.Get(perfCounter)
<< " (actual) != " << expectedValue << " (expected).");
}
void ValidateCounters(
const HashTablePerfData& actual,
const ExpectedCounterValues& expected)
{
for (const auto& expectedCounter : expected)
{
ValidateCounter(actual, expectedCounter.first, expectedCounter.second);
}
}
} // namespace Utils
} // namespace UnitTests
} // namespace L4

Unittests/Utils.h Normal file

@ -0,0 +1,105 @@
#pragma once
#include <array>
#include <cstdint>
#include <string>
#include "L4/Log/PerfCounter.h"
#include "L4/Utils/Exception.h"
namespace L4
{
namespace UnitTests
{
// Macro CHECK_EXCEPTION_THROWN
#define CHECK_EXCEPTION_THROWN(statement) \
do { \
bool isExceptionThrown = false;\
try \
{ \
statement; \
} \
catch (const RuntimeException&) \
{ \
isExceptionThrown = true; \
} \
BOOST_CHECK(isExceptionThrown); \
} while (0)
#define CHECK_EXCEPTION_THROWN_WITH_MESSAGE(statement, message) \
do { \
bool isExceptionThrown = false; \
std::string exceptionMsg; \
try \
{ \
statement; \
} \
catch (const RuntimeException& ex) \
{ \
isExceptionThrown = true; \
exceptionMsg = ex.what(); \
BOOST_TEST_MESSAGE("Exception Message: " << exceptionMsg); \
} \
BOOST_CHECK(isExceptionThrown); \
BOOST_CHECK(strcmp((message), exceptionMsg.c_str()) == 0); \
} while (0)
// This will validate the given message is a prefix of the exception message.
#define CHECK_EXCEPTION_THROWN_WITH_PREFIX_MESSAGE(statement, message) \
do { \
bool isExceptionThrown = false; \
std::string exceptionMsg; \
try \
{ \
statement; \
} \
catch (const RuntimeException& ex) \
{ \
isExceptionThrown = true; \
exceptionMsg = ex.what(); \
BOOST_TEST_MESSAGE("Exception Message: " << exceptionMsg); \
} \
BOOST_CHECK(isExceptionThrown); \
BOOST_CHECK(exceptionMsg.compare(0, strlen(message), message) == 0); \
} while (0)
namespace Utils
{
template <typename T>
T ConvertFromString(const char* str)
{
return T(
reinterpret_cast<const std::uint8_t*>(str),
static_cast<typename T::size_type>(strlen(str)));
}
template <typename T>
std::string ConvertToString(const T& t)
{
return std::string(reinterpret_cast<const char*>(t.m_data), t.m_size);
}
// Counter related validation util function.
using ExpectedCounterValues = std::vector<std::pair<HashTablePerfCounter, PerfCounters<HashTablePerfCounter>::TValue>>;
// Validate the given perfData against the expected counter value.
void ValidateCounter(
const HashTablePerfData& actual,
HashTablePerfCounter perfCounter,
PerfCounters<HashTablePerfCounter>::TValue expectedValue);
// Validate the given perfData against the expected counter values.
void ValidateCounters(
const HashTablePerfData& actual,
const ExpectedCounterValues& expected);
} // namespace Utils
} // namespace UnitTests
} // namespace L4
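As a usage sketch of the exception macros and string helpers above: the lambda and its message are hypothetical, chosen only for illustration, and RuntimeException is assumed to be constructible from a message string, as the what() comparisons in the macros imply. IReadOnlyHashTable::Key comes from L4/HashTable/IHashTable.h, as in the tests above.
auto throwingCall = [] { throw RuntimeException("Invalid size: 10 given."); };
// The plain macro only checks that a RuntimeException escapes the statement.
CHECK_EXCEPTION_THROWN(throwingCall());
// The message variant additionally requires an exact match of what().
CHECK_EXCEPTION_THROWN_WITH_MESSAGE(throwingCall(), "Invalid size: 10 given.");
// The prefix variant only requires the given string to be a prefix of what().
CHECK_EXCEPTION_THROWN_WITH_PREFIX_MESSAGE(throwingCall(), "Invalid size");
// The string helpers round-trip between C strings and Key/Value-style blobs.
const auto key = Utils::ConvertFromString<IReadOnlyHashTable::Key>("key1");
BOOST_CHECK(Utils::ConvertToString(key) == "key1");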

Unittests/UtilsTest.cpp Normal file

@ -0,0 +1,54 @@
#include "stdafx.h"
#include "L4/Utils/Math.h"
#include <array>
namespace L4
{
namespace UnitTests
{
using namespace Utils;
BOOST_AUTO_TEST_CASE(MathTest)
{
// RoundUp tests.
BOOST_CHECK_EQUAL(Math::RoundUp(5, 10), 10);
BOOST_CHECK_EQUAL(Math::RoundUp(10, 10), 10);
BOOST_CHECK_EQUAL(Math::RoundUp(11, 10), 20);
BOOST_CHECK_EQUAL(Math::RoundUp(5, 0), 5);
// RoundDown tests.
BOOST_CHECK_EQUAL(Math::RoundDown(5, 10), 0);
BOOST_CHECK_EQUAL(Math::RoundDown(10, 10), 10);
BOOST_CHECK_EQUAL(Math::RoundDown(11, 10), 10);
BOOST_CHECK_EQUAL(Math::RoundDown(5, 0), 5);
// IsPowerOfTwo tests.
BOOST_CHECK(Math::IsPowerOfTwo(2));
BOOST_CHECK(Math::IsPowerOfTwo(4));
BOOST_CHECK(!Math::IsPowerOfTwo(3));
BOOST_CHECK(!Math::IsPowerOfTwo(0));
// NextHighestPowerOfTwo tests.
BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(0), 0U);
BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(1), 1U);
BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(2), 2U);
BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(3), 4U);
BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(4), 4U);
BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(5), 8U);
BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(200), 256U);
}
BOOST_AUTO_TEST_CASE(PointerArithmeticTest)
{
std::array<int, 3> elements;
BOOST_CHECK(reinterpret_cast<int*>(Math::PointerArithmetic::Add(&elements[0], sizeof(int))) == &elements[1]);
BOOST_CHECK(reinterpret_cast<int*>(Math::PointerArithmetic::Subtract(&elements[1], sizeof(int))) == &elements[0]);
BOOST_CHECK(Math::PointerArithmetic::Distance(&elements[2], &elements[0]) == sizeof(int) * 2U);
BOOST_CHECK(Math::PointerArithmetic::Distance(&elements[0], &elements[2]) == sizeof(int) * 2U);
}
} // namespace UnitTests
} // namespace L4


@ -0,0 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<packages>
<package id="boost" version="1.63.0.0" targetFramework="native" />
<package id="boost_thread-vc140" version="1.63.0.0" targetFramework="native" />
<package id="boost_unit_test_framework-vc140" version="1.63.0.0" targetFramework="native" />
</packages>

Unittests/stdafx.cpp Normal file

@ -0,0 +1 @@
#include "stdafx.h"

Unittests/stdafx.h Normal file

@ -0,0 +1,5 @@
#pragma once
#define BOOST_TEST_MODULE L4Unittests
#include <boost/test/unit_test.hpp>

32
inc/L4/Epoch/Config.h Normal file
@ -0,0 +1,32 @@
#pragma once
#include <cstdint>
#include <chrono>
namespace L4
{
// EpochManagerConfig struct.
struct EpochManagerConfig
{
// "numActionQueues" indicates how many action containers there will be in order to
// increase the throughput of registering an action.
// "performActionsInParallelThreshold" indicates the threshold value above which
// the actions are performed in parallel.
// "maxNumThreadsToPerformActions" indicates how many threads will be used when
// performing an action in parallel.
explicit EpochManagerConfig(
std::uint32_t epochQueueSize = 1000,
std::chrono::milliseconds epochProcessingInterval = std::chrono::milliseconds{ 1000 },
std::uint8_t numActionQueues = 1)
: m_epochQueueSize{ epochQueueSize }
, m_epochProcessingInterval{ epochProcessingInterval }
, m_numActionQueues{ numActionQueues }
{}
std::uint32_t m_epochQueueSize;
std::chrono::milliseconds m_epochProcessingInterval;
std::uint8_t m_numActionQueues;
};
} // namespace L4
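To make the knobs above concrete, here is a minimal, hypothetical usage sketch (illustration only, not part of this commit); it assumes the header is included as "Epoch/Config.h", matching the include directories used by the project files.

#include <chrono>
#include "Epoch/Config.h"

int main()
{
    // A queue of 5000 epochs, processed every 100 ms, with 4 action queues
    // to reduce contention when registering actions from many threads.
    const L4::EpochManagerConfig config{
        5000U,
        std::chrono::milliseconds{ 100 },
        4U };
    return config.m_numActionQueues == 4U ? 0 : 1;
}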

@ -0,0 +1,63 @@
#pragma once
#include <atomic>
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <tuple>
#include <vector>
#include "IEpochActionManager.h"
#include "Utils/Lock.h"
namespace L4
{
// EpochActionManager provides the functionality to add actions at an epoch and to perform
// actions up to a given epoch.
class EpochActionManager
{
public:
// "numActionQueues" indicates how many action containers there will be in order to
// increase the throughput of registering an action. This will be re-calculated to
// the next highest power of two so that the "&" operator can be used for accessing
// the next queue.
explicit EpochActionManager(std::uint8_t numActionQueues);
// Adds an action at a given epoch counter.
// This function is thread-safe.
void RegisterAction(std::uint64_t epochCounter, IEpochActionManager::Action&& action);
// Performs actions whose associated epoch counter value is less than
// the given epoch counter value, and returns the number of actions performed.
std::uint64_t PerformActions(std::uint64_t epochCounter);
EpochActionManager(const EpochActionManager&) = delete;
EpochActionManager& operator=(const EpochActionManager&) = delete;
private:
using Mutex = Utils::CriticalSection;
using Lock = std::lock_guard<Mutex>;
using Actions = std::vector<IEpochActionManager::Action>;
// The following structure needs to be sorted by the epoch counter.
// If the performance of using std::map becomes an issue, we can revisit this.
using EpochToActions = std::map<std::uint64_t, Actions>;
using EpochToActionsWithLock = std::tuple<std::unique_ptr<Mutex>, EpochToActions>;
// Run actions based on the configuration.
void ApplyActions(Actions& actions);
// Stores a mapping from an epoch counter to the actions to perform.
std::vector<EpochToActionsWithLock> m_epochToActionsList;
// Used to point to the next EpochToActions to simulate round-robin access.
std::atomic<std::uint32_t> m_counter;
};
} // namespace L4
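A hypothetical usage sketch of the two calls above (illustration only, not part of this commit); it assumes the header path "Epoch/EpochActionManager.h" and linking against the library that provides the implementation.

#include "Epoch/EpochActionManager.h"
#include <cstdio>

int main()
{
    // One action queue is enough for a single registering thread.
    L4::EpochActionManager actionManager{ 1U };

    // Defer a cleanup action until epoch 10 is known to be safe to reclaim.
    actionManager.RegisterAction(10U, [] { std::puts("epoch 10 reclaimed"); });

    // Later, on the maintenance thread: run everything registered below epoch 11.
    const auto numPerformed = actionManager.PerformActions(11U);
    return numPerformed == 1U ? 0 : 1;
}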

165
inc/L4/Epoch/EpochQueue.h Normal file
@ -0,0 +1,165 @@
#pragma once
#include <atomic>
#include <memory>
#include <mutex>
#include "Interprocess/Container/Vector.h"
#include "Utils/Exception.h"
#include "Utils/Lock.h"
namespace L4
{
// EpochQueue struct represents reference counts for each epoch.
// Each value of the queue (fixed-size array) is the reference counts at an index,
// where an index represents an epoch (time).
template <
typename TSharableLock,
typename TExclusiveLock,
typename Allocator = std::allocator<void>
>
struct EpochQueue
{
static_assert(
std::is_same<typename TSharableLock::mutex_type, typename TExclusiveLock::mutex_type>::value,
"mutex type should be the same");
public:
EpochQueue(
std::uint64_t epochCounter,
std::uint32_t queueSize,
Allocator allocator = Allocator())
: m_frontIndex{ epochCounter }
, m_backIndex{ epochCounter }
, m_mutexForBackIndex{}
, m_refCounts{ queueSize, Allocator::rebind<RefCount>::other(allocator) }
{
if (queueSize == 0U)
{
throw RuntimeException("Zero queue size is not allowed.");
}
}
using SharableLock = TSharableLock;
using ExclusiveLock = TExclusiveLock;
using RefCount = std::atomic<std::uint32_t>;
using RefCounts = Interprocess::Container::Vector<
RefCount,
typename Allocator::template rebind<RefCount>::other>;
// The front index is accessed/updated only by the owner thread (a single
// thread), thus it doesn't require any synchronization.
std::size_t m_frontIndex;
// Back index represents the latest epoch counter value. Note that
// this is accessed/updated by multiple threads, thus requires
// synchronization.
std::size_t m_backIndex;
// Read/Write lock for m_backIndex.
typename SharableLock::mutex_type m_mutexForBackIndex;
// Reference counts per epoch count.
// The index represents the epoch counter value and the value represents the reference counts.
RefCounts m_refCounts;
};
// EpochRefManager provides functionality of adding/removing references
// to the epoch counter.
template <typename EpochQueue>
class EpochRefManager
{
public:
explicit EpochRefManager(EpochQueue& epochQueue)
: m_epochQueue(epochQueue)
{}
// Increment a reference to the current epoch counter.
// This function is thread-safe.
std::uint64_t AddRef()
{
// The synchronization is needed for EpochCounterManager::AddNewEpoch().
EpochQueue::SharableLock lock(m_epochQueue.m_mutexForBackIndex);
++m_epochQueue.m_refCounts[m_epochQueue.m_backIndex % m_epochQueue.m_refCounts.size()];
return m_epochQueue.m_backIndex;
}
// Decrement a reference count for the given epoch counter.
// This function is thread-safe.
void RemoveRef(std::uint64_t epochCounter)
{
auto& refCounter = m_epochQueue.m_refCounts[epochCounter % m_epochQueue.m_refCounts.size()];
if (refCounter == 0)
{
throw RuntimeException("Reference counter is invalid.");
}
--refCounter;
}
EpochRefManager(const EpochRefManager&) = delete;
EpochRefManager& operator=(const EpochRefManager&) = delete;
private:
EpochQueue& m_epochQueue;
};
// EpochCounterManager provides functionality of updating the current epoch counter
// and getting the latest unreferenced epoch counter.
template <typename EpochQueue>
class EpochCounterManager
{
public:
explicit EpochCounterManager(EpochQueue& epochQueue)
: m_epochQueue(epochQueue)
{}
// Increments the current epoch count by one.
// This function is thread-safe.
void AddNewEpoch()
{
// The synchronization is needed for EpochRefManager::AddRef().
EpochQueue::ExclusiveLock lock(m_epochQueue.m_mutexForBackIndex);
++m_epochQueue.m_backIndex;
// TODO: check for wrap-around/overflow of the back index and throw.
}
// Removes unreferenced epoch counters from the front of the queue and
// returns the resulting front epoch counter; every epoch counter smaller
// than the returned value has a zero reference count.
// Note that this function is NOT thread safe, and should be run on the
// same thread as the one that calls AddNewEpoch().
std::uint64_t RemoveUnreferenceEpochCounters()
{
while (m_epochQueue.m_backIndex > m_epochQueue.m_frontIndex)
{
if (m_epochQueue.m_refCounts[m_epochQueue.m_frontIndex % m_epochQueue.m_refCounts.size()] == 0U)
{
++m_epochQueue.m_frontIndex;
}
else
{
// The front of the queue is still referenced, so stop and return this front index.
break;
}
}
return m_epochQueue.m_frontIndex;
}
EpochCounterManager(const EpochCounterManager&) = delete;
EpochCounterManager& operator=(const EpochCounterManager&) = delete;
private:
EpochQueue& m_epochQueue;
};
} // namespace L4
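The protocol implemented by EpochRefManager and EpochCounterManager can be summarized by the standalone sketch below. It deliberately avoids the L4 types (locks, interprocess vector, allocator) and only mirrors the index/ref-count bookkeeping, so treat it as an illustration of the idea rather than the actual implementation.

#include <atomic>
#include <cstdint>
#include <vector>

struct ToyEpochQueue
{
    explicit ToyEpochQueue(std::size_t size) : m_refCounts(size) {}
    std::uint64_t m_frontIndex = 0U;
    std::uint64_t m_backIndex = 0U;
    std::vector<std::atomic<std::uint32_t>> m_refCounts;
};

// Reader side: pin the current back index (EpochRefManager::AddRef/RemoveRef).
std::uint64_t AddRef(ToyEpochQueue& q)
{
    ++q.m_refCounts[q.m_backIndex % q.m_refCounts.size()];
    return q.m_backIndex;
}

void RemoveRef(ToyEpochQueue& q, std::uint64_t epoch)
{
    --q.m_refCounts[epoch % q.m_refCounts.size()];
}

// Owner side: advance the front index past fully unreferenced epochs
// (what RemoveUnreferenceEpochCounters() does above).
std::uint64_t RemoveUnreferencedEpochCounters(ToyEpochQueue& q)
{
    while (q.m_frontIndex < q.m_backIndex
        && q.m_refCounts[q.m_frontIndex % q.m_refCounts.size()] == 0U)
    {
        ++q.m_frontIndex;
    }
    return q.m_frontIndex;
}

int main()
{
    ToyEpochQueue queue{ 8 };
    const auto epoch = AddRef(queue);   // a reader enters epoch 0
    ++queue.m_backIndex;                // owner: AddNewEpoch()
    RemoveRef(queue, epoch);            // the reader leaves
    return RemoveUnreferencedEpochCounters(queue) == 1U ? 0 : 1;
}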

@ -0,0 +1,42 @@
#pragma once
#include <cstdint>
#include <boost/integer_traits.hpp>
namespace L4
{
// EpochRefPolicy class
template <typename EpochRefManager>
class EpochRefPolicy
{
public:
explicit EpochRefPolicy(EpochRefManager& epochRefManager)
: m_epochRefManager{ epochRefManager }
, m_epochCounter{ m_epochRefManager.AddRef() }
{}
EpochRefPolicy(EpochRefPolicy&& epochRefPolicy)
: m_epochRefManager{ epochRefPolicy.m_epochRefManager }
, m_epochCounter{ epochRefPolicy.m_epochCounter }
{
epochRefPolicy.m_epochCounter = boost::integer_traits<std::uint64_t>::const_max;
}
~EpochRefPolicy()
{
if (m_epochCounter != boost::integer_traits<std::uint64_t>::const_max)
{
m_epochRefManager.RemoveRef(m_epochCounter);
}
}
EpochRefPolicy(const EpochRefPolicy&) = delete;
EpochRefPolicy& operator=(const EpochRefPolicy&) = delete;
private:
EpochRefManager& m_epochRefManager;
std::uint64_t m_epochCounter;
};
} // namespace L4
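A small sketch of how this RAII policy is meant to be used (hypothetical: FakeEpochRefManager below is a stand-in defined only for this example, since EpochRefPolicy only requires AddRef()/RemoveRef()). It assumes the header path "Epoch/EpochRefPolicy.h".

#include <cstdint>
#include <cstdio>
#include "Epoch/EpochRefPolicy.h"

// Stand-in for EpochRefManager: anything with AddRef()/RemoveRef() works,
// because EpochRefPolicy is templated on the manager type.
struct FakeEpochRefManager
{
    std::uint64_t AddRef() { std::puts("AddRef"); return 42U; }
    void RemoveRef(std::uint64_t epoch)
    {
        std::printf("RemoveRef(%llu)\n", static_cast<unsigned long long>(epoch));
    }
};

int main()
{
    FakeEpochRefManager manager;
    {
        // Pins epoch 42 for the duration of this scope; the destructor releases it.
        L4::EpochRefPolicy<FakeEpochRefManager> guard{ manager };
    } // RemoveRef(42) runs here.
    return 0;
}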

@ -0,0 +1,22 @@
#pragma once
#include <functional>
namespace L4
{
// IEpochActionManager interface exposes an API for registering an Action.
struct IEpochActionManager
{
using Action = std::function<void()>;
virtual ~IEpochActionManager() {};
// Registers an action on the latest epoch in the queue; the action is
// performed when the epoch is removed from the queue.
virtual void RegisterAction(Action&& action) = 0;
};
} // namespace L4

@ -0,0 +1,384 @@
#pragma once
#include <chrono>
#include <cstdint>
#include <mutex>
#include "detail/ToRawPointer.h"
#include "Epoch/IEpochActionManager.h"
#include "HashTable/IHashTable.h"
#include "HashTable/ReadWrite/HashTable.h"
#include "HashTable/Cache/Metadata.h"
#include "Utils/Clock.h"
namespace L4
{
namespace HashTable
{
namespace Cache
{
// ReadOnlyHashTable class implements IReadOnlyHashTable interface and provides
// the functionality to read data given a key.
template <typename Allocator, typename Clock = Utils::EpochClock>
class ReadOnlyHashTable
: public virtual ReadWrite::ReadOnlyHashTable<Allocator>
, protected Clock
{
public:
using Base = ReadWrite::ReadOnlyHashTable<Allocator>;
class Iterator;
ReadOnlyHashTable(
HashTable& hashTable,
std::chrono::seconds recordTimeToLive)
: Base{
hashTable,
RecordSerializer{
hashTable.m_setting.m_fixedKeySize,
hashTable.m_setting.m_fixedValueSize,
Metadata::c_metaDataSize } }
, m_recordTimeToLive{ recordTimeToLive }
{}
virtual bool Get(const Key& key, Value& value) const override
{
const auto status = GetInternal(key, value);
// Note that the following const_cast is safe and necessary to update cache hit information.
const_cast<HashTablePerfData&>(GetPerfData()).Increment(
status
? HashTablePerfCounter::CacheHitCount
: HashTablePerfCounter::CacheMissCount);
return status;
}
virtual IIteratorPtr GetIterator() const override
{
return std::make_unique<Iterator>(
m_hashTable,
m_recordSerializer,
m_recordTimeToLive,
GetCurrentEpochTime());
}
ReadOnlyHashTable(const ReadOnlyHashTable&) = delete;
ReadOnlyHashTable& operator=(const ReadOnlyHashTable&) = delete;
protected:
bool GetInternal(const Key& key, Value& value) const
{
if (!Base::Get(key, value))
{
return false;
}
assert(value.m_size > Metadata::c_metaDataSize);
// If the record with the given key is found, check if the record is expired or not.
// Note that the following const_cast is safe and necessary to update the access status.
Metadata metaData{ const_cast<std::uint32_t*>(reinterpret_cast<const std::uint32_t*>(value.m_data)) };
if (metaData.IsExpired(GetCurrentEpochTime(), m_recordTimeToLive))
{
return false;
}
metaData.UpdateAccessStatus(true);
value.m_data += Metadata::c_metaDataSize;
value.m_size -= Metadata::c_metaDataSize;
return true;
}
std::chrono::seconds m_recordTimeToLive;
};
template <typename Allocator, typename Clock>
class ReadOnlyHashTable<Allocator, Clock>::Iterator : public Base::Iterator
{
public:
using Base = typename Base::Iterator;
Iterator(
const HashTable& hashTable,
const RecordSerializer& recordDeserializer,
std::chrono::seconds recordTimeToLive,
std::chrono::seconds currentEpochTime)
: Base(hashTable, recordDeserializer)
, m_recordTimeToLive{ recordTimeToLive }
, m_currentEpochTime{ currentEpochTime }
{}
Iterator(Iterator&& other)
: Base(std::move(other))
, m_recordTimeToLive{ std::move(other.m_recordTimeToLive) }
, m_currentEpochTime{ std::move(other.m_currentEpochTime) }
{}
bool MoveNext() override
{
if (!Base::MoveNext())
{
return false;
}
do
{
const Metadata metaData{
const_cast<std::uint32_t*>(
reinterpret_cast<const std::uint32_t*>(
Base::GetValue().m_data)) };
if (!metaData.IsExpired(m_currentEpochTime, m_recordTimeToLive))
{
return true;
}
} while (Base::MoveNext());
return false;
}
Value GetValue() const override
{
auto value = Base::GetValue();
value.m_data += Metadata::c_metaDataSize;
value.m_size -= Metadata::c_metaDataSize;
return value;
}
private:
std::chrono::seconds m_recordTimeToLive;
std::chrono::seconds m_currentEpochTime;
};
// The following warning is from the virtual inheritance and safe to disable in this case.
// https://msdn.microsoft.com/en-us/library/6b3sy7ae.aspx
#pragma warning(push)
#pragma warning(disable:4250)
// WritableHashTable class implements IWritableHashTable interface and also provides
// the read only access (Get()) to the hash table.
template <typename Allocator, typename Clock = Utils::EpochClock>
class WritableHashTable
: public ReadOnlyHashTable<Allocator, Clock>
, public ReadWrite::WritableHashTable<Allocator>
{
public:
using ReadOnlyBase = ReadOnlyHashTable<Allocator, Clock>;
using WritableBase = typename ReadWrite::WritableHashTable<Allocator>;
WritableHashTable(
HashTable& hashTable,
IEpochActionManager& epochManager,
std::uint64_t maxCacheSizeInBytes,
std::chrono::seconds recordTimeToLive,
bool forceTimeBasedEviction)
: ReadOnlyBase::Base(
hashTable,
RecordSerializer{
hashTable.m_setting.m_fixedKeySize,
hashTable.m_setting.m_fixedValueSize,
Metadata::c_metaDataSize })
, ReadOnlyBase(hashTable, recordTimeToLive)
, WritableBase(hashTable, epochManager)
, m_maxCacheSizeInBytes{ maxCacheSizeInBytes }
, m_forceTimeBasedEviction{ forceTimeBasedEviction }
, m_currentEvictBucketIndex{ 0U }
{}
using ReadOnlyBase::Get;
using ReadOnlyBase::GetPerfData;
virtual void Add(const Key& key, const Value& value) override
{
if (m_forceTimeBasedEviction)
{
EvictBasedOnTime(key);
}
Evict(key.m_size + value.m_size + Metadata::c_metaDataSize);
WritableBase::Add(CreateRecordBuffer(key, value));
}
virtual ISerializerPtr GetSerializer() const override
{
throw std::exception("Not implemented yet.");
}
private:
using Mutex = std::mutex;
using Lock = std::lock_guard<Mutex>;
void EvictBasedOnTime(const Key& key)
{
const auto bucketIndex = GetBucketInfo(key).first;
auto* entry = &m_hashTable.m_buckets[bucketIndex];
const auto curEpochTime = GetCurrentEpochTime();
HashTable::Lock lock{ m_hashTable.GetMutex(bucketIndex) };
while (entry != nullptr)
{
for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i)
{
const auto data = entry->m_dataList[i].Load(std::memory_order_relaxed);
if (data != nullptr)
{
const Metadata metadata{
const_cast<std::uint32_t*>(
reinterpret_cast<const std::uint32_t*>(
m_recordSerializer.Deserialize(*data).m_value.m_data)) };
if (metadata.IsExpired(curEpochTime, m_recordTimeToLive))
{
WritableBase::Remove(*entry, i);
m_hashTable.m_perfData.Increment(HashTablePerfCounter::EvictedRecordsCount);
}
}
}
entry = entry->m_next.Load(std::memory_order_relaxed);
}
}
// Evict uses the CLOCK algorithm to evict records based on expiration and access status
// until the number of bytes freed matches the given number of bytes needed.
void Evict(std::uint64_t bytesNeeded)
{
std::uint64_t numBytesToFree = CalculateNumBytesToFree(bytesNeeded);
if (numBytesToFree == 0U)
{
return;
}
// Start evicting records with a lock.
Lock evictLock{ m_evictMutex };
// Recalculate the number of bytes to free since other thread may have already evicted.
numBytesToFree = CalculateNumBytesToFree(bytesNeeded);
if (numBytesToFree == 0U)
{
return;
}
const auto curEpochTime = GetCurrentEpochTime();
// The max number of iterations per eviction is twice the number of buckets
// so that a full pass can clear the access bits. Note that this is the worst
// case scenario and the eviction process should exit much sooner in the normal case.
auto& buckets = m_hashTable.m_buckets;
std::uint64_t numIterationsRemaining = buckets.size() * 2U;
while (numBytesToFree > 0U && numIterationsRemaining-- > 0U)
{
const auto currentBucketIndex = m_currentEvictBucketIndex++ % buckets.size();
auto& bucket = buckets[currentBucketIndex];
// Lock the bucket since another thread can bypass Evict() because TotalDataSize can
// be updated before the lock on m_evictMutex is released.
HashTable::UniqueLock lock{ m_hashTable.GetMutex(currentBucketIndex) };
HashTable::Entry* entry = &bucket;
while (entry != nullptr)
{
for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i)
{
const auto data = entry->m_dataList[i].Load(std::memory_order_relaxed);
if (data != nullptr)
{
const auto record = m_recordSerializer.Deserialize(*data);
const auto& value = record.m_value;
Metadata metadata{
const_cast<std::uint32_t*>(
reinterpret_cast<const std::uint32_t*>(
value.m_data)) };
// Evict this record if
// 1: the record is expired, or
// 2: the entry is not recently accessed (and unset the access bit if set).
if (metadata.IsExpired(curEpochTime, m_recordTimeToLive)
|| !metadata.UpdateAccessStatus(false))
{
const auto numBytesFreed = record.m_key.m_size + value.m_size;
numBytesToFree = (numBytesFreed >= numBytesToFree) ? 0U : numBytesToFree - numBytesFreed;
WritableBase::Remove(*entry, i);
m_hashTable.m_perfData.Increment(HashTablePerfCounter::EvictedRecordsCount);
}
}
}
entry = entry->m_next.Load(std::memory_order_relaxed);
}
}
}
// Given the number of bytes needed, it calculates the number of bytes
// to free based on the max cache size.
std::uint64_t CalculateNumBytesToFree(std::uint64_t bytesNeeded) const
{
const auto& perfData = GetPerfData();
const std::uint64_t totalDataSize =
perfData.Get(HashTablePerfCounter::TotalKeySize)
+ perfData.Get(HashTablePerfCounter::TotalValueSize)
+ perfData.Get(HashTablePerfCounter::TotalIndexSize);
if ((bytesNeeded < m_maxCacheSizeInBytes)
&& (totalDataSize + bytesNeeded <= m_maxCacheSizeInBytes))
{
// There are enough free bytes.
return 0U;
}
// The (totalDataSize > m_maxCacheSizeInBytes) case is possible:
// 1) If multiple threads are evicting and adding at the same time.
// For example, thread B could have consumed the bytes freed by
// thread A's eviction before thread A got to use them.
// 2) If the max cache size is set lower than expected.
return (totalDataSize > m_maxCacheSizeInBytes)
? (totalDataSize - m_maxCacheSizeInBytes + bytesNeeded)
: bytesNeeded;
}
RecordBuffer* CreateRecordBuffer(const Key& key, const Value& value)
{
const auto bufferSize = m_recordSerializer.CalculateBufferSize(key, value);
auto buffer = Detail::to_raw_pointer(
m_hashTable.GetAllocator<std::uint8_t>().allocate(bufferSize));
std::uint32_t metaDataBuffer;
Metadata{ &metaDataBuffer, GetCurrentEpochTime() };
// 4-byte Metadata is inserted between key and value buffer.
return m_recordSerializer.Serialize(
key,
value,
Value{ reinterpret_cast<std::uint8_t*>(&metaDataBuffer), sizeof(metaDataBuffer) },
buffer,
bufferSize);
}
Mutex m_evictMutex;
const std::uint64_t m_maxCacheSizeInBytes;
const bool m_forceTimeBasedEviction;
std::uint64_t m_currentEvictBucketIndex;
};
#pragma warning(pop)
} // namespace Cache
} // namespace HashTable
} // namespace L4
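The Evict() loop above is a CLOCK (second-chance) policy: a record is removed if it is expired or if its access bit is not set, and a set access bit is cleared so the record becomes a candidate on the next pass. The standalone sketch below illustrates only that decision rule; FakeRecordState is a stand-in for what Metadata::IsExpired() and Metadata::UpdateAccessStatus(false) report, not L4 code.

#include <cassert>

// Stand-in for the per-record state the eviction loop inspects.
struct FakeRecordState
{
    bool m_expired;   // what Metadata::IsExpired() would report
    bool m_accessed;  // the CLOCK/access bit
};

// Evict if the record is expired or was not touched since the last pass;
// otherwise clear the access bit so it becomes a candidate next time.
bool ShouldEvict(FakeRecordState& record)
{
    if (record.m_expired)
    {
        return true;
    }
    const bool wasAccessed = record.m_accessed;
    record.m_accessed = false; // second chance: clear instead of evicting
    return !wasAccessed;
}

int main()
{
    FakeRecordState hot{ false, true };
    FakeRecordState cold{ false, false };
    FakeRecordState expired{ true, true };
    assert(!ShouldEvict(hot));     // survives, but its access bit is now cleared
    assert(ShouldEvict(hot));      // second pass with no access in between: evicted
    assert(ShouldEvict(cold));     // never accessed: evicted immediately
    assert(ShouldEvict(expired));  // expired records always go
    return 0;
}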

@ -0,0 +1,116 @@
#pragma once
#include <cassert>
#include <chrono>
#include <cstdint>
namespace L4
{
namespace HashTable
{
namespace Cache
{
// Metadata class that stores caching-related data.
// It stores an access bit to indicate whether a record has recently been accessed,
// as well as the epoch time when the record was created.
// Note that this works regardless of the alignment of the metadata passed in.
class Metadata
{
public:
// Constructs Metadata with the current epoch time.
Metadata(std::uint32_t* metadata, std::chrono::seconds curEpochTime)
: Metadata{ metadata }
{
*m_metadata = curEpochTime.count() & s_epochTimeMask;
}
explicit Metadata(std::uint32_t* metadata)
: m_metadata{ metadata }
{
assert(m_metadata != nullptr);
}
// Returns the stored epoch time.
std::chrono::seconds GetEpochTime() const
{
// Reading *m_metadata even from unaligned memory should be fine since
// only the byte that contains the access bit is ever modified, and
// a byte read is atomic.
return std::chrono::seconds{ *m_metadata & s_epochTimeMask };
}
// Returns true if the stored epoch time is expired based
// on the given current epoch time and time-to-live value.
bool IsExpired(
std::chrono::seconds curEpochTime,
std::chrono::seconds timeToLive) const
{
assert(curEpochTime >= GetEpochTime());
return (curEpochTime - GetEpochTime()) > timeToLive;
}
// Returns true if the access status is on.
bool IsAccessed() const
{
return !!(GetAccessByte() & s_accessSetMask);
}
// If "set" is true, turn on the access bit in the given metadata and store it.
// If "set" is false, turn off the access bit.
// Returns true if the given metadata's access bit was originally on.
bool UpdateAccessStatus(bool set)
{
const auto isAccessBitOn = IsAccessed();
// Set the bit only if the bit is not set, and vice versa.
if (set != isAccessBitOn)
{
if (set)
{
GetAccessByte() |= s_accessSetMask;
}
else
{
GetAccessByte() &= s_accessUnsetMask;
}
}
return isAccessBitOn;
}
static constexpr std::uint16_t c_metaDataSize = sizeof(std::uint32_t);
private:
std::uint8_t GetAccessByte() const
{
return reinterpret_cast<std::uint8_t*>(m_metadata)[s_accessBitByte];
}
std::uint8_t& GetAccessByte()
{
return reinterpret_cast<std::uint8_t*>(m_metadata)[s_accessBitByte];
}
// TODO: Create an endian test and assert it. (Works only on little endian).
// The byte that contains the most significant bit.
static constexpr std::uint8_t s_accessBitByte = 3U;
// Most significant bit is set.
static constexpr std::uint8_t s_accessSetMask = 1U << 7;
static constexpr std::uint8_t s_accessUnsetMask = s_accessSetMask ^ 0xFF;
// The rest of bits other than the most significant bit are set.
static constexpr std::uint32_t s_epochTimeMask = 0x7FFFFFFF;
// The most significant bit is a CLOCK bit. It is set to 1 upon access
// and reset to 0 by the cache eviction.
// The rest of the bits are used for storing the epoch time in seconds.
std::uint32_t* m_metadata = nullptr;
};
} // namespace Cache
} // namespace HashTable
} // namespace L4
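A minimal sketch of how the single 32-bit metadata word behaves (illustration only): bit 31 is the CLOCK/access bit and bits 0..30 hold the creation time in seconds. It uses only the public API declared above and assumes the include path "HashTable/Cache/Metadata.h".

#include <cassert>
#include <chrono>
#include <cstdint>
#include "HashTable/Cache/Metadata.h"

int main()
{
    using std::chrono::seconds;

    // One 32-bit word backs the metadata.
    std::uint32_t buffer = 0U;
    L4::HashTable::Cache::Metadata metadata{ &buffer, seconds{ 1000 } };

    assert(metadata.GetEpochTime() == seconds{ 1000 });
    assert(!metadata.IsAccessed());

    metadata.UpdateAccessStatus(true);   // the record was read
    assert(metadata.IsAccessed());

    // Expired only when (now - creation) > TTL.
    assert(!metadata.IsExpired(seconds{ 1500 }, seconds{ 600 }));
    assert(metadata.IsExpired(seconds{ 2000 }, seconds{ 600 }));
    return 0;
}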

@ -0,0 +1,216 @@
#pragma once
#include <cstdint>
#include "HashTable/IHashTable.h"
#include "Utils/Exception.h"
namespace L4
{
namespace HashTable
{
// Record struct consists of key and value pair.
struct Record
{
using Key = IReadOnlyHashTable::Key;
using Value = IReadOnlyHashTable::Value;
Record() = default;
Record(
const Key& key,
const Value& value)
: m_key{ key }
, m_value{ value }
{}
Key m_key;
Value m_value;
};
// RecordBuffer is a thin wrapper struct around a raw buffer array (pointer).
// The warning is "nonstandard extension used : zero-sized array in struct/union."
#pragma warning (push)
#pragma warning (disable:4200)
struct RecordBuffer
{
std::uint8_t m_buffer[];
};
#pragma warning (pop)
// RecordSerializer provides a functionality to serialize/deserialize a record information.
class RecordSerializer
{
public:
using Key = Record::Key;
using Value = Record::Value;
using KeySize = Key::size_type;
using ValueSize = Value::size_type;
RecordSerializer(
KeySize fixedKeySize,
ValueSize fixedValueSize,
ValueSize metadataSize = 0U)
: m_fixedKeySize{ fixedKeySize }
, m_fixedValueSize{ fixedValueSize }
, m_metadataSize{ metadataSize }
{}
// Returns the number of bytes needed for serializing the given key and value.
std::size_t CalculateBufferSize(const Key& key, const Value& value) const
{
return
((m_fixedKeySize != 0)
? m_fixedKeySize
: (key.m_size + sizeof(KeySize)))
+ ((m_fixedValueSize != 0)
? m_fixedValueSize + m_metadataSize
: (value.m_size + sizeof(ValueSize) + m_metadataSize));
}
// Returns the number of bytes used for storing the key and value sizes.
std::size_t CalculateRecordOverhead() const
{
return
(m_fixedKeySize != 0 ? 0U : sizeof(KeySize))
+ (m_fixedValueSize != 0 ? 0U : sizeof(ValueSize));
}
// Serializes the given key and value to the given buffer.
// Note that the buffer size must be at least as big as the number of bytes
// returned by CalculateBufferSize().
RecordBuffer* Serialize(
const Key& key,
const Value& value,
std::uint8_t* const buffer,
std::size_t bufferSize) const
{
Validate(key, value);
assert(CalculateBufferSize(key, value) <= bufferSize);
(void)bufferSize;
const auto start = SerializeSizes(buffer, key.m_size, value.m_size);
memcpy_s(buffer + start, key.m_size, key.m_data, key.m_size);
memcpy_s(buffer + start + key.m_size, value.m_size, value.m_data, value.m_size);
return reinterpret_cast<RecordBuffer*>(buffer);
}
// Serializes the given key, value and meta value to the given buffer.
// The meta value is serialized between key and value.
// Note that the buffer size must be at least as big as the number of bytes
// returned by CalculateBufferSize().
RecordBuffer* Serialize(
const Key& key,
const Value& value,
const Value& metaValue,
std::uint8_t* const buffer,
std::size_t bufferSize) const
{
Validate(key, value, metaValue);
assert(CalculateBufferSize(key, value) <= bufferSize);
(void)bufferSize;
const auto start = SerializeSizes(buffer, key.m_size, value.m_size + metaValue.m_size);
memcpy_s(buffer + start, key.m_size, key.m_data, key.m_size);
memcpy_s(buffer + start + key.m_size, metaValue.m_size, metaValue.m_data, metaValue.m_size);
memcpy_s(buffer + start + key.m_size + metaValue.m_size, value.m_size, value.m_data, value.m_size);
return reinterpret_cast<RecordBuffer*>(buffer);
}
// Deserializes the given buffer and returns a Record object.
Record Deserialize(const RecordBuffer& buffer) const
{
Record record;
const auto* dataBuffer = buffer.m_buffer;
auto& key = record.m_key;
if (m_fixedKeySize != 0)
{
key.m_size = m_fixedKeySize;
}
else
{
key.m_size = *reinterpret_cast<const KeySize*>(dataBuffer);
dataBuffer += sizeof(KeySize);
}
auto& value = record.m_value;
if (m_fixedValueSize != 0)
{
value.m_size = m_fixedValueSize + m_metadataSize;
}
else
{
value.m_size = *reinterpret_cast<const ValueSize*>(dataBuffer);
dataBuffer += sizeof(ValueSize);
}
key.m_data = dataBuffer;
value.m_data = dataBuffer + key.m_size;
return record;
}
private:
// Validates key and value sizes when fixed sizes are set.
// Throws an exception if invalid sizes are used.
void Validate(const Key& key, const Value& value) const
{
if ((m_fixedKeySize != 0 && key.m_size != m_fixedKeySize)
|| (m_fixedValueSize != 0 && value.m_size != m_fixedValueSize))
{
throw RuntimeException("Invalid key or value sizes are given.");
}
}
// Validates against the given meta value.
void Validate(const Key& key, const Value& value, const Value& metaValue) const
{
Validate(key, value);
if (m_metadataSize != metaValue.m_size)
{
throw RuntimeException("Invalid meta value size is given.");
}
}
// Serializes size information to the given buffer.
// It assumes that buffer has enough size for serialization.
std::size_t SerializeSizes(
std::uint8_t* const buffer,
KeySize keySize,
ValueSize valueSize) const
{
auto curBuffer = buffer;
if (m_fixedKeySize == 0)
{
*reinterpret_cast<KeySize*>(curBuffer) = keySize;
curBuffer += sizeof(keySize);
}
if (m_fixedValueSize == 0)
{
*reinterpret_cast<ValueSize*>(curBuffer) = valueSize;
curBuffer += sizeof(valueSize);
}
return curBuffer - buffer;
}
const KeySize m_fixedKeySize;
const ValueSize m_fixedValueSize;
const ValueSize m_metadataSize;
};
} // namespace HashTable
} // namespace L4
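A round-trip sketch for RecordSerializer with variable-sized keys and values (illustration only; the include path "HashTable/Common/Record.h" matches the includes used elsewhere in this commit). Passing 0 for the fixed sizes means the key and value sizes are written into the buffer in front of the data.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>
#include "HashTable/Common/Record.h"

int main()
{
    using namespace L4;
    using namespace L4::HashTable;

    RecordSerializer serializer{ 0U, 0U };

    const char* keyStr = "key";
    const char* valStr = "value";
    const IReadOnlyHashTable::Key key{
        reinterpret_cast<const std::uint8_t*>(keyStr),
        static_cast<std::uint16_t>(std::strlen(keyStr)) };
    const IReadOnlyHashTable::Value value{
        reinterpret_cast<const std::uint8_t*>(valStr),
        static_cast<std::uint32_t>(std::strlen(valStr)) };

    std::vector<std::uint8_t> buffer(serializer.CalculateBufferSize(key, value));
    auto* recordBuffer = serializer.Serialize(key, value, buffer.data(), buffer.size());

    const auto record = serializer.Deserialize(*recordBuffer);
    assert(record.m_key == key);
    assert(record.m_value == value);
    return 0;
}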

@ -0,0 +1,32 @@
#pragma once
#include <cstdint>
#include "HashTable/Common/SharedHashTable.h"
#include "HashTable/Config.h"
namespace L4
{
namespace HashTable
{
// SettingAdapter class provides a functionality to convert a HashTableConfig::Setting object
// to a SharedHashTable::Setting object.
class SettingAdapter
{
public:
template <typename SharedHashTable>
typename SharedHashTable::Setting Convert(const HashTableConfig::Setting& from) const
{
typename SharedHashTable::Setting to;
to.m_numBuckets = from.m_numBuckets;
to.m_numBucketsPerMutex = (std::max)(from.m_numBucketsPerMutex.get_value_or(1U), 1U);
to.m_fixedKeySize = from.m_fixedKeySize.get_value_or(0U);
to.m_fixedValueSize = from.m_fixedValueSize.get_value_or(0U);
return to;
}
};
} // namespace HashTable
} // namespace L4

@ -0,0 +1,206 @@
#pragma once
#include <cstdint>
#include <mutex>
#include "HashTable/IHashTable.h"
#include "Interprocess/Container/Vector.h"
#include "Log/PerfCounter.h"
#include "Utils/AtomicOffsetPtr.h"
#include "Utils/Exception.h"
#include "Utils/Lock.h"
namespace L4
{
namespace HashTable
{
// SharedHashTable struct represents the hash table structure.
template <typename TData, typename TAllocator>
struct SharedHashTable
{
using Data = TData;
using Allocator = TAllocator;
// HashTable::Entry struct represents an entry in the chained bucket list.
// Entry layout is as follows:
//
// | tag1 | tag2 | tag3 | tag4 | tag5 | tag6 | tag7 | tag 8 | 1
// | tag9 | tag10 | tag11 | tag12 | tag13 | tag14 | tag15 | tag 16 | 2
// | Data1 pointer | 3
// | Data2 pointer | 4
// | Data3 pointer | 5
// | Data4 pointer | 6
// | Data5 pointer | 7
// | Data6 pointer | 8
// | Data7 pointer | 9
// | Data8 pointer | 10
// | Data9 pointer | 11
// | Data10 pointer | 12
// | Data11 pointer | 13
// | Data12 pointer | 14
// | Data13 pointer | 15
// | Data14 pointer | 16
// | Data15 pointer | 17
// | Data16 pointer | 18
// | Entry pointer to the next Entry | 19
// <----------------------8 bytes ---------------------------------->
// , where tag1 is a tag for Data1, tag2 for Data2, and so on. A tag value can be looked up
// first before going to the corresponding Data for a quick check.
// Also note that a byte read is atomic on modern processors, so a tag is just
// std::uint8_t instead of being atomic. Even in the case where the tag value read is garbage,
// this is acceptable for the following reasons:
// 1) if the garbage value was a hit where it should have been a miss: the actual key comparison will fail,
// 2) if the garbage value was a miss where it should have been a hit: the key value must
// have been changed since the tag was changed, so it will be looked up correctly
// once the written tag value becomes visible. Note that we don't need to guarantee the timing of
// writing and reading (i.e., the value written does not have to be visible to the reader right away).
//
// Note about the CPU cache. In the previous implementation, the Entry was 64 bytes to fit in a CPU cache line.
// However, this resulted in lots of wasted space. For example, when the ratio of the number of expected records
// to the number of buckets was 2:1, only 85% of buckets were occupied. Experiments showed that with a 10:1 ratio,
// bucket utilization reaches 99.98%. This required having more data per Entry, and the ideal number
// (after experiments) turned out to be 16 records per Entry. Also, because of how the CPU fetches contiguous memory,
// this didn't have any impact in micro-benchmarks.
struct Entry
{
Entry() = default;
// Release() deallocates all the memory of the chained entries, including
// the data list in the current Entry.
void Release(Allocator allocator)
{
auto dataDeleter = [allocator](auto& data)
{
auto dataToDelete = data.Load();
if (dataToDelete != nullptr)
{
dataToDelete->~Data();
Allocator::rebind<Data>::other(allocator).deallocate(dataToDelete, 1U);
}
};
// Delete all the chained entries, not including itself.
auto curEntry = m_next.Load();
while (curEntry != nullptr)
{
auto entryToDelete = curEntry;
// Copy m_next for the next iteration.
curEntry = entryToDelete->m_next.Load();
// Delete all the data within this entry.
for (auto& data : entryToDelete->m_dataList)
{
dataDeleter(data);
}
// Clean the current entry itself.
entryToDelete->~Entry();
Allocator::rebind<Entry>::other(allocator).deallocate(entryToDelete, 1U);
}
// Delete all the data from the head of chained entries.
for (auto& data : m_dataList)
{
dataDeleter(data);
}
}
static constexpr std::uint8_t c_numDataPerEntry = 16U;
std::array<std::uint8_t, c_numDataPerEntry> m_tags{ 0U };
std::array<Utils::AtomicOffsetPtr<Data>, c_numDataPerEntry> m_dataList{};
Utils::AtomicOffsetPtr<Entry> m_next{};
};
static_assert(sizeof(Entry) == 152, "Entry should be 152 bytes.");
struct Setting
{
using KeySize = IReadOnlyHashTable::Key::size_type;
using ValueSize = IReadOnlyHashTable::Value::size_type;
Setting() = default;
explicit Setting(
std::uint32_t numBuckets,
std::uint32_t numBucketsPerMutex = 1U,
KeySize fixedKeySize = 0U,
ValueSize fixedValueSize = 0U)
: m_numBuckets{ numBuckets }
, m_numBucketsPerMutex{ numBucketsPerMutex }
, m_fixedKeySize{ fixedKeySize }
, m_fixedValueSize{ fixedValueSize }
{}
std::uint32_t m_numBuckets = 1U;
std::uint32_t m_numBucketsPerMutex = 1U;
KeySize m_fixedKeySize = 0U;
ValueSize m_fixedValueSize = 0U;
};
SharedHashTable(
const Setting& setting,
Allocator allocator)
: m_allocator{ allocator }
, m_setting{ setting }
, m_buckets{ setting.m_numBuckets, Allocator::rebind<Entry>::other(m_allocator) }
, m_mutexes{
(std::max)(setting.m_numBuckets / (std::max)(setting.m_numBucketsPerMutex, 1U), 1U),
Allocator::rebind<Mutex>::other(m_allocator) }
, m_perfData{}
{
m_perfData.Set(HashTablePerfCounter::BucketsCount, m_buckets.size());
m_perfData.Set(
HashTablePerfCounter::TotalIndexSize,
(m_buckets.size() * sizeof(Entry))
+ (m_mutexes.size() * sizeof(Mutex))
+ sizeof(SharedHashTable));
}
~SharedHashTable()
{
for (auto& bucket : m_buckets)
{
bucket.Release(m_allocator);
}
}
using Mutex = Utils::ReaderWriterLockSlim;
using Lock = std::lock_guard<Mutex>;
using UniqueLock = std::unique_lock<Mutex>;
using Buckets = Interprocess::Container::Vector<Entry, typename Allocator::template rebind<Entry>::other>;
using Mutexes = Interprocess::Container::Vector<Mutex, typename Allocator::template rebind<Mutex>::other>;
template <typename T>
auto GetAllocator() const
{
return Allocator::rebind<T>::other(m_allocator);
}
Mutex& GetMutex(std::size_t index)
{
return m_mutexes[index % m_mutexes.size()];
}
Allocator m_allocator;
const Setting m_setting;
Buckets m_buckets;
Mutexes m_mutexes;
HashTablePerfData m_perfData;
SharedHashTable(const SharedHashTable&) = delete;
SharedHashTable& operator=(const SharedHashTable&) = delete;
};
} // namespace HashTable
} // namespace L4
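A quick arithmetic check of the Entry layout diagram above (illustration only; it assumes the data and next pointers are 8-byte offset pointers on x64, which is what the sizeof(Entry) == 152 static_assert implies):

#include <cstdint>

// 16 x 1-byte tags (rows 1-2) + 16 x 8-byte data pointers (rows 3-18)
// + 1 x 8-byte next-entry pointer (row 19) = 152 bytes.
static_assert(16 * sizeof(std::uint8_t) + 16 * 8 + 8 == 152, "Entry layout arithmetic");

int main() { return 0; }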

91
inc/L4/HashTable/Config.h Normal file
@ -0,0 +1,91 @@
#pragma once
#include <boost/optional.hpp>
#include <cassert>
#include <cstdint>
#include <chrono>
#include <memory>
#include "HashTable/IHashTable.h"
#include "Serialization/IStream.h"
#include "Utils/Properties.h"
namespace L4
{
// HashTableConfig struct.
struct HashTableConfig
{
struct Setting
{
using KeySize = IReadOnlyHashTable::Key::size_type;
using ValueSize = IReadOnlyHashTable::Value::size_type;
explicit Setting(
std::uint32_t numBuckets,
boost::optional<std::uint32_t> numBucketsPerMutex = {},
boost::optional<KeySize> fixedKeySize = {},
boost::optional<ValueSize> fixedValueSize = {})
: m_numBuckets{ numBuckets }
, m_numBucketsPerMutex{ numBucketsPerMutex }
, m_fixedKeySize{ fixedKeySize }
, m_fixedValueSize{ fixedValueSize }
{}
std::uint32_t m_numBuckets;
boost::optional<std::uint32_t> m_numBucketsPerMutex;
boost::optional<KeySize> m_fixedKeySize;
boost::optional<ValueSize> m_fixedValueSize;
};
struct Cache
{
Cache(
std::uint64_t maxCacheSizeInBytes,
std::chrono::seconds recordTimeToLive,
bool forceTimeBasedEviction)
: m_maxCacheSizeInBytes{ maxCacheSizeInBytes }
, m_recordTimeToLive{ recordTimeToLive }
, m_forceTimeBasedEviction{ forceTimeBasedEviction }
{}
std::uint64_t m_maxCacheSizeInBytes;
std::chrono::seconds m_recordTimeToLive;
bool m_forceTimeBasedEviction;
};
struct Serializer
{
using Properties = Utils::Properties;
Serializer(
std::shared_ptr<IStreamReader> streamReader = {},
boost::optional<Properties> properties = {})
: m_streamReader{ streamReader }
, m_properties{ properties }
{}
std::shared_ptr<IStreamReader> m_streamReader;
boost::optional<Properties> m_properties;
};
HashTableConfig(
std::string name,
Setting setting,
boost::optional<Cache> cache = {},
boost::optional<Serializer> serializer = {})
: m_name{ std::move(name) }
, m_setting{ std::move(setting) }
, m_cache{ cache }
, m_serializer{ serializer }
{
assert(m_setting.m_numBuckets > 0U
|| (m_serializer && (serializer->m_streamReader != nullptr)));
}
std::string m_name;
Setting m_setting;
boost::optional<Cache> m_cache;
boost::optional<Serializer> m_serializer;
};
} // namespace L4
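A hypothetical construction of this config (illustration only; the table name and numbers below are made up). It assumes the include path "HashTable/Config.h" shown in this commit.

#include <chrono>
#include "HashTable/Config.h"

int main()
{
    using namespace L4;

    // 1M buckets, records capped at a 1 GB budget with a 10-minute TTL,
    // evicted by the cache policy only (no forced time-based eviction).
    const HashTableConfig config{
        "UrlCache",
        HashTableConfig::Setting{ 1000000U },
        HashTableConfig::Cache{
            1024ULL * 1024ULL * 1024ULL,
            std::chrono::seconds{ 600 },
            false } };
    return config.m_cache ? 0 : 1;
}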

@ -0,0 +1,102 @@
#pragma once
#include <cstdint>
#include "Log/PerfCounter.h"
#include "Serialization/IStream.h"
#include "Utils/Properties.h"
namespace L4
{
// IReadOnlyHashTable interface for read-only access to the hash table.
struct IReadOnlyHashTable
{
// Blob struct that represents a memory blob.
template <typename TSize>
struct Blob
{
using size_type = TSize;
explicit Blob(const std::uint8_t* data = nullptr, size_type size = 0U)
: m_data{ data }
, m_size{ size }
{
static_assert(std::numeric_limits<size_type>::is_integer, "size_type is not an integer.");
}
bool operator==(const Blob& other) const
{
return (m_size == other.m_size)
&& !memcmp(m_data, other.m_data, m_size);
}
bool operator!=(const Blob& other) const
{
return !(*this == other);
}
const std::uint8_t* m_data;
size_type m_size;
};
using Key = Blob<std::uint16_t>;
using Value = Blob<std::uint32_t>;
struct IIterator;
using IIteratorPtr = std::unique_ptr<IIterator>;
virtual ~IReadOnlyHashTable() = default;
virtual bool Get(const Key& key, Value& value) const = 0;
virtual IIteratorPtr GetIterator() const = 0;
virtual const HashTablePerfData& GetPerfData() const = 0;
};
// IReadOnlyHashTable::IIterator interface for the hash table iterator.
struct IReadOnlyHashTable::IIterator
{
virtual ~IIterator() = default;
virtual void Reset() = 0;
virtual bool MoveNext() = 0;
virtual Key GetKey() const = 0;
virtual Value GetValue() const = 0;
};
// IWritableHashTable interface for write access to the hash table.
struct IWritableHashTable : public virtual IReadOnlyHashTable
{
struct ISerializer;
using ISerializerPtr = std::unique_ptr<ISerializer>;
virtual void Add(const Key& key, const Value& value) = 0;
virtual bool Remove(const Key& key) = 0;
virtual ISerializerPtr GetSerializer() const = 0;
};
// IWritableHashTable::ISerializer interface for serializing hash table.
struct IWritableHashTable::ISerializer
{
virtual ~ISerializer() = default;
virtual void Serialize(
IStreamWriter& writer,
const Utils::Properties& properties) = 0;
};
} // namespace L4
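A compile-only sketch of consuming the read-only interface above (hypothetical helper, not part of this commit); a concrete implementation such as the ReadWrite hash table later in this commit would be passed in by the caller.

#include <cstdio>
#include "HashTable/IHashTable.h"

void DumpAllRecords(const L4::IReadOnlyHashTable& hashTable)
{
    auto iterator = hashTable.GetIterator();
    while (iterator->MoveNext())
    {
        const auto key = iterator->GetKey();
        const auto value = iterator->GetValue();
        std::printf("key size = %u, value size = %u\n",
            static_cast<unsigned>(key.m_size),
            static_cast<unsigned>(value.m_size));
    }
}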

@ -0,0 +1,578 @@
#pragma once
#include <boost/optional.hpp>
#include <cstdint>
#include <mutex>
#include "detail/ToRawPointer.h"
#include "Epoch/IEpochActionManager.h"
#include "HashTable/Common/SharedHashTable.h"
#include "HashTable/Common/Record.h"
#include "HashTable/IHashTable.h"
#include "HashTable/ReadWrite/Serializer.h"
#include "Log/PerfCounter.h"
#include "Serialization/IStream.h"
#include "Utils/Exception.h"
#include "Utils/MurmurHash3.h"
#include "Utils/Properties.h"
namespace L4
{
// ReadWriteHashTable is a general purpose hash table where the lookup is lock-free.
namespace HashTable
{
namespace ReadWrite
{
// ReadOnlyHashTable class implements IReadOnlyHashTable interface and provides
// the functionality to read data given a key.
template <typename Allocator>
class ReadOnlyHashTable : public virtual IReadOnlyHashTable
{
public:
using HashTable = SharedHashTable<RecordBuffer, Allocator>;
class Iterator;
explicit ReadOnlyHashTable(
HashTable& hashTable,
boost::optional<RecordSerializer> recordSerializer = boost::none)
: m_hashTable{ hashTable }
, m_recordSerializer{
recordSerializer
? *recordSerializer
: RecordSerializer{
m_hashTable.m_setting.m_fixedKeySize,
m_hashTable.m_setting.m_fixedValueSize } }
{}
virtual bool Get(const Key& key, Value& value) const override
{
const auto bucketInfo = GetBucketInfo(key);
const auto* entry = &m_hashTable.m_buckets[bucketInfo.first];
while (entry != nullptr)
{
for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i)
{
if (bucketInfo.second == entry->m_tags[i])
{
// There could be a race condition where m_dataList[i] is updated during access.
// Therefore, load it once and save it (it's safe to store it b/c the memory
// will not be deleted until ref count becomes 0).
const auto data = entry->m_dataList[i].Load(std::memory_order_acquire);
if (data != nullptr)
{
const auto record = m_recordSerializer.Deserialize(*data);
if (record.m_key == key)
{
value = record.m_value;
return true;
}
}
}
}
entry = entry->m_next.Load(std::memory_order_acquire);
}
return false;
}
virtual IIteratorPtr GetIterator() const override
{
return std::make_unique<Iterator>(m_hashTable, m_recordSerializer);
}
virtual const HashTablePerfData& GetPerfData() const override
{
// Synchronizes with any std::memory_order_release if there exists, so that
// HashTablePerfData has the latest values at the moment when GetPerfData() is called.
std::atomic_thread_fence(std::memory_order_acquire);
return m_hashTable.m_perfData;
}
ReadOnlyHashTable(const ReadOnlyHashTable&) = delete;
ReadOnlyHashTable& operator=(const ReadOnlyHashTable&) = delete;
protected:
// GetBucketInfo returns a pair, where the first is the index to the bucket
// and the second is the tag value for the given key.
// In this hash table, we treat tag value of 0 as empty (see WritableHashTable::Remove()),
// so in the worst case scenario, where an entry has an empty data list and the tag
// value returned for the key is 0, the look up cost is up to 6 checks. We can do something
// smarter by using the unused two bytes per Entry, but since an Entry object fits into
// CPU cache, the extra overhead should be minimal.
std::pair<std::uint32_t, std::uint8_t> GetBucketInfo(const Key& key) const
{
std::array<std::uint64_t, 2> hash;
MurmurHash3_x64_128(key.m_data, key.m_size, 0U, hash.data());
return {
static_cast<std::uint32_t>(hash[0] % m_hashTable.m_buckets.size()),
static_cast<std::uint8_t>(hash[1]) };
}
HashTable& m_hashTable;
RecordSerializer m_recordSerializer;
};
// ReadOnlyHashTable::Iterator class implements IIterator interface and provides
// read-only iterator for the ReadOnlyHashTable.
template <typename Allocator>
class ReadOnlyHashTable<Allocator>::Iterator : public IIterator
{
public:
Iterator(
const HashTable& hashTable,
const RecordSerializer& recordDeserializer)
: m_hashTable{ hashTable }
, m_recordSerializer{ recordDeserializer }
, m_currentBucketIndex{ -1 }
, m_currentRecordIndex{ 0U }
, m_currentEntry{ nullptr }
{}
Iterator(Iterator&& iterator)
: m_hashTable{ std::move(iterator.m_hashTable) }
, m_recordSerializer{ std::move(iterator.recordDeserializer) }
, m_currentBucketIndex{ std::move(iterator.m_currentBucketIndex) }
, m_currentRecordIndex{ std::move(iterator.m_currentRecordIndex) }
, m_currentEntry{ std::move(iterator.m_currentEntry) }
{}
void Reset() override
{
m_currentBucketIndex = -1;
m_currentRecordIndex = 0U;
m_currentEntry = nullptr;
}
bool MoveNext() override
{
if (IsEnd())
{
return false;
}
if (m_currentEntry != nullptr)
{
MoveToNextData();
}
assert(m_currentRecordIndex < HashTable::Entry::c_numDataPerEntry);
while ((m_currentEntry == nullptr)
|| (m_currentRecord = m_currentEntry->m_dataList[m_currentRecordIndex].Load()) == nullptr)
{
if (m_currentEntry == nullptr)
{
++m_currentBucketIndex;
m_currentRecordIndex = 0U;
if (IsEnd())
{
return false;
}
m_currentEntry = &m_hashTable.m_buckets[m_currentBucketIndex];
}
else
{
MoveToNextData();
}
}
assert(m_currentEntry != nullptr);
assert(m_currentRecord != nullptr);
return true;
}
Key GetKey() const override
{
if (!IsValid())
{
throw RuntimeException("HashTableIterator is not correctly used.");
}
return m_recordSerializer.Deserialize(*m_currentRecord).m_key;
}
Value GetValue() const override
{
if (!IsValid())
{
throw RuntimeException("HashTableIterator is not correctly used.");
}
return m_recordSerializer.Deserialize(*m_currentRecord).m_value;
}
Iterator(const Iterator&) = delete;
Iterator& operator=(const Iterator&) = delete;
private:
bool IsValid() const
{
return !IsEnd()
&& (m_currentEntry != nullptr)
&& (m_currentRecord != nullptr);
}
bool IsEnd() const
{
return m_currentBucketIndex == static_cast<std::int64_t>(m_hashTable.m_buckets.size());
}
void MoveToNextData()
{
if (++m_currentRecordIndex >= HashTable::Entry::c_numDataPerEntry)
{
m_currentRecordIndex = 0U;
m_currentEntry = m_currentEntry->m_next.Load();
}
}
const HashTable& m_hashTable;
const RecordSerializer& m_recordSerializer;
std::int64_t m_currentBucketIndex;
std::uint8_t m_currentRecordIndex;
const typename HashTable::Entry* m_currentEntry;
const RecordBuffer* m_currentRecord;
};
// The following warning is from the virtual inheritance and safe to disable in this case.
// https://msdn.microsoft.com/en-us/library/6b3sy7ae.aspx
#pragma warning(push)
#pragma warning(disable:4250)
// WritableHashTable class implements IWritableHashTable interface and also provides
// the read only access (Get()) to the hash table.
// Note the virtual inheritance on ReadOnlyHashTable<Allocator> so that any derived class
// can have only one ReadOnlyHashTable base class instance.
template <typename Allocator>
class WritableHashTable
: public virtual ReadOnlyHashTable<Allocator>
, public IWritableHashTable
{
public:
WritableHashTable(
HashTable& hashTable,
IEpochActionManager& epochManager)
: ReadOnlyHashTable(hashTable)
, m_epochManager{ epochManager }
{}
virtual void Add(const Key& key, const Value& value) override
{
Add(CreateRecordBuffer(key, value));
}
virtual bool Remove(const Key& key) override
{
const auto bucketInfo = GetBucketInfo(key);
auto* entry = &m_hashTable.m_buckets[bucketInfo.first];
HashTable::Lock lock{ m_hashTable.GetMutex(bucketInfo.first) };
// Note that similar to Add(), the following block is performed inside a critical section,
// therefore, it is safe to do "Load"s with memory_order_relaxed.
while (entry != nullptr)
{
for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i)
{
if (bucketInfo.second == entry->m_tags[i])
{
const auto data = entry->m_dataList[i].Load(std::memory_order_relaxed);
if (data != nullptr)
{
const auto record = m_recordSerializer.Deserialize(*data);
if (record.m_key == key)
{
Remove(*entry, i);
return true;
}
}
}
}
entry = entry->m_next.Load(std::memory_order_relaxed);
}
return false;
}
virtual ISerializerPtr GetSerializer() const override
{
return std::make_unique<WritableHashTable::Serializer>(m_hashTable);
}
protected:
void Add(RecordBuffer* recordToAdd)
{
assert(recordToAdd != nullptr);
const auto newRecord = m_recordSerializer.Deserialize(*recordToAdd);
const auto& newKey = newRecord.m_key;
const auto& newValue = newRecord.m_value;
Stat stat{ newKey.m_size, newValue.m_size };
const auto bucketInfo = GetBucketInfo(newKey);
auto* curEntry = &m_hashTable.m_buckets[bucketInfo.first];
HashTable::Entry* entryToUpdate = nullptr;
std::uint8_t curDataIndex = 0U;
HashTable::UniqueLock lock{ m_hashTable.GetMutex(bucketInfo.first) };
// Note that the following block is performed inside a critical section, therefore,
// it is safe to do "Load"s with memory_order_relaxed.
while (curEntry != nullptr)
{
++stat.m_chainIndex;
for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i)
{
const auto data = curEntry->m_dataList[i].Load(std::memory_order_relaxed);
if (data == nullptr)
{
if (entryToUpdate == nullptr)
{
// Found an entry with no data set, but still need to go through to the end of
// the list to see if an entry with the given key exists.
entryToUpdate = curEntry;
curDataIndex = i;
}
}
else if (curEntry->m_tags[i] == bucketInfo.second)
{
const auto oldRecord = m_recordSerializer.Deserialize(*data);
if (newKey == oldRecord.m_key)
{
// Will overwrite this entry data.
entryToUpdate = curEntry;
curDataIndex = i;
stat.m_oldValueSize = oldRecord.m_value.m_size;
break;
}
}
}
// Found the entry data to replace.
if (stat.m_oldValueSize != 0U)
{
break;
}
// Check if this is the end of the chaining. If so, create a new entry if we haven't found
// any entry to update along the way.
if (entryToUpdate == nullptr && curEntry->m_next.Load(std::memory_order_relaxed) == nullptr)
{
curEntry->m_next.Store(
new (Detail::to_raw_pointer(
m_hashTable.GetAllocator<HashTable::Entry>().allocate(1U)))
HashTable::Entry(),
std::memory_order_release);
stat.m_isNewEntryAdded = true;
}
curEntry = curEntry->m_next.Load(std::memory_order_relaxed);
}
assert(entryToUpdate != nullptr);
auto recordToDelete = UpdateRecord(*entryToUpdate, curDataIndex, recordToAdd, bucketInfo.second);
lock.unlock();
UpdatePerfDataForAdd(stat);
ReleaseRecord(recordToDelete);
}
// The chainIndex is the 1-based index for the given entry in the chained bucket list.
// It is assumed that this function is called under a lock.
void Remove(typename HashTable::Entry& entry, std::uint8_t index)
{
auto recordToDelete = UpdateRecord(entry, index, nullptr, 0U);
assert(recordToDelete != nullptr);
const auto record = m_recordSerializer.Deserialize(*recordToDelete);
UpdatePerfDataForRemove(
Stat{
record.m_key.m_size,
record.m_value.m_size,
0U
});
ReleaseRecord(recordToDelete);
}
private:
struct Stat;
class Serializer;
RecordBuffer* CreateRecordBuffer(const Key& key, const Value& value)
{
const auto bufferSize = m_recordSerializer.CalculateBufferSize(key, value);
auto buffer = Detail::to_raw_pointer(
m_hashTable.GetAllocator<std::uint8_t>().allocate(bufferSize));
return m_recordSerializer.Serialize(key, value, buffer, bufferSize);
}
RecordBuffer* UpdateRecord(typename HashTable::Entry& entry, std::uint8_t index, RecordBuffer* newRecord, std::uint8_t newTag)
{
// This function should be called under a lock, so calling with memory_order_relaxed for Load() is safe.
auto& recordHolder = entry.m_dataList[index];
auto oldRecord = recordHolder.Load(std::memory_order_relaxed);
recordHolder.Store(newRecord, std::memory_order_release);
entry.m_tags[index] = newTag;
return oldRecord;
}
void ReleaseRecord(RecordBuffer* record)
{
if (record == nullptr)
{
return;
}
m_epochManager.RegisterAction(
[this, record]()
{
record->~RecordBuffer();
m_hashTable.GetAllocator<RecordBuffer>().deallocate(record, 1U);
});
}
void UpdatePerfDataForAdd(const Stat& stat)
{
auto& perfData = m_hashTable.m_perfData;
if (stat.m_oldValueSize != 0U)
{
// Updating the existing record. Therefore, no change in the key size.
perfData.Add(HashTablePerfCounter::TotalValueSize,
static_cast<HashTablePerfData::TValue>(stat.m_valueSize) - stat.m_oldValueSize);
}
else
{
// We are adding a new data instead of replacing.
perfData.Add(HashTablePerfCounter::TotalKeySize, stat.m_keySize);
perfData.Add(HashTablePerfCounter::TotalValueSize, stat.m_valueSize);
perfData.Add(HashTablePerfCounter::TotalIndexSize,
// Record overhead.
m_recordSerializer.CalculateRecordOverhead()
// Entry overhead if created.
+ (stat.m_isNewEntryAdded ? sizeof(HashTable::Entry) : 0U));
perfData.Min(HashTablePerfCounter::MinKeySize, stat.m_keySize);
perfData.Max(HashTablePerfCounter::MaxKeySize, stat.m_keySize);
perfData.Increment(HashTablePerfCounter::RecordsCount);
if (stat.m_isNewEntryAdded)
{
perfData.Increment(HashTablePerfCounter::ChainingEntriesCount);
if (stat.m_chainIndex > 1U)
{
perfData.Max(HashTablePerfCounter::MaxBucketChainLength, stat.m_chainIndex);
}
}
}
perfData.Min(HashTablePerfCounter::MinValueSize, stat.m_valueSize);
perfData.Max(HashTablePerfCounter::MaxValueSize, stat.m_valueSize);
}
void UpdatePerfDataForRemove(const Stat& stat)
{
auto& perfData = m_hashTable.m_perfData;
perfData.Decrement(HashTablePerfCounter::RecordsCount);
perfData.Subtract(HashTablePerfCounter::TotalKeySize, stat.m_keySize);
perfData.Subtract(HashTablePerfCounter::TotalValueSize, stat.m_valueSize);
perfData.Subtract(HashTablePerfCounter::TotalIndexSize, m_recordSerializer.CalculateRecordOverhead());
}
IEpochActionManager& m_epochManager;
};
#pragma warning(pop)
// WritableHashTable::Stat struct encapsulates stats for Add()/Remove().
template <typename Allocator>
struct WritableHashTable<Allocator>::Stat
{
using KeySize = Key::size_type;
using ValueSize = Value::size_type;
explicit Stat(
KeySize keySize = 0U,
ValueSize valueSize = 0U,
ValueSize oldValueSize = 0U,
std::uint32_t chainIndex = 0U,
bool isNewEntryAdded = false)
: m_keySize{ keySize }
, m_valueSize{ valueSize }
, m_oldValueSize{ oldValueSize }
, m_chainIndex{ chainIndex }
, m_isNewEntryAdded{ isNewEntryAdded }
{}
KeySize m_keySize;
ValueSize m_valueSize;
ValueSize m_oldValueSize;
std::uint32_t m_chainIndex;
bool m_isNewEntryAdded;
};
// WritableHashTable::Serializer class that implements ISerializer, which provides
// the functionality to serialize the WritableHashTable.
template <typename Allocator>
class WritableHashTable<Allocator>::Serializer : public IWritableHashTable::ISerializer
{
public:
explicit Serializer(HashTable& hashTable)
: m_hashTable{ hashTable }
{}
Serializer(const Serializer&) = delete;
Serializer& operator=(const Serializer&) = delete;
void Serialize(
IStreamWriter& writer,
const Utils::Properties& /* properties */) override
{
ReadWrite::Serializer<HashTable>{}.Serialize(m_hashTable, writer);
}
private:
HashTable& m_hashTable;
};
} // namespace ReadWrite
} // namespace HashTable
} // namespace L4
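GetBucketInfo() above pairs a bucket index with a 1-byte tag so that most probes are rejected by a cheap tag comparison before the full key compare. The standalone sketch below illustrates just that idea; it uses std::hash in place of MurmurHash3_x64_128 and ignores the detail that the real table reserves tag value 0 for empty slots.

#include <cstdint>
#include <cstdio>
#include <functional>
#include <string>
#include <utility>

std::pair<std::uint32_t, std::uint8_t> GetBucketInfo(const std::string& key, std::size_t numBuckets)
{
    const std::size_t h = std::hash<std::string>{}(key);
    return { static_cast<std::uint32_t>(h % numBuckets),
             static_cast<std::uint8_t>(h >> 24) };
}

int main()
{
    const auto info = GetBucketInfo("example-key", 8U);
    std::printf("bucket = %u, tag = %u\n", info.first, static_cast<unsigned>(info.second));
    return 0;
}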

@ -0,0 +1,268 @@
#pragma once
#include <cstdint>
#include <boost/format.hpp>
#include "Epoch/IEpochActionManager.h"
#include "Log/PerfCounter.h"
#include "Serialization/IStream.h"
#include "Serialization/SerializerHelper.h"
#include "Utils/Exception.h"
#include "Utils/Properties.h"
namespace L4
{
namespace HashTable
{
namespace ReadWrite
{
// Note that the HashTable template parameter in this file is
// HashTable::ReadWrite::ReadOnlyHashTable<Allocator>::HashTable.
// However, due to the cyclic dependency, it needs to be passed as a template type.
// Interface for a serializer for the given HashTable type.
template <typename HashTable>
struct ISerializer
{
virtual ~ISerializer() = default;
// Assumes the writer has not been started yet, thus the serializer is responsible
// for calling Begin() and End() on the writer.
virtual void Serialize(
HashTable& hashTable,
IStreamWriter& writer) const = 0;
};
// Interface for a deserializer for the given Memory and HashTable type.
template <typename Memory, typename HashTable>
struct IDeserializer
{
virtual ~IDeserializer() = default;
// Assumes that reader.Begin() has already been called and the version info has been read.
// The deserializer should call reader.End() before Deserialize() returns.
virtual typename Memory::template UniquePtr<HashTable> Deserialize(
Memory& memory,
IStreamReader& reader) const = 0;
};
// All deprecated (previous version) serializers should be put inside the Deprecated namespace.
// Removing any of the Deprecated serializers from the source code will require a major package version change.
namespace Deprecated
{
} // namespace Deprecated
namespace Current
{
constexpr std::uint8_t c_version = 3U;
// Current serializer used for serializing hash tables.
// The serialization format of Serializer is:
// <Version Id = 3> <Hash table settings> followed by
// If the next byte is set to 1:
// <Key size> <Key bytes> <Value size> <Value bytes>
// Otherwise, end of the records.
template <typename HashTable>
class Serializer : public ISerializer<HashTable>
{
public:
Serializer() = default;
Serializer(const Serializer&) = delete;
Serializer& operator=(const Serializer&) = delete;
void Serialize(
HashTable& hashTable,
IStreamWriter& writer) const override
{
writer.Begin();
auto& perfData = hashTable.m_perfData;
perfData.Set(HashTablePerfCounter::RecordsCountSavedFromSerializer, 0);
SerializerHelper helper(writer);
helper.Serialize(c_version);
helper.Serialize(&hashTable.m_setting, sizeof(hashTable.m_setting));
ReadOnlyHashTable<HashTable::Allocator> readOnlyHashTable(hashTable);
auto iterator = readOnlyHashTable.GetIterator();
while (iterator->MoveNext())
{
helper.Serialize(true); // Indicates record exists.
const auto key = iterator->GetKey();
const auto value = iterator->GetValue();
helper.Serialize(key.m_size);
helper.Serialize(key.m_data, key.m_size);
helper.Serialize(value.m_size);
helper.Serialize(value.m_data, value.m_size);
perfData.Increment(HashTablePerfCounter::RecordsCountSavedFromSerializer);
}
helper.Serialize(false); // Indicates the end of records.
// Flush perf counter so that the values are up to date when GetPerfData() is called.
std::atomic_thread_fence(std::memory_order_release);
writer.End();
}
};
// Current Deserializer used for deserializing hash tables.
template <typename Memory, typename HashTable>
class Deserializer : public IDeserializer<Memory, HashTable>
{
public:
explicit Deserializer(const Utils::Properties& /* properties */)
{}
Deserializer(const Deserializer&) = delete;
Deserializer& operator=(const Deserializer&) = delete;
typename Memory::template UniquePtr<HashTable> Deserialize(
Memory& memory,
IStreamReader& reader) const override
{
DeserializerHelper helper(reader);
HashTable::Setting setting;
helper.Deserialize(setting);
auto hashTable{ memory.MakeUnique<HashTable>(
setting,
memory.GetAllocator()) };
EpochActionManager epochActionManager;
using Allocator = typename Memory::template Allocator<>;
WritableHashTable<Allocator> writableHashTable(
*hashTable,
epochActionManager);
auto& perfData = hashTable->m_perfData;
std::vector<std::uint8_t> keyBuffer;
std::vector<std::uint8_t> valueBuffer;
bool hasMoreData = false;
helper.Deserialize(hasMoreData);
while (hasMoreData)
{
IReadOnlyHashTable::Key key;
IReadOnlyHashTable::Value value;
helper.Deserialize(key.m_size);
keyBuffer.resize(key.m_size);
helper.Deserialize(keyBuffer.data(), key.m_size);
key.m_data = keyBuffer.data();
helper.Deserialize(value.m_size);
valueBuffer.resize(value.m_size);
helper.Deserialize(valueBuffer.data(), value.m_size);
value.m_data = valueBuffer.data();
writableHashTable.Add(key, value);
helper.Deserialize(hasMoreData);
perfData.Increment(HashTablePerfCounter::RecordsCountLoadedFromSerializer);
}
// Flush perf counter so that the values are up to date when GetPerfData() is called.
std::atomic_thread_fence(std::memory_order_release);
reader.End();
return hashTable;
}
private:
// Deserializer internally uses WritableHashTable for deserialization, therefore
// an implementation of IEpochActionManager is needed. Since all the keys in the hash table
// are expected to be unique, no RegisterAction() should be called.
class EpochActionManager : public IEpochActionManager
{
public:
void RegisterAction(Action&& /* action */) override
{
// Since it is assumed that the deserializer is loading from a stream generated by the matching serializer,
// it is guaranteed that all the keys are unique (a property of a hash table). Therefore, RegisterAction()
// should not be called by the WritableHashTable.
throw RuntimeException("RegisterAction() should not be called during deserialization.");
}
};
};
} // namespace Current
// Serializer is the main driver for serializing a hash table.
// It always uses the Current::Serializer for serializing a hash table.
template <typename HashTable>
class Serializer
{
public:
Serializer() = default;
Serializer(const Serializer&) = delete;
Serializer& operator=(const Serializer&) = delete;
void Serialize(
HashTable& hashTable,
IStreamWriter& writer) const
{
Current::Serializer<HashTable>{}.Serialize(hashTable, writer);
}
};
// Deserializer is the main driver for deserializing the input stream to create a hash table.
template <typename Memory, typename HashTable>
class Deserializer
{
public:
explicit Deserializer(const Utils::Properties& properties)
: m_properties(properties)
{}
Deserializer(const Deserializer&) = delete;
Deserializer& operator=(const Deserializer&) = delete;
typename Memory::template UniquePtr<HashTable> Deserialize(
Memory& memory,
IStreamReader& reader) const
{
reader.Begin();
std::uint8_t version = 0U;
reader.Read(reinterpret_cast<std::uint8_t*>(&version), sizeof(version));
switch (version)
{
case Current::c_version:
return Current::Deserializer<Memory, HashTable>{ m_properties }.Deserialize(memory, reader);
default:
boost::format err("Unsupported version '%1%' is given.");
err % static_cast<std::uint32_t>(version); // Cast so the byte is formatted as a number, not a character.
throw RuntimeException(err.str());
}
}
private:
const Utils::Properties& m_properties;
};
} // namespace ReadWrite
} // namespace HashTable
} // namespace L4
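As a rough usage sketch only (not part of this commit): the drivers above can round-trip a hash table through any IStreamWriter/IStreamReader pair. The Memory and InternalHashTable names below are stand-ins for the concrete memory policy and internal hash table types used elsewhere in the library.
// Sketch only; assumes the serializer header above plus an IStreamWriter/IStreamReader
// implementation (e.g. file- or memory-backed) are available.
template <typename Memory, typename InternalHashTable>
typename Memory::template UniquePtr<InternalHashTable> RoundTripSketch(
    InternalHashTable& hashTable,
    Memory& memory,
    L4::IStreamWriter& writer,
    L4::IStreamReader& reader)
{
    // Serialize with the current format; the driver writes the version byte first.
    L4::HashTable::ReadWrite::Serializer<InternalHashTable>{}.Serialize(hashTable, writer);

    // Deserialize; the driver reads the version byte and dispatches to the matching
    // versioned deserializer (only Current::c_version is supported here).
    const L4::Utils::Properties properties;
    L4::HashTable::ReadWrite::Deserializer<Memory, InternalHashTable> deserializer{ properties };
    return deserializer.Deserialize(memory, reader);
}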


@ -0,0 +1,19 @@
#pragma once
#include <boost/interprocess/containers/list.hpp>
namespace L4
{
namespace Interprocess
{
namespace Container
{
template <typename T, typename Allocator>
using List = boost::interprocess::list<T, Allocator>;
} // namespace Container
} // namespace Interprocess
} // namespace L4


@ -0,0 +1,19 @@
#pragma once
#include <boost/interprocess/containers/string.hpp>
namespace L4
{
namespace Interprocess
{
namespace Container
{
template <typename Allocator>
using String = boost::interprocess::basic_string<char, std::char_traits<char>, Allocator>;
} // namespace Container
} // namespace Interprocess
} // namespace L4


@ -0,0 +1,19 @@
#pragma once
#include <boost/interprocess/containers/vector.hpp>
namespace L4
{
namespace Interprocess
{
namespace Container
{
template <typename T, typename Allocator>
using Vector = boost::interprocess::vector<T, Allocator>;
} // namespace Container
} // namespace Interprocess
} // namespace L4


@ -0,0 +1,55 @@
#pragma once
#include "Epoch/EpochRefPolicy.h"
#include "EpochManager.h"
#include "HashTableManager.h"
namespace L4
{
namespace LocalMemory
{
class Context : private EpochRefPolicy<EpochManager::EpochRefManager>
{
public:
Context(
HashTableManager& hashTableManager,
EpochManager::EpochRefManager& epochRefManager)
: EpochRefPolicy<EpochManager::EpochRefManager>(epochRefManager)
, m_hashTableManager{ hashTableManager }
{}
Context(Context&& context)
: EpochRefPolicy<EpochManager::EpochRefManager>(std::move(context))
, m_hashTableManager{ context.m_hashTableManager }
{}
const IReadOnlyHashTable& operator[](const char* name) const
{
return m_hashTableManager.GetHashTable(name);
}
IWritableHashTable& operator[](const char* name)
{
return m_hashTableManager.GetHashTable(name);
}
const IReadOnlyHashTable& operator[](std::size_t index) const
{
return m_hashTableManager.GetHashTable(index);
}
IWritableHashTable& operator[](std::size_t index)
{
return m_hashTableManager.GetHashTable(index);
}
Context(const Context&) = delete;
Context& operator=(const Context&) = delete;
private:
HashTableManager& m_hashTableManager;
};
} // namespace LocalMemory
} // namespace L4


@ -0,0 +1,124 @@
#pragma once
#include <atomic>
#include <boost/thread/shared_lock_guard.hpp>
#include <mutex>
#include "Epoch/Config.h"
#include "Epoch/EpochActionManager.h"
#include "Epoch/EpochQueue.h"
#include "Log/PerfCounter.h"
#include "Utils/Lock.h"
#include "Utils/RunningThread.h"
namespace L4
{
namespace LocalMemory
{
// EpochManager aggregates epoch-related functionalities such as adding/removing
// client epoch queues, registering/performing actions, and updating the epoch counters.
class EpochManager : public IEpochActionManager
{
public:
using EpochQueue = EpochQueue<
boost::shared_lock_guard<Utils::ReaderWriterLockSlim>,
std::lock_guard<Utils::ReaderWriterLockSlim>>;
using EpochRefManager = EpochRefManager<EpochQueue>;
EpochManager(
const EpochManagerConfig& config,
ServerPerfData& perfData)
: m_perfData{ perfData }
, m_config{ config }
, m_currentEpochCounter{ 0U }
, m_epochQueue{
m_currentEpochCounter,
m_config.m_epochQueueSize }
, m_epochRefManager{ m_epochQueue }
, m_epochCounterManager{ m_epochQueue }
, m_epochActionManager{ config.m_numActionQueues }
, m_processingThread{
m_config.m_epochProcessingInterval,
[this]
{
this->Remove();
this->Add();
}}
{}
EpochRefManager& GetEpochRefManager()
{
return m_epochRefManager;
}
void RegisterAction(Action&& action) override
{
m_epochActionManager.RegisterAction(m_currentEpochCounter, std::move(action));
m_perfData.Increment(ServerPerfCounter::PendingActionsCount);
}
EpochManager(const EpochManager&) = delete;
EpochManager& operator=(const EpochManager&) = delete;
private:
using EpochCounterManager = EpochCounterManager<EpochQueue>;
using ProcessingThread = Utils::RunningThread<std::function<void()>>;
// Enqueues a new epoch whose counter value is last counter + 1.
// This is called from the server side.
void Add()
{
// Incrementing the global epoch counter before incrementing the per-connection
// epoch counter is safe (the other way around is not). If the server process is
// registering an action at m_currentEpochCounter in RegisterAction(), it is
// happening in the "future," which means any client referencing the memory to be
// deleted is also doing so in the "future," so the deletion remains safe.
++m_currentEpochCounter;
m_epochCounterManager.AddNewEpoch();
}
// Dequeues any epochs whose ref counter is 0, meaning there is no reference at that time.
void Remove()
{
const auto oldestEpochCounter = m_epochCounterManager.RemoveUnreferenceEpochCounters();
const auto numActionsPerformed = m_epochActionManager.PerformActions(oldestEpochCounter);
m_perfData.Subtract(ServerPerfCounter::PendingActionsCount, numActionsPerformed);
m_perfData.Set(ServerPerfCounter::LastPerformedActionsCount, numActionsPerformed);
m_perfData.Set(ServerPerfCounter::OldestEpochCounterInQueue, oldestEpochCounter);
m_perfData.Set(ServerPerfCounter::LatestEpochCounterInQueue, m_currentEpochCounter);
}
// Reference to the performance data.
ServerPerfData& m_perfData;
// Configuration related to epoch manager.
EpochManagerConfig m_config;
// The global current epoch counter.
std::atomic_uint64_t m_currentEpochCounter;
// Epoch queue.
EpochQueue m_epochQueue;
// Handles adding/decrementing ref counts.
EpochRefManager m_epochRefManager;
// Handles adding new epoch and finding the epoch counts that have zero ref counts.
EpochCounterManager m_epochCounterManager;
// Handles registering/performing actions.
EpochActionManager m_epochActionManager;
// Thread responsible for updating the current epoch counter,
// removing the unreferenced epoch counter, etc.
// Should be the last member so that it gets destroyed first.
ProcessingThread m_processingThread;
};
} // namespace LocalMemory
} // namespace L4


@ -0,0 +1,89 @@
#pragma once
#include <boost/any.hpp>
#include <memory>
#include <vector>
#include "Epoch/IEpochActionManager.h"
#include "HashTable/Config.h"
#include "HashTable/ReadWrite/HashTable.h"
#include "HashTable/Cache/HashTable.h"
#include "Utils/Containers.h"
#include "Utils/Exception.h"
namespace L4
{
namespace LocalMemory
{
class HashTableManager
{
public:
template <typename Allocator>
std::size_t Add(
const HashTableConfig& config,
IEpochActionManager& epochActionManager,
Allocator allocator)
{
if (m_hashTableNameToIndex.find(config.m_name) != m_hashTableNameToIndex.end())
{
throw RuntimeException("Same hash table name already exists.");
}
using namespace HashTable;
using InternalHashTable = typename ReadWrite::WritableHashTable<Allocator>::HashTable;
auto internalHashTable = std::make_shared<InternalHashTable>(
InternalHashTable::Setting{
config.m_setting.m_numBuckets,
(std::max)(config.m_setting.m_numBucketsPerMutex.get_value_or(1U), 1U),
config.m_setting.m_fixedKeySize.get_value_or(0U),
config.m_setting.m_fixedValueSize.get_value_or(0U) },
allocator);
// TODO: Create from a serializer.
const auto& cacheConfig = config.m_cache;
auto hashTable =
cacheConfig
? std::make_unique<Cache::WritableHashTable<Allocator>>(
*internalHashTable,
epochActionManager,
cacheConfig->m_maxCacheSizeInBytes,
cacheConfig->m_recordTimeToLive,
cacheConfig->m_forceTimeBasedEviction)
: std::make_unique<ReadWrite::WritableHashTable<Allocator>>(
*internalHashTable,
epochActionManager);
m_internalHashTables.emplace_back(std::move(internalHashTable));
m_hashTables.emplace_back(std::move(hashTable));
const auto newIndex = m_hashTables.size() - 1;
m_hashTableNameToIndex.emplace(config.m_name, newIndex);
return newIndex;
}
IWritableHashTable& GetHashTable(const char* name)
{
assert(m_hashTableNameToIndex.find(name) != m_hashTableNameToIndex.cend());
return GetHashTable(m_hashTableNameToIndex.find(name)->second);
}
IWritableHashTable& GetHashTable(std::size_t index)
{
assert(index < m_hashTables.size());
return *m_hashTables[index];
}
private:
Utils::StdStringKeyMap<std::size_t> m_hashTableNameToIndex;
std::vector<boost::any> m_internalHashTables;
std::vector<std::unique_ptr<IWritableHashTable>> m_hashTables;
};
} // namespace LocalMemory
} // namespace L4


@ -0,0 +1,46 @@
#pragma once
#include "Context.h"
#include "EpochManager.h"
#include "HashTable/Config.h"
#include "Log/PerfCounter.h"
namespace L4
{
namespace LocalMemory
{
class HashTableService
{
public:
explicit HashTableService(
const EpochManagerConfig& epochManagerConfig = EpochManagerConfig())
: m_epochManager{ epochManagerConfig, m_serverPerfData }
{}
template <typename Allocator = std::allocator<void>>
std::size_t AddHashTable(
const HashTableConfig& config,
Allocator allocator = Allocator())
{
return m_hashTableManager.Add(config, m_epochManager, allocator);
}
Context GetContext()
{
return Context(m_hashTableManager, m_epochManager.GetEpochRefManager());
}
private:
ServerPerfData m_serverPerfData;
HashTableManager m_hashTableManager;
// Make sure EpochManager is destroyed before HashTableManager (it is declared last,
// so it is destroyed first) b/c EpochManager's processing thread could still be
// performing epoch actions that touch the hash tables.
EpochManager m_epochManager;
};
} // namespace LocalMemory
} // namespace L4
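A minimal usage sketch of HashTableService (assumptions: the HashTableConfig passed in has been populated elsewhere, since its definition is not part of this section, and the key/value bytes are made up; the read-side lookup API is likewise not shown here).
void HashTableServiceSketch(const L4::HashTableConfig& config)
{
    L4::LocalMemory::HashTableService service;
    const std::size_t index = service.AddHashTable(config);

    // A Context pins the current epoch while it is alive; hash tables are
    // looked up by index (or by name) through operator[].
    auto context = service.GetContext();
    L4::IWritableHashTable& hashTable = context[index];

    std::uint8_t keyBytes[] = { 'k', 'e', 'y' };
    std::uint8_t valueBytes[] = { 'v', 'a', 'l' };

    L4::IReadOnlyHashTable::Key key;
    key.m_data = keyBytes;
    key.m_size = static_cast<decltype(key.m_size)>(sizeof(keyBytes));

    L4::IReadOnlyHashTable::Value value;
    value.m_data = valueBytes;
    value.m_size = static_cast<decltype(value.m_size)>(sizeof(valueBytes));

    hashTable.Add(key, value);
}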

inc/L4/Log/IPerfLogger.h

@ -0,0 +1,38 @@
#pragma once
#include <map>
#include "PerfCounter.h"
namespace L4
{
// IPerfLogger interface.
struct IPerfLogger
{
struct IData;
virtual ~IPerfLogger() = default;
virtual void Log(const IData& data) = 0;
};
// IPerfLogger::IData interface that provides access to ServerPerfData and the aggregated HashTablePerfData.
// Note that the user of IPerfLogger only needs to implement IPerfLogger since IPerfLogger::IData is
// implemented internally.
struct IPerfLogger::IData
{
using HashTablesPerfData = std::map<
std::string,
std::reference_wrapper<const HashTablePerfData>>;
virtual ~IData() = default;
virtual const ServerPerfData& GetServerPerfData() const = 0;
virtual const HashTablesPerfData& GetHashTablesPerfData() const = 0;
};
} // namespace L4

inc/L4/Log/PerfCounter.h

@ -0,0 +1,216 @@
#pragma once
#include <cstdint>
#include <algorithm>
#include <array>
#include <limits>
#include <atomic>
namespace L4
{
enum class ServerPerfCounter : std::uint16_t
{
// Connection Manager
ClientConnectionsCount = 0U,
// EpochManager
OldestEpochCounterInQueue,
LatestEpochCounterInQueue,
PendingActionsCount,
LastPerformedActionsCount,
Count
};
const std::array<
const char*,
static_cast<std::uint16_t>(ServerPerfCounter::Count)> c_serverPerfCounterNames =
{
// Connection Manager
"ClientConnectionsCount",
// EpochManager
"OldestEpochCounterInQueue",
"LatestEpochCounterInQueue",
"PendingActionsCount",
"LastPerformedActionsCount"
};
enum class HashTablePerfCounter : std::uint16_t
{
RecordsCount = 0U,
BucketsCount,
TotalKeySize,
TotalValueSize,
TotalIndexSize,
ChainingEntriesCount,
// Max/Min counters only ever move in one direction. In other words, we don't keep track
// of the next max record size when the max record is deleted.
MinKeySize,
MaxKeySize,
MinValueSize,
MaxValueSize,
MaxBucketChainLength,
RecordsCountLoadedFromSerializer,
RecordsCountSavedFromSerializer,
// CacheHashTable specific counters.
CacheHitCount,
CacheMissCount,
EvictedRecordsCount,
Count
};
const std::array<
const char*,
static_cast<std::uint16_t>(HashTablePerfCounter::Count)> c_hashTablePerfCounterNames =
{
"RecordsCount",
"BucketsCount",
"TotalKeySize",
"TotalValueSize",
"TotalIndexSize",
"ChainingEntriesCount",
"MinKeySize",
"MaxKeySize",
"MinValueSize",
"MaxValueSize",
"MaxBucketChainLength",
"RecordsCountLoadedFromSerializer",
"RecordsCountSavedFromSerializer",
"CacheHitCount",
"CacheMissCount",
"EvictedRecordsCount"
};
template <typename TCounterEnum>
class PerfCounters
{
public:
typedef std::int64_t TValue;
typedef std::atomic<TValue> TCounter;
PerfCounters()
{
std::for_each(
std::begin(m_counters),
std::end(m_counters),
[] (TCounter& counter)
{
counter = 0;
});
}
// Note that since the ordering doesn't matter when the counter is updated, memory_order_relaxed
// is used for all perf counter updates.
// More from http://en.cppreference.com/w/cpp/atomic/memory_order:
// Typical use for relaxed memory ordering is updating counters, such as the reference counters
// of std::shared_ptr, since this only requires atomicity, but not ordering or synchronization.
TValue Get(TCounterEnum counterEnum) const
{
return m_counters[static_cast<std::uint16_t>(counterEnum)].load(std::memory_order_relaxed);
}
void Set(TCounterEnum counterEnum, TValue value)
{
m_counters[static_cast<std::uint16_t>(counterEnum)].store(value, std::memory_order_relaxed);
}
void Increment(TCounterEnum counterEnum)
{
m_counters[static_cast<std::uint16_t>(counterEnum)].fetch_add(1, std::memory_order_relaxed);
}
void Decrement(TCounterEnum counterEnum)
{
m_counters[static_cast<std::uint16_t>(counterEnum)].fetch_sub(1, std::memory_order_relaxed);
}
void Add(TCounterEnum counterEnum, TValue value)
{
if (value != 0)
{
m_counters[static_cast<std::uint16_t>(counterEnum)].fetch_add(value, std::memory_order_relaxed);
}
}
void Subtract(TCounterEnum counterEnum, TValue value)
{
if (value != 0)
{
m_counters[static_cast<std::uint16_t>(counterEnum)].fetch_sub(value, std::memory_order_relaxed);
}
}
void Max(TCounterEnum counterEnum, TValue value)
{
auto& counter = m_counters[static_cast<std::uint16_t>(counterEnum)];
TValue startValue = counter.load(std::memory_order_acquire);
do
{
// "load()" from counter is needed only once since the value of Max is
// monotonically increasing. If startValue is changed by other threads,
// compare_exchange_strong will return false and startValue will be
// written to the latest value, thus returning to this code path.
if (startValue > value)
{
return;
}
}
while (!counter.compare_exchange_strong(
startValue,
value,
std::memory_order_release,
std::memory_order_acquire));
}
void Min(TCounterEnum counterEnum, TValue value)
{
auto& counter = m_counters[static_cast<std::uint16_t>(counterEnum)];
TValue startValue = counter.load(std::memory_order_acquire);
do
{
// See the comment in Max(); the value of a Min counter is monotonically decreasing.
if (startValue < value)
{
return;
}
}
while (!counter.compare_exchange_strong(
startValue,
value,
std::memory_order_release,
std::memory_order_acquire));
}
private:
__declspec(align(8)) TCounter m_counters[static_cast<std::uint16_t>(TCounterEnum::Count)];
};
typedef PerfCounters<ServerPerfCounter> ServerPerfData;
struct HashTablePerfData : public PerfCounters<HashTablePerfCounter>
{
HashTablePerfData()
{
// Initialize any min counters to the max value.
const auto maxValue = (std::numeric_limits<HashTablePerfData::TValue>::max)();
Set(HashTablePerfCounter::MinValueSize, maxValue);
Set(HashTablePerfCounter::MinKeySize, maxValue);
// MaxBucketChainLength starts at 1 since a bucket itself already
// contains one entry that stores the data.
Set(HashTablePerfCounter::MaxBucketChainLength, 1);
}
};
} // namespace L4
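A small sketch of how the counters above are updated and read; the values in the comments are what these calls produce.
void PerfCounterSketch()
{
    L4::HashTablePerfData perfData;

    perfData.Increment(L4::HashTablePerfCounter::RecordsCount);
    perfData.Add(L4::HashTablePerfCounter::TotalKeySize, 16);
    perfData.Max(L4::HashTablePerfCounter::MaxKeySize, 16);   // MaxKeySize becomes 16.
    perfData.Min(L4::HashTablePerfCounter::MinKeySize, 16);   // MinKeySize becomes 16.
    perfData.Max(L4::HashTablePerfCounter::MaxKeySize, 8);    // No change, since 8 < 16.

    const auto recordsCount = perfData.Get(L4::HashTablePerfCounter::RecordsCount);  // == 1
    (void)recordsCount;
}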

inc/L4/Log/PerfLogger.h

@ -0,0 +1,56 @@
#pragma once
#include "IPerfLogger.h"
namespace L4
{
struct PerfLoggerManagerConfig;
// PerfData holds the ServerPerfData and a HashTablePerfData for each hash table.
// Note that PerfData owns the ServerPerfData but holds only const references to the
// HashTablePerfData instances, which are owned by their respective hash tables.
class PerfData : public IPerfLogger::IData
{
public:
PerfData() = default;
ServerPerfData& GetServerPerfData();
const ServerPerfData& GetServerPerfData() const override;
const HashTablesPerfData& GetHashTablesPerfData() const override;
void AddHashTablePerfData(const char* hashTableName, const HashTablePerfData& perfData);
PerfData(const PerfData&) = delete;
PerfData& operator=(const PerfData&) = delete;
private:
ServerPerfData m_serverPerfData;
HashTablesPerfData m_hashTablesPerfData;
};
// PerfData inline implementations.
inline ServerPerfData& PerfData::GetServerPerfData()
{
return m_serverPerfData;
}
inline const ServerPerfData& PerfData::GetServerPerfData() const
{
return m_serverPerfData;
}
inline const PerfData::HashTablesPerfData& PerfData::GetHashTablesPerfData() const
{
return m_hashTablesPerfData;
}
} // namespace L4


@ -0,0 +1,36 @@
#pragma once
#include <cstdint>
#include <cstddef>
namespace L4
{
// IStream interface.
struct IStream
{
virtual ~IStream() {}
virtual void Begin() = 0;
virtual void End() = 0;
};
// IStreamReader interface.
struct IStreamReader : public IStream
{
virtual void Read(std::uint8_t buffer[], std::size_t bufferSize) = 0;
};
// IStreamWriter interface.
struct IStreamWriter : public IStream
{
virtual void Write(const std::uint8_t buffer[], std::size_t bufferSize) = 0;
};
} // namespace L4
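A hypothetical in-memory implementation of the two interfaces above (not part of L4; useful for tests and referenced by the helper sketch further below):
#include <algorithm>
#include <vector>

class MemoryStreamWriter : public L4::IStreamWriter
{
public:
    void Begin() override { m_data.clear(); }
    void End() override {}
    void Write(const std::uint8_t buffer[], std::size_t bufferSize) override
    {
        m_data.insert(m_data.end(), buffer, buffer + bufferSize);
    }
    const std::vector<std::uint8_t>& Data() const { return m_data; }
private:
    std::vector<std::uint8_t> m_data;
};

class MemoryStreamReader : public L4::IStreamReader
{
public:
    explicit MemoryStreamReader(const std::vector<std::uint8_t>& data)
        : m_data{ data }
    {}
    void Begin() override { m_offset = 0U; }
    void End() override {}
    void Read(std::uint8_t buffer[], std::size_t bufferSize) override
    {
        std::copy_n(m_data.data() + m_offset, bufferSize, buffer);
        m_offset += bufferSize;
    }
private:
    const std::vector<std::uint8_t>& m_data;
    std::size_t m_offset = 0U;
};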


@ -0,0 +1,65 @@
#pragma once
#include <cstdint>
#include "IStream.h"
namespace L4
{
// SerializerHelper provides helper functions for writing to an IStreamWriter.
class SerializerHelper
{
public:
SerializerHelper(IStreamWriter& writer)
: m_writer(writer)
{}
SerializerHelper(const SerializerHelper&) = delete;
SerializerHelper& operator=(const SerializerHelper&) = delete;
template <typename T>
void Serialize(const T& obj)
{
m_writer.Write(reinterpret_cast<const std::uint8_t*>(&obj), sizeof(obj));
}
void Serialize(const void* data, std::uint32_t dataSize)
{
m_writer.Write(static_cast<const std::uint8_t*>(data), dataSize);
}
private:
IStreamWriter& m_writer;
};
// DeserializerHelper provides helper functions for reading from an IStreamReader.
class DeserializerHelper
{
public:
DeserializerHelper(IStreamReader& reader)
: m_reader(reader)
{
}
DeserializerHelper(const DeserializerHelper&) = delete;
DeserializerHelper& operator=(const DeserializerHelper&) = delete;
template <typename T>
void Deserialize(T& obj)
{
m_reader.Read(reinterpret_cast<std::uint8_t*>(&obj), sizeof(obj));
}
void Deserialize(void* data, std::uint32_t dataSize)
{
m_reader.Read(static_cast<std::uint8_t*>(data), dataSize);
}
private:
IStreamReader& m_reader;
};
} // namespace L4
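A round-trip sketch using the helpers above together with the hypothetical MemoryStreamWriter/MemoryStreamReader sketched earlier:
void HelperRoundTripSketch()
{
    MemoryStreamWriter writer;
    writer.Begin();

    L4::SerializerHelper serializer{ writer };
    const std::uint32_t count = 42U;
    serializer.Serialize(count);                       // Writes the 4 bytes of 'count'.
    const char payload[] = "abc";
    serializer.Serialize(payload, sizeof(payload));    // Writes the raw bytes, including '\0'.
    writer.End();

    MemoryStreamReader reader{ writer.Data() };
    reader.Begin();

    L4::DeserializerHelper deserializer{ reader };
    std::uint32_t readCount = 0U;
    deserializer.Deserialize(readCount);               // readCount == 42.
    char readPayload[sizeof(payload)] = {};
    deserializer.Deserialize(readPayload, sizeof(readPayload));
    reader.End();
}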


@ -0,0 +1,52 @@
#pragma once
#include <atomic>
#include <cstdint>
#include <boost/version.hpp>
#include <boost/interprocess/offset_ptr.hpp>
namespace L4
{
namespace Utils
{
// AtomicOffsetPtr provides a way to atomically update the offset pointer.
// The current boost::interprocess::offset_ptr cannot be used with std::atomic<> because
// the class is not trivially copyable. AtomicOffsetPtr borrows the same concept to calculate
// the pointer address based on the offset (boost::interprocess::ipcdetail::offset_ptr_to* functions
// are reused).
// Note that the ->, * and copy/assignment operators are intentionally not implemented so that
// the user (inside this library) stays aware of the cost of each access and does not
// accidentally incur any performance hits.
template <typename T>
class AtomicOffsetPtr
{
public:
AtomicOffsetPtr()
: m_offset(1)
{}
AtomicOffsetPtr(const AtomicOffsetPtr&) = delete;
AtomicOffsetPtr& operator=(const AtomicOffsetPtr&) = delete;
T* Load(std::memory_order memoryOrder = std::memory_order_seq_cst) const
{
return static_cast<T*>(
boost::interprocess::ipcdetail::offset_ptr_to_raw_pointer(
this,
m_offset.load(memoryOrder)));
}
void Store(T* ptr, std::memory_order memoryOrder = std::memory_order_seq_cst)
{
m_offset.store(boost::interprocess::ipcdetail::offset_ptr_to_offset(ptr, this), memoryOrder);
}
private:
std::atomic_uint64_t m_offset;
};
} // namespace Utils
} // namespace L4
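A small sketch of publishing a pointer through AtomicOffsetPtr (the Node type is made up for illustration):
struct Node
{
    int m_value;
};

void AtomicOffsetPtrSketch(Node& node)
{
    L4::Utils::AtomicOffsetPtr<Node> head;   // Initially "null" (offset == 1).

    // Publish the node with release semantics...
    head.Store(&node, std::memory_order_release);

    // ...and read it back with acquire semantics.
    Node* const current = head.Load(std::memory_order_acquire);
    (void)current;
}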

inc/L4/Utils/Clock.h

@ -0,0 +1,24 @@
#pragma once
#include <chrono>
namespace L4
{
namespace Utils
{
class EpochClock
{
public:
std::chrono::seconds GetCurrentEpochTime() const
{
return std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::high_resolution_clock::now().time_since_epoch());
}
};
} // namespace Utils
} // namespace L4


@ -0,0 +1,69 @@
#pragma once
#include <cassert>
#include <cctype>
#include <cstdint>
#include <string>
#include <boost/functional/hash.hpp>
namespace L4
{
namespace Utils
{
// CaseInsensitiveStdStringComparer is an STL-compatible case-insensitive ANSI std::string comparer.
struct CaseInsensitiveStdStringComparer
{
bool operator()(const std::string& str1, const std::string& str2) const
{
return _stricmp(str1.c_str(), str2.c_str()) == 0;
}
};
// CaseInsensitiveStringComparer is an STL-compatible case-insensitive ANSI string comparer.
struct CaseInsensitiveStringComparer
{
bool operator()(const char* const str1, const char* const str2) const
{
return _stricmp(str1, str2) == 0;
}
};
// CaseInsensitiveStdStringHasher is an STL-compatible case-insensitive ANSI std::string hasher.
struct CaseInsensitiveStdStringHasher
{
std::size_t operator()(const std::string& str) const
{
std::size_t seed = 0;
for (auto c : str)
{
boost::hash_combine(seed, std::toupper(c));
}
return seed;
}
};
// CaseInsensitiveStringHasher is an STL-compatible case-insensitive ANSI string hasher.
struct CaseInsensitiveStringHasher
{
std::size_t operator()(const char* str) const
{
assert(str != nullptr);
std::size_t seed = 0;
while (*str)
{
boost::hash_combine(seed, std::toupper(*str++));
}
return seed;
}
};
} // namespace Utils
} // namespace L4

inc/L4/Utils/Containers.h

@ -0,0 +1,45 @@
#pragma once
#include <cstdint>
#include <string>
#include <unordered_map>
#include <boost/functional/hash.hpp>
#include "Utils/ComparerHasher.h"
namespace L4
{
namespace Utils
{
// StdStringKeyMap is an unordered_map where the key is std::string. It is slower than
// StringKeyMap below, but it owns the memory of the string, so it's easier to use.
template <typename TValue>
using StdStringKeyMap = std::unordered_map<
std::string,
TValue,
Utils::CaseInsensitiveStdStringHasher,
Utils::CaseInsensitiveStdStringComparer>;
// StringKeyMap is an unordered_map where the key is const char*.
// The memory of the key is not owned by StringKeyMap,
// but it is faster than StdStringKeyMap above for lookups.
template <typename TValue>
using StringKeyMap = std::unordered_map<
const char*,
TValue,
Utils::CaseInsensitiveStringHasher,
Utils::CaseInsensitiveStringComparer>;
// IntegerKeyMap using boost::hash and std::equal_to comparer and hasher.
template <typename TKey, typename TValue>
using IntegerKeyMap = std::unordered_map<
TKey,
TValue,
boost::hash<TKey>,
std::equal_to<TKey>>;
} // namespace Utils
} // namespace L4
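A quick sketch of the case-insensitive lookup these aliases provide:
void ContainerSketch()
{
    L4::Utils::StdStringKeyMap<std::size_t> tableIndices;
    tableIndices.emplace("Table1", 0U);

    // Lookup is case-insensitive, so "TABLE1" finds the entry added as "Table1".
    const bool found = tableIndices.find("TABLE1") != tableIndices.end();
    (void)found;   // true
}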

inc/L4/Utils/Exception.h

@ -0,0 +1,21 @@
#pragma once
#include <stdexcept>
namespace L4
{
// RuntimeException class used across L4 library.
class RuntimeException : public std::runtime_error
{
public:
explicit RuntimeException(const std::string& message)
: std::runtime_error(message.c_str())
{}
explicit RuntimeException(const char* message)
: std::runtime_error(message)
{}
};
} // namespace L4

inc/L4/Utils/Lock.h

@ -0,0 +1,88 @@
#pragma once
#include "Utils/Windows.h"
namespace L4
{
namespace Utils
{
// Represents a RAII wrapper for Win32 CRITICAL_SECTION.
class CriticalSection : protected ::CRITICAL_SECTION
{
public:
// Constructs and initializes the critical section.
CriticalSection()
{
::InitializeCriticalSection(this);
}
CriticalSection(const CriticalSection& other) = delete;
CriticalSection& operator=(const CriticalSection& other) = delete;
// Destructs the critical section.
~CriticalSection()
{
::DeleteCriticalSection(this);
}
// Waits for ownership of the critical section.
void lock()
{
::EnterCriticalSection(this);
}
// Releases ownership of the critical section.
void unlock()
{
::LeaveCriticalSection(this);
}
};
// Represents a RAII wrapper for Win32 SRW lock.
class ReaderWriterLockSlim
{
public:
// Constructs and initializes an SRW lock.
ReaderWriterLockSlim()
{
::InitializeSRWLock(&m_lock);
}
ReaderWriterLockSlim(const ReaderWriterLockSlim& other) = delete;
ReaderWriterLockSlim& operator=(const ReaderWriterLockSlim& other) = delete;
// Acquires an SRW lock in shared mode.
void lock_shared()
{
::AcquireSRWLockShared(&m_lock);
}
// Acquires an SRW lock in exclusive mode.
void lock()
{
::AcquireSRWLockExclusive(&m_lock);
}
// Releases an SRW lock that was opened in shared mode.
void unlock_shared()
{
::ReleaseSRWLockShared(&m_lock);
}
// Releases an SRW lock that was opened in exclusive mode.
void unlock()
{
::ReleaseSRWLockExclusive(&m_lock);
}
private:
// Stores the Win32 SRW lock.
::SRWLOCK m_lock;
};
} // namespace Utils
} // namespace L4
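A sketch of how ReaderWriterLockSlim is meant to be used with standard guard types (the same combination EpochManager uses for its EpochQueue):
#include <mutex>
#include <boost/thread/shared_lock_guard.hpp>

void LockSketch(L4::Utils::ReaderWriterLockSlim& lock)
{
    {
        // Shared (reader) section; multiple readers may enter concurrently.
        boost::shared_lock_guard<L4::Utils::ReaderWriterLockSlim> readGuard{ lock };
    }
    {
        // Exclusive (writer) section.
        std::lock_guard<L4::Utils::ReaderWriterLockSlim> writeGuard{ lock };
    }
}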

inc/L4/Utils/Math.h

@ -0,0 +1,79 @@
#pragma once
#include <cstdint>
#include <cstddef>
#include <cstdlib> // For std::abs on integral types.
namespace L4
{
namespace Utils
{
namespace Math
{
// Rounds up the number to the nearest multiple of base.
inline std::uint64_t RoundUp(std::uint64_t number, std::uint64_t base)
{
return base ? (((number + base - 1) / base) * base) : number;
}
// Rounds down the number to the nearest multiple of base.
inline std::uint64_t RoundDown(std::uint64_t number, std::uint64_t base)
{
return base ? ((number / base) * base) : number;
}
// Returns true if the given number is a power of 2.
inline bool IsPowerOfTwo(std::uint64_t number)
{
return number && ((number & (number - 1)) == 0);
}
// Returns the next highest power of two from the given value.
// http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2.
inline std::uint32_t NextHighestPowerOfTwo(std::uint32_t val)
{
--val;
val |= val >> 1;
val |= val >> 2;
val |= val >> 4;
val |= val >> 8;
val |= val >> 16;
return ++val;
}
// Provides utility functions doing pointer related arithmetics.
namespace PointerArithmetic
{
// Returns a new pointer after adding an offset.
template <typename T>
inline T* Add(T* ptr, std::size_t offset)
{
return reinterpret_cast<T*>(reinterpret_cast<std::uintptr_t>(ptr) + offset);
}
// Returns a new pointer after subtracting an offset.
template <typename T>
inline T* Subtract(T* ptr, std::size_t offset)
{
return reinterpret_cast<T*>(reinterpret_cast<std::uintptr_t>(ptr) - offset);
}
// Returns the absolute value of difference in the number of bytes between two pointers.
inline std::size_t Distance(const void* lhs, const void* rhs)
{
return std::abs(reinterpret_cast<std::ptrdiff_t>(lhs) - reinterpret_cast<std::ptrdiff_t>(rhs));
}
} // namespace PointerArithmetic
} // namespace Math
} // namespace Utils
} // namespace L4
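A few worked values for the helpers above (a sketch only):
#include <cassert>

void MathSketch()
{
    using namespace L4::Utils::Math;

    assert(RoundUp(10U, 4U) == 12U);
    assert(RoundDown(10U, 4U) == 8U);
    assert(IsPowerOfTwo(64U));
    assert(NextHighestPowerOfTwo(33U) == 64U);

    // PointerArithmetic works on byte offsets, not element counts.
    int buffer[4] = {};
    int* const p = PointerArithmetic::Add(&buffer[0], 2U * sizeof(int));
    assert(p == &buffer[2]);
    assert(PointerArithmetic::Distance(&buffer[2], &buffer[0]) == 2U * sizeof(int));
}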


@ -0,0 +1,37 @@
//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
#ifndef _MURMURHASH3_H_
#define _MURMURHASH3_H_
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER) && (_MSC_VER < 1600)
typedef unsigned char uint8_t;
typedef unsigned long uint32_t;
typedef unsigned __int64 uint64_t;
// Other compilers
#else // defined(_MSC_VER) && (_MSC_VER < 1600)
#include <stdint.h>
#endif // !defined(_MSC_VER) || (_MSC_VER >= 1600)
//-----------------------------------------------------------------------------
void MurmurHash3_x86_32(const void * key, int len, uint32_t seed, void * out);
void MurmurHash3_x86_128(const void * key, int len, uint32_t seed, void * out);
void MurmurHash3_x64_128(const void * key, int len, uint32_t seed, void * out);
//-----------------------------------------------------------------------------
#endif // _MURMURHASH3_H_

inc/L4/Utils/Properties.h

@ -0,0 +1,56 @@
#pragma once
#include "Utils/Containers.h"
#include <boost/lexical_cast.hpp>
namespace L4
{
namespace Utils
{
// Properties class represents a string to string map (case insensitive).
// It can be used where the configurations should be generic.
class Properties : public StdStringKeyMap<std::string>
{
public:
using Base = Utils::StdStringKeyMap<std::string>;
using Value = Base::value_type;
Properties() = default;
// Expose a constructor with initializer_list for convenience.
Properties(std::initializer_list<Value> values)
: Base(values)
{
}
// Returns true if the given key exists and the value associated with
// the key can be converted to the TValue type. If the conversion fails, the value
// of the given val is guaranteed to remain the same.
template <typename TValue>
bool TryGet(const std::string& key, TValue& val) const
{
const auto it = find(key);
if (it == end())
{
return false;
}
TValue tmp;
if (!boost::conversion::try_lexical_convert(it->second, tmp))
{
return false;
}
val = tmp;
return true;
}
};
} // namespace Utils
} // namespace L4
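A usage sketch for Properties::TryGet (the keys and values below are made up):
void PropertiesSketch()
{
    const L4::Utils::Properties properties{
        { "NumBuckets", "1000000" },
        { "EpochQueueSize", "1000" } };

    std::uint32_t numBuckets = 0U;
    const bool found = properties.TryGet("NumBuckets", numBuckets);
    // found == true, numBuckets == 1000000.

    std::uint32_t missing = 7U;
    const bool notFound = properties.TryGet("DoesNotExist", missing);
    // notFound == false and 'missing' keeps its original value.

    (void)found;
    (void)notFound;
}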


@ -0,0 +1,79 @@
#pragma once
#include <chrono>
#include <cstdint>
#include <thread>
#include <atomic>
namespace L4
{
namespace Utils
{
// NoOp is a function object that doesn't do anything.
struct NoOp
{
void operator()(...) {}
};
// RunningThread wraps std::thread and repeatedly runs a given function, sleeping
// for the given interval between runs. Note that the destructor waits for the thread to stop.
template <typename CoreFunc, typename PrepFunc = NoOp>
class RunningThread
{
public:
RunningThread(
std::chrono::milliseconds interval,
CoreFunc coreFunc,
PrepFunc prepFunc = PrepFunc())
: m_isRunning(),
m_thread(
&RunningThread::Start,
this,
interval,
coreFunc,
prepFunc)
{
}
~RunningThread()
{
m_isRunning.store(false);
if (m_thread.joinable())
{
m_thread.join();
}
}
RunningThread(const RunningThread&) = delete;
RunningThread& operator=(const RunningThread&) = delete;
private:
void Start(
std::chrono::milliseconds interval,
CoreFunc coreFunc,
PrepFunc prepFunc)
{
m_isRunning.store(true);
prepFunc();
while (m_isRunning.load())
{
coreFunc();
std::this_thread::sleep_for(interval);
}
}
std::atomic_bool m_isRunning;
std::thread m_thread;
};
} // namespace Utils
} // namespace L4
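A usage sketch: a background thread that bumps a counter every 10 ms until the RunningThread goes out of scope (matching how EpochManager drives its processing thread):
#include <functional>

void RunningThreadSketch()
{
    std::atomic<std::uint64_t> ticks{ 0U };
    {
        L4::Utils::RunningThread<std::function<void()>> thread{
            std::chrono::milliseconds{ 10 },
            [&ticks] { ++ticks; } };

        std::this_thread::sleep_for(std::chrono::milliseconds{ 50 });
    }   // Destructor stops and joins the thread here.
    (void)ticks;
}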

inc/L4/Utils/Time.h

@ -0,0 +1,48 @@
#pragma once
#include <cstdint>
#include "SharedMemoryHashTable/Utils/Windows.h"
namespace Ads
{
namespace DE
{
namespace SharedMemory
{
namespace Utils
{
namespace Time
{
// Returns the current high resolution system counter value.
inline std::uint64_t GetCurrentSystemCounter()
{
LARGE_INTEGER counter;
QueryPerformanceCounter(&counter);
return counter.QuadPart;
}
// Returns how many ticks there are in the given resolution interval.
// Note that the given resolution interval is in the same unit as NtQueryTimerResolution(),
// which is 1/10000 ms. Thus, 10000 translates to 1 ms.
// Note that this function is based on boost::interprocess::ipcdetail::get_system_tick_in_highres_counts().
inline std::uint32_t GetSystemTicks(std::uint32_t resolutionInterval)
{
// Frequency in counts per second.
LARGE_INTEGER freq;
QueryPerformanceFrequency(&freq);
std::int64_t femtoSecondsInOneCount = (1000000000000000LL - 1LL) / freq.QuadPart + 1LL;
// Calculate the tick count for the given resolution interval.
return static_cast<std::uint32_t>(
(static_cast<std::int64_t>(resolutionInterval) * 100000000LL - 1LL) / femtoSecondsInOneCount + 1LL);
}
} // namespace Time
} // namespace Utils
} // namespace L4

inc/L4/Utils/Windows.h

@ -0,0 +1,57 @@
#pragma once
// Allow macro redefinition.
#pragma warning(push)
#pragma warning(disable:4005)
// Explicitly excluding API groups
//#define NOGDICAPMASKS // - CC_*, LC_*, PC_*, CP_*, TC_*, RC_
#define NOVIRTUALKEYCODES // - VK_*
//#define NOWINMESSAGES // - WM_*, EM_*, LB_*, CB_*
#define NOWINSTYLES // - WS_*, CS_*, ES_*, LBS_*, SBS_*, CBS_*
#define NOSYSMETRICS // - SM_*
#define NOMENUS // - MF_*
#define NOICONS // - IDI_*
#define NOKEYSTATES // - MK_*
#define NOSYSCOMMANDS // - SC_*
#define NORASTEROPS // - Binary and Tertiary raster ops
#define NOSHOWWINDOW // - SW_*
#define OEMRESOURCE // - OEM Resource values
#define NOATOM // - Atom Manager routines
#define NOCLIPBOARD // - Clipboard routines
#define NOCOLOR // - Screen colors
//#define NOCTLMGR // - Control and Dialog routines
#define NODRAWTEXT // - DrawText() and DT_*
#define NOGDI // - All GDI defines and routines
#define NOKERNEL // - All KERNEL defines and routines
#define NONLS // - All NLS (natural language interfaces) defines and routines
#define NOMB // - MB_* and MessageBox()
#define NOMEMMGR // - GMEM_*, LMEM_*, GHND, LHND, associated routines
#define NOMETAFILE // - typedef METAFILEPICT
#define NOMINMAX // - Macros min(a,b) and max(a,b)
//#define NOMSG // - typedef MSG and associated routines
#define NOOPENFILE // - OpenFile(), OemToAnsi, AnsiToOem, and OF_*
#define NOSCROLL // - SB_* and scrolling routines
#define NOSERVICE // - All Service Controller routines, SERVICE_ equates, etc.
#define NOSOUND // - Sound driver routines
#define NOTEXTMETRIC // - typedef TEXTMETRIC and associated routines
#define NOWH // - SetWindowsHook and WH_*
#define NOWINOFFSETS // - GWL_*, GCL_*, associated routines
#define NOCOMM // - COMM driver routines
#define NOKANJI // - Kanji support stuff.
#define NOHELP // - Help engine interface.
#define NOPROFILER // - Profiler interface.
#define NODEFERWINDOWPOS // - DeferWindowPos routines
#define NOMCX // - Modem Configuration Extensions
// Enabling STRICT redefines certain data types so that the compiler does not permit assignment from one type to another without an explicit cast.
#define STRICT
// Define WIN32_LEAN_AND_MEAN to exclude APIs such as Cryptography, DDE, RPC, Shell, and Windows Sockets.
// Cryptography is needed due to <boost/uuids/random_generator.hpp>
//#define WIN32_LEAN_AND_MEAN
#pragma warning(pop)
#include <Windows.h>


@ -0,0 +1,15 @@
#pragma once
#include <boost/interprocess/detail/utilities.hpp>
namespace L4
{
namespace Detail
{
using boost::interprocess::ipcdetail::to_raw_pointer;
} // namespace Detail
} // namespace L4


@ -0,0 +1,86 @@
#include "Epoch/EpochActionManager.h"
#include "Utils/Math.h"
#include <cassert>
#include <thread>
namespace L4
{
// EpochActionManager class implementation.
EpochActionManager::EpochActionManager(std::uint8_t numActionQueues)
: m_epochToActionsList{}
, m_counter{}
{
// Calculate numActionQueues as the next highest power of two.
std::uint16_t newNumActionQueues = numActionQueues;
if (numActionQueues == 0U)
{
newNumActionQueues = static_cast<std::uint16_t>(std::thread::hardware_concurrency());
}
newNumActionQueues = static_cast<std::uint16_t>(Utils::Math::NextHighestPowerOfTwo(newNumActionQueues));
assert(newNumActionQueues != 0U && Utils::Math::IsPowerOfTwo(newNumActionQueues));
// Initialize m_epochToActionsList.
m_epochToActionsList.resize(newNumActionQueues);
for (auto& epochToActions : m_epochToActionsList)
{
std::get<0>(epochToActions) = std::make_unique<Mutex>();
}
}
void EpochActionManager::RegisterAction(std::uint64_t epochCounter, IEpochActionManager::Action&& action)
{
std::uint32_t index = ++m_counter & (m_epochToActionsList.size() - 1);
auto& epochToActions = m_epochToActionsList[index];
Lock lock(*std::get<0>(epochToActions));
std::get<1>(epochToActions)[epochCounter].emplace_back(std::move(action));
}
std::uint64_t EpochActionManager::PerformActions(std::uint64_t epochCounter)
{
// Actions will be moved here and performed without a lock.
Actions actionsToPerform;
for (auto& epochToActionsWithLock : m_epochToActionsList)
{
Lock lock(*std::get<0>(epochToActionsWithLock));
// lower_bound() so that entries are deleted up to but not including epochCounter.
auto& epochToActions = std::get<1>(epochToActionsWithLock);
const auto endIt = epochToActions.lower_bound(epochCounter);
auto it = epochToActions.begin();
while (it != endIt)
{
actionsToPerform.insert(
actionsToPerform.end(),
std::make_move_iterator(it->second.begin()),
std::make_move_iterator(it->second.end()));
// The following post increment is intentional to avoid iterator invalidation issue.
epochToActions.erase(it++);
}
}
ApplyActions(actionsToPerform);
return actionsToPerform.size();
}
void EpochActionManager::ApplyActions(Actions& actions)
{
for (auto& action : actions)
{
action();
}
}
} // namespace L4
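A usage sketch for the class above (its header is not part of this section, so the constructor and the Action type, assumed here to be callable like std::function<void()>, are inferred from this .cpp file):
void EpochActionManagerSketch()
{
    L4::EpochActionManager actionManager{ 2U };   // Two action queues.

    actionManager.RegisterAction(5U, [] { /* Reclaim memory retired at epoch 5. */ });
    actionManager.RegisterAction(6U, [] { /* Reclaim memory retired at epoch 6. */ });

    // PerformActions() runs everything registered at epochs strictly below the
    // given counter (lower_bound semantics), so only the epoch-5 action runs here.
    const auto performed = actionManager.PerformActions(6U);
    (void)performed;   // == 1
}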

src/MurmurHash3.cpp

@ -0,0 +1,334 @@
//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.
#include "Utils/MurmurHash3.h"
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER)
#define FORCE_INLINE __forceinline
#include <stdlib.h>
#define ROTL32(x,y) _rotl(x,y)
#define ROTL64(x,y) _rotl64(x,y)
#define BIG_CONSTANT(x) (x)
// Other compilers
#else // defined(_MSC_VER)
#define FORCE_INLINE __attribute__((always_inline))
inline uint32_t rotl32(uint32_t x, int8_t r)
{
return (x << r) | (x >> (32 - r));
}
inline uint64_t rotl64(uint64_t x, int8_t r)
{
return (x << r) | (x >> (64 - r));
}
#define ROTL32(x,y) rotl32(x,y)
#define ROTL64(x,y) rotl64(x,y)
#define BIG_CONSTANT(x) (x##LLU)
#endif // !defined(_MSC_VER)
//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
FORCE_INLINE uint32_t getblock(const uint32_t * p, int i)
{
return p[i];
}
FORCE_INLINE uint64_t getblock(const uint64_t * p, int i)
{
return p[i];
}
//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche
FORCE_INLINE uint32_t fmix(uint32_t h)
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
//----------
FORCE_INLINE uint64_t fmix(uint64_t k)
{
k ^= k >> 33;
k *= BIG_CONSTANT(0xff51afd7ed558ccd);
k ^= k >> 33;
k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return k;
}
//-----------------------------------------------------------------------------
void MurmurHash3_x86_32(const void * key, int len,
uint32_t seed, void * out)
{
const uint8_t * data = (const uint8_t*)key;
const int nblocks = len / 4;
uint32_t h1 = seed;
uint32_t c1 = 0xcc9e2d51;
uint32_t c2 = 0x1b873593;
//----------
// body
const uint32_t * blocks = (const uint32_t *)(data + nblocks * 4);
for (int i = -nblocks; i; i++)
{
uint32_t k1 = getblock(blocks, i);
k1 *= c1;
k1 = ROTL32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = ROTL32(h1, 13);
h1 = h1 * 5 + 0xe6546b64;
}
//----------
// tail
const uint8_t * tail = (const uint8_t*)(data + nblocks * 4);
uint32_t k1 = 0;
switch (len & 3)
{
case 3: k1 ^= tail[2] << 16;
case 2: k1 ^= tail[1] << 8;
case 1: k1 ^= tail[0];
k1 *= c1; k1 = ROTL32(k1, 15); k1 *= c2; h1 ^= k1;
};
//----------
// finalization
h1 ^= len;
h1 = fmix(h1);
*(uint32_t*)out = h1;
}
//-----------------------------------------------------------------------------
void MurmurHash3_x86_128(const void * key, const int len,
uint32_t seed, void * out)
{
const uint8_t * data = (const uint8_t*)key;
const int nblocks = len / 16;
uint32_t h1 = seed;
uint32_t h2 = seed;
uint32_t h3 = seed;
uint32_t h4 = seed;
uint32_t c1 = 0x239b961b;
uint32_t c2 = 0xab0e9789;
uint32_t c3 = 0x38b34ae5;
uint32_t c4 = 0xa1e38b93;
//----------
// body
const uint32_t * blocks = (const uint32_t *)(data + nblocks * 16);
for (int i = -nblocks; i; i++)
{
uint32_t k1 = getblock(blocks, i * 4 + 0);
uint32_t k2 = getblock(blocks, i * 4 + 1);
uint32_t k3 = getblock(blocks, i * 4 + 2);
uint32_t k4 = getblock(blocks, i * 4 + 3);
k1 *= c1; k1 = ROTL32(k1, 15); k1 *= c2; h1 ^= k1;
h1 = ROTL32(h1, 19); h1 += h2; h1 = h1 * 5 + 0x561ccd1b;
k2 *= c2; k2 = ROTL32(k2, 16); k2 *= c3; h2 ^= k2;
h2 = ROTL32(h2, 17); h2 += h3; h2 = h2 * 5 + 0x0bcaa747;
k3 *= c3; k3 = ROTL32(k3, 17); k3 *= c4; h3 ^= k3;
h3 = ROTL32(h3, 15); h3 += h4; h3 = h3 * 5 + 0x96cd1c35;
k4 *= c4; k4 = ROTL32(k4, 18); k4 *= c1; h4 ^= k4;
h4 = ROTL32(h4, 13); h4 += h1; h4 = h4 * 5 + 0x32ac3b17;
}
//----------
// tail
const uint8_t * tail = (const uint8_t*)(data + nblocks * 16);
uint32_t k1 = 0;
uint32_t k2 = 0;
uint32_t k3 = 0;
uint32_t k4 = 0;
switch (len & 15)
{
case 15: k4 ^= tail[14] << 16;
case 14: k4 ^= tail[13] << 8;
case 13: k4 ^= tail[12] << 0;
k4 *= c4; k4 = ROTL32(k4, 18); k4 *= c1; h4 ^= k4;
case 12: k3 ^= tail[11] << 24;
case 11: k3 ^= tail[10] << 16;
case 10: k3 ^= tail[9] << 8;
case 9: k3 ^= tail[8] << 0;
k3 *= c3; k3 = ROTL32(k3, 17); k3 *= c4; h3 ^= k3;
case 8: k2 ^= tail[7] << 24;
case 7: k2 ^= tail[6] << 16;
case 6: k2 ^= tail[5] << 8;
case 5: k2 ^= tail[4] << 0;
k2 *= c2; k2 = ROTL32(k2, 16); k2 *= c3; h2 ^= k2;
case 4: k1 ^= tail[3] << 24;
case 3: k1 ^= tail[2] << 16;
case 2: k1 ^= tail[1] << 8;
case 1: k1 ^= tail[0] << 0;
k1 *= c1; k1 = ROTL32(k1, 15); k1 *= c2; h1 ^= k1;
};
//----------
// finalization
h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
h1 = fmix(h1);
h2 = fmix(h2);
h3 = fmix(h3);
h4 = fmix(h4);
h1 += h2; h1 += h3; h1 += h4;
h2 += h1; h3 += h1; h4 += h1;
((uint32_t*)out)[0] = h1;
((uint32_t*)out)[1] = h2;
((uint32_t*)out)[2] = h3;
((uint32_t*)out)[3] = h4;
}
//-----------------------------------------------------------------------------
void MurmurHash3_x64_128(const void * key, const int len,
const uint32_t seed, void * out)
{
const uint8_t * data = (const uint8_t*)key;
const int nblocks = len / 16;
uint64_t h1 = seed;
uint64_t h2 = seed;
uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
//----------
// body
const uint64_t * blocks = (const uint64_t *)(data);
for (int i = 0; i < nblocks; i++)
{
uint64_t k1 = getblock(blocks, i * 2 + 0);
uint64_t k2 = getblock(blocks, i * 2 + 1);
k1 *= c1; k1 = ROTL64(k1, 31); k1 *= c2; h1 ^= k1;
h1 = ROTL64(h1, 27); h1 += h2; h1 = h1 * 5 + 0x52dce729;
k2 *= c2; k2 = ROTL64(k2, 33); k2 *= c1; h2 ^= k2;
h2 = ROTL64(h2, 31); h2 += h1; h2 = h2 * 5 + 0x38495ab5;
}
//----------
// tail
const uint8_t * tail = (const uint8_t*)(data + nblocks * 16);
uint64_t k1 = 0;
uint64_t k2 = 0;
switch (len & 15)
{
case 15: k2 ^= uint64_t(tail[14]) << 48;
case 14: k2 ^= uint64_t(tail[13]) << 40;
case 13: k2 ^= uint64_t(tail[12]) << 32;
case 12: k2 ^= uint64_t(tail[11]) << 24;
case 11: k2 ^= uint64_t(tail[10]) << 16;
case 10: k2 ^= uint64_t(tail[9]) << 8;
case 9: k2 ^= uint64_t(tail[8]) << 0;
k2 *= c2; k2 = ROTL64(k2, 33); k2 *= c1; h2 ^= k2;
case 8: k1 ^= uint64_t(tail[7]) << 56;
case 7: k1 ^= uint64_t(tail[6]) << 48;
case 6: k1 ^= uint64_t(tail[5]) << 40;
case 5: k1 ^= uint64_t(tail[4]) << 32;
case 4: k1 ^= uint64_t(tail[3]) << 24;
case 3: k1 ^= uint64_t(tail[2]) << 16;
case 2: k1 ^= uint64_t(tail[1]) << 8;
case 1: k1 ^= uint64_t(tail[0]) << 0;
k1 *= c1; k1 = ROTL64(k1, 31); k1 *= c2; h1 ^= k1;
};
//----------
// finalization
h1 ^= len; h2 ^= len;
h1 += h2;
h2 += h1;
h1 = fmix(h1);
h2 = fmix(h2);
h1 += h2;
h2 += h1;
((uint64_t*)out)[0] = h1;
((uint64_t*)out)[1] = h2;
}
//-----------------------------------------------------------------------------

src/PerfLogger.cpp

@ -0,0 +1,25 @@
#include "Log/PerfLogger.h"
#include "Utils/Exception.h"
#include <boost/format.hpp>
namespace L4
{
// PerfData class implementation.
void PerfData::AddHashTablePerfData(const char* hashTableName, const HashTablePerfData& perfData)
{
auto result = m_hashTablesPerfData.insert(
std::make_pair(
hashTableName,
HashTablesPerfData::mapped_type(perfData)));
if (!result.second)
{
boost::format err("Duplicate hash table name found: '%1%'.");
err % hashTableName;
throw RuntimeException(err.str());
}
}
} // namespace L4