Mirror of https://github.com/microsoft/L4.git
Apply clang-format (Chromium) (#13)
Parent: 32a7737afe
Commit: 64e70ac102
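A pass like this is normally driven either by a .clang-format file at the repository root or by passing the style on the command line. The exact invocation used for this commit is not recorded here, so the snippet below is only a sketch of the usual approach; the file paths are illustrative.

    # .clang-format (minimal)
    BasedOnStyle: Chromium

    # Reformat sources in place using the Chromium preset directly.
    clang-format -i -style=Chromium path/to/file.cpp path/to/file.h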
@@ -67,3 +67,5 @@ ipch/
 *.vsp
 *.vspx
 *.sap
+*.htm
+*.user
Benchmark/main.cpp (1146 changed lines)
Diff not shown because of its large size.
@@ -6,95 +6,84 @@
using namespace L4;

void SimpleExample() {
  EpochManagerConfig epochConfig{1000, std::chrono::milliseconds(100), 1};
  LocalMemory::HashTableService service{epochConfig};

  auto hashTableIndex = service.AddHashTable(
      HashTableConfig("Table1", HashTableConfig::Setting{1000000}));

  std::vector<std::pair<std::string, std::string>> keyValuePairs = {
      {"key1", "value1"}, {"key2", "value2"}, {"key3", "value3"},
      {"key4", "value4"}, {"key5", "value5"},
  };

  // Write data.
  {
    auto context = service.GetContext();
    auto& hashTable = context[hashTableIndex];

    for (const auto& keyValuePair : keyValuePairs) {
      const auto& keyStr = keyValuePair.first;
      const auto& valStr = keyValuePair.second;

      IWritableHashTable::Key key;
      key.m_data = reinterpret_cast<const std::uint8_t*>(keyStr.c_str());
      key.m_size = keyStr.size();

      IWritableHashTable::Value val;
      val.m_data = reinterpret_cast<const std::uint8_t*>(valStr.c_str());
      val.m_size = valStr.size();

      hashTable.Add(key, val);
    }
  }

  // Read data.
  {
    auto context = service.GetContext();

    // Once a context is retrieved, the operations such as
    // operator[] on the context and Get() are lock-free.
    auto& hashTable = context[hashTableIndex];

    for (const auto& keyValuePair : keyValuePairs) {
      const auto& keyStr = keyValuePair.first;

      IWritableHashTable::Key key;
      key.m_data = reinterpret_cast<const std::uint8_t*>(keyStr.c_str());
      key.m_size = keyStr.size();

      IWritableHashTable::Value val;
      hashTable.Get(key, val);

      std::cout << std::string(reinterpret_cast<const char*>(val.m_data),
                               val.m_size)
                << std::endl;
    }
  }
}

void CacheHashTableExample() {
  LocalMemory::HashTableService service;

  HashTableConfig::Cache cacheConfig{
      1024 * 1024,               // 1MB cache
      std::chrono::seconds(60),  // Record will exipre in 60 seconds
      true  // Remove any expired records during eviction.
  };

  auto hashTableIndex = service.AddHashTable(HashTableConfig(
      "Table1", HashTableConfig::Setting{1000000}, cacheConfig));

  (void)hashTableIndex;
  // Use hash table similar to SimpleExample().
}

int main() {
  SimpleExample();

  CacheHashTableExample();

  return 0;
}
Diff not shown because of its large size.
@@ -1,68 +1,56 @@
#pragma once

#include <boost/test/unit_test.hpp>
#include <memory>
#include <set>

namespace L4 {
namespace UnitTests {

struct AllocationAddressHolder : public std::set<void*> {
  ~AllocationAddressHolder() { BOOST_REQUIRE(empty()); }
};

template <typename T = void>
class CheckedAllocator : public std::allocator<T> {
 public:
  using Base = std::allocator<T>;
  using pointer = typename Base::pointer;

  template <class U>
  struct rebind {
    typedef CheckedAllocator<U> other;
  };

  CheckedAllocator()
      : m_allocationAddresses{std::make_shared<AllocationAddressHolder>()} {}

  CheckedAllocator(const CheckedAllocator<T>&) = default;

  template <class U>
  CheckedAllocator(const CheckedAllocator<U>& other)
      : m_allocationAddresses{other.m_allocationAddresses} {}

  template <class U>
  CheckedAllocator<T>& operator=(const CheckedAllocator<U>& other) {
    m_allocationAddresses = other.m_allocationAddresses;
    return (*this);
  }

  pointer allocate(std::size_t count,
                   std::allocator<void>::const_pointer hint = 0) {
    auto address = Base::allocate(count, hint);
    BOOST_REQUIRE(m_allocationAddresses->insert(address).second);
    return address;
  }

  void deallocate(pointer ptr, std::size_t count) {
    BOOST_REQUIRE(m_allocationAddresses->erase(ptr) == 1);
    Base::deallocate(ptr, count);
  }

  std::shared_ptr<AllocationAddressHolder> m_allocationAddresses;
};

}  // namespace UnitTests
}  // namespace L4
@@ -1,80 +1,82 @@
#include <boost/test/unit_test.hpp>
#include <condition_variable>
#include <mutex>
#include "L4/Interprocess/Connection/ConnectionMonitor.h"
#include "L4/Interprocess/Connection/EndPointInfoUtils.h"
#include "Utils.h"

namespace L4 {
namespace UnitTests {

BOOST_AUTO_TEST_SUITE(ConnectionMonitorTests)

BOOST_AUTO_TEST_CASE(ConnectionMonitorTest) {
  std::vector<Interprocess::Connection::EndPointInfo> endPointsDisconnected;
  std::mutex lock;
  std::condition_variable cv;

  auto server = std::make_shared<Interprocess::Connection::ConnectionMonitor>();

  auto noOpCallback = [](const auto&) {
    throw std::runtime_error("This will not be called.");
  };
  auto callback = [&](const auto& endPoint) {
    std::unique_lock<std::mutex> guard{lock};
    endPointsDisconnected.emplace_back(endPoint);
    cv.notify_one();
  };

  auto client1 =
      std::make_shared<Interprocess::Connection::ConnectionMonitor>();
  client1->Register(server->GetLocalEndPointInfo(), noOpCallback);
  server->Register(client1->GetLocalEndPointInfo(), callback);

  // Registering the same end point is not allowed.
  CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
      server->Register(client1->GetLocalEndPointInfo(), noOpCallback);
      , "Duplicate end point found.");

  auto client2 =
      std::make_shared<Interprocess::Connection::ConnectionMonitor>();
  client2->Register(server->GetLocalEndPointInfo(), callback);
  server->Register(client2->GetLocalEndPointInfo(), noOpCallback);

  auto client3 =
      std::make_shared<Interprocess::Connection::ConnectionMonitor>();
  client3->Register(server->GetLocalEndPointInfo(), callback);
  server->Register(client3->GetLocalEndPointInfo(), noOpCallback);

  BOOST_CHECK_EQUAL(server->GetRemoteConnectionsCount(), 3U);

  // Kill client1 and check if the callback is called on the server side.
  auto client1EndPointInfo = client1->GetLocalEndPointInfo();
  client1.reset();
  {
    std::unique_lock<std::mutex> guard{lock};
    cv.wait(guard, [&] { return endPointsDisconnected.size() >= 1U; });
    BOOST_REQUIRE_EQUAL(endPointsDisconnected.size(), 1U);
    BOOST_CHECK(endPointsDisconnected[0] == client1EndPointInfo);
    endPointsDisconnected.clear();
    BOOST_CHECK_EQUAL(server->GetRemoteConnectionsCount(), 2U);
  }

  // Now kill server and check if both callbacks in client2 and client3 are
  // called.
  auto serverEndPointInfo = server->GetLocalEndPointInfo();
  server.reset();
  {
    std::unique_lock<std::mutex> guard{lock};
    cv.wait(guard, [&] { return endPointsDisconnected.size() >= 2U; });
    BOOST_REQUIRE_EQUAL(endPointsDisconnected.size(), 2U);
    BOOST_CHECK(endPointsDisconnected[0] == serverEndPointInfo);
    BOOST_CHECK(endPointsDisconnected[1] == serverEndPointInfo);
    endPointsDisconnected.clear();
    BOOST_CHECK_EQUAL(client2->GetRemoteConnectionsCount(), 0U);
    BOOST_CHECK_EQUAL(client3->GetRemoteConnectionsCount(), 0U);
  }
}

BOOST_AUTO_TEST_SUITE_END()

}  // namespace UnitTests
}  // namespace L4
@@ -1,187 +1,190 @@
#include <atomic>
#include <boost/test/unit_test.hpp>
#include "L4/Epoch/EpochActionManager.h"
#include "L4/Epoch/EpochQueue.h"
#include "L4/LocalMemory/EpochManager.h"
#include "L4/Log/PerfCounter.h"
#include "L4/Utils/Lock.h"
#include "Utils.h"

namespace L4 {
namespace UnitTests {

BOOST_AUTO_TEST_SUITE(EpochManagerTests)

BOOST_AUTO_TEST_CASE(EpochRefManagerTest) {
  std::uint64_t currentEpochCounter = 5U;
  const std::uint32_t c_epochQueueSize = 100U;

  using EpochQueue =
      EpochQueue<boost::shared_lock_guard<L4::Utils::ReaderWriterLockSlim>,
                 std::lock_guard<L4::Utils::ReaderWriterLockSlim>>;

  EpochQueue epochQueue(currentEpochCounter, c_epochQueueSize);

  // Initially the ref count at the current epoch counter should be 0.
  BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 0U);

  EpochRefManager<EpochQueue> epochManager(epochQueue);

  BOOST_CHECK_EQUAL(epochManager.AddRef(), currentEpochCounter);

  // Validate that a reference count is incremented at the current epoch
  // counter.
  BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 1U);

  epochManager.RemoveRef(currentEpochCounter);

  // Validate that a reference count is back to 0.
  BOOST_CHECK_EQUAL(epochQueue.m_refCounts[currentEpochCounter], 0U);

  // Decrementing a reference counter when it is already 0 will result in an
  // exception.
  CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
      epochManager.RemoveRef(currentEpochCounter);
      , "Reference counter is invalid.");
}

BOOST_AUTO_TEST_CASE(EpochCounterManagerTest) {
  std::uint64_t currentEpochCounter = 0U;
  const std::uint32_t c_epochQueueSize = 100U;

  using EpochQueue =
      EpochQueue<boost::shared_lock_guard<L4::Utils::ReaderWriterLockSlim>,
                 std::lock_guard<L4::Utils::ReaderWriterLockSlim>>;

  EpochQueue epochQueue(currentEpochCounter, c_epochQueueSize);

  EpochCounterManager<EpochQueue> epochCounterManager(epochQueue);

  // If RemoveUnreferenceEpochCounters() is called when m_fonrtIndex and
  // m_backIndex are the same, it will just return either value.
  BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(),
                    currentEpochCounter);

  // Add two epoch counts.
  ++currentEpochCounter;
  ++currentEpochCounter;
  epochCounterManager.AddNewEpoch();
  epochCounterManager.AddNewEpoch();

  BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, 0U);
  BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
  BOOST_CHECK_EQUAL(epochQueue.m_refCounts[epochQueue.m_frontIndex], 0U);

  // Since the m_frontIndex's reference count was zero, it will be incremented
  // all the way to currentEpochCounter.
  BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(),
                    currentEpochCounter);
  BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter);
  BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);

  EpochRefManager<EpochQueue> epochRefManager(epochQueue);

  // Now add a reference at the currentEpochCounter;
  const auto epochCounterReferenced = epochRefManager.AddRef();
  BOOST_CHECK_EQUAL(epochCounterReferenced, currentEpochCounter);

  // Calling RemoveUnreferenceEpochCounters() should just return
  // currentEpochCounter since m_frontIndex and m_backIndex is the same. (Not
  // affected by adding a reference yet).
  BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(),
                    currentEpochCounter);
  BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter);
  BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);

  // Add one epoch count.
  ++currentEpochCounter;
  epochCounterManager.AddNewEpoch();

  // Now RemoveUnreferenceEpochCounters() should return epochCounterReferenced
  // because of the reference count.
  BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(),
                    epochCounterReferenced);
  BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, epochCounterReferenced);
  BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);

  // Remove the reference.
  epochRefManager.RemoveRef(epochCounterReferenced);

  // Now RemoveUnreferenceEpochCounters() should return currentEpochCounter and
  // m_frontIndex should be in sync with m_backIndex.
  BOOST_CHECK_EQUAL(epochCounterManager.RemoveUnreferenceEpochCounters(),
                    currentEpochCounter);
  BOOST_CHECK_EQUAL(epochQueue.m_frontIndex, currentEpochCounter);
  BOOST_CHECK_EQUAL(epochQueue.m_backIndex, currentEpochCounter);
}

BOOST_AUTO_TEST_CASE(EpochActionManagerTest) {
  EpochActionManager actionManager(2U);

  bool isAction1Called = false;
  bool isAction2Called = false;

  auto action1 = [&]() { isAction1Called = true; };
  auto action2 = [&]() { isAction2Called = true; };

  // Register action1 and action2 at epoch count 5 and 6 respectively.
  actionManager.RegisterAction(5U, action1);
  actionManager.RegisterAction(6U, action2);

  BOOST_CHECK(!isAction1Called && !isAction2Called);

  actionManager.PerformActions(4);
  BOOST_CHECK(!isAction1Called && !isAction2Called);

  actionManager.PerformActions(5);
  BOOST_CHECK(!isAction1Called && !isAction2Called);

  actionManager.PerformActions(6);
  BOOST_CHECK(isAction1Called && !isAction2Called);

  actionManager.PerformActions(7);
  BOOST_CHECK(isAction1Called && isAction2Called);
}

BOOST_AUTO_TEST_CASE(EpochManagerTest) {
  ServerPerfData perfData;
  LocalMemory::EpochManager epochManager(
      EpochManagerConfig(100000U, std::chrono::milliseconds(5U), 1U), perfData);

  std::atomic<bool> isActionCalled{false};
  auto action = [&]() { isActionCalled = true; };

  auto epochCounterReferenced = epochManager.GetEpochRefManager().AddRef();

  epochManager.RegisterAction(action);

  // Justification for using sleep_for in unit tests:
  // - EpochManager already uses an internal thread which wakes up and perform a
  // task in a given interval and when the class is destroyed, there is a
  // mechanism for waiting for the thread anyway. It's more crucial to test the
  // end to end scenario this way.
  // - The overall execution time for this test is less than 50 milliseconds.
  auto initialEpochCounter =
      perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue);
  while (perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue) -
             initialEpochCounter <
         2) {
    std::this_thread::sleep_for(std::chrono::milliseconds(5));
  }

  BOOST_CHECK(!isActionCalled);

  epochManager.GetEpochRefManager().RemoveRef(epochCounterReferenced);

  initialEpochCounter =
      perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue);
  while (perfData.Get(ServerPerfCounter::LatestEpochCounterInQueue) -
             initialEpochCounter <
         2) {
    std::this_thread::sleep_for(std::chrono::milliseconds(5));
  }

  BOOST_CHECK(isActionCalled);
}

BOOST_AUTO_TEST_SUITE_END()

}  // namespace UnitTests
}  // namespace L4
@@ -1,128 +1,111 @@
#include <boost/test/unit_test.hpp>
#include "L4/HashTable/Config.h"
#include "L4/HashTable/IHashTable.h"
#include "L4/LocalMemory/HashTableManager.h"
#include "Mocks.h"
#include "Utils.h"

namespace L4 {
namespace UnitTests {

class HashTableManagerTestsFixture {
 protected:
  template <typename Store>
  void ValidateRecord(const Store& store,
                      const char* expectedKeyStr,
                      const char* expectedValueStr) {
    IReadOnlyHashTable::Value actualValue;
    auto expectedValue =
        Utils::ConvertFromString<IReadOnlyHashTable::Value>(expectedValueStr);
    BOOST_CHECK(store.Get(
        Utils::ConvertFromString<IReadOnlyHashTable::Key>(expectedKeyStr),
        actualValue));
    BOOST_CHECK(actualValue.m_size == expectedValue.m_size);
    BOOST_CHECK(!memcmp(actualValue.m_data, expectedValue.m_data,
                        expectedValue.m_size));
  }

  MockEpochManager m_epochManager;
  std::allocator<void> m_allocator;
};

BOOST_FIXTURE_TEST_SUITE(HashTableManagerTests, HashTableManagerTestsFixture)

BOOST_AUTO_TEST_CASE(HashTableManagerTest) {
  LocalMemory::HashTableManager htManager;
  const auto ht1Index = htManager.Add(
      HashTableConfig("HashTable1", HashTableConfig::Setting(100U)),
      m_epochManager, m_allocator);
  const auto ht2Index = htManager.Add(
      HashTableConfig("HashTable2", HashTableConfig::Setting(200U)),
      m_epochManager, m_allocator);

  {
    auto& hashTable1 = htManager.GetHashTable("HashTable1");
    hashTable1.Add(
        Utils::ConvertFromString<IReadOnlyHashTable::Key>("HashTable1Key"),
        Utils::ConvertFromString<IReadOnlyHashTable::Value>("HashTable1Value"));

    auto& hashTable2 = htManager.GetHashTable("HashTable2");
    hashTable2.Add(
        Utils::ConvertFromString<IReadOnlyHashTable::Key>("HashTable2Key"),
        Utils::ConvertFromString<IReadOnlyHashTable::Value>("HashTable2Value"));
  }

  ValidateRecord(htManager.GetHashTable(ht1Index), "HashTable1Key",
                 "HashTable1Value");

  ValidateRecord(htManager.GetHashTable(ht2Index), "HashTable2Key",
                 "HashTable2Value");
}

BOOST_AUTO_TEST_CASE(HashTableManagerTestForSerialzation) {
  HashTableConfig htConfig{"HashTable1", HashTableConfig::Setting(100U)};
  std::ostringstream outStream;

  std::vector<std::pair<std::string, std::string>> testData;
  for (std::int32_t i = 0; i < 10; ++i) {
    testData.emplace_back("key" + std::to_string(i), "val" + std::to_string(i));
  }

  // Serialize a hash table.
  {
    LocalMemory::HashTableManager htManager;
    const auto ht1Index = htManager.Add(htConfig, m_epochManager, m_allocator);

    auto& hashTable1 = htManager.GetHashTable("HashTable1");

    for (const auto& kvPair : testData) {
      hashTable1.Add(Utils::ConvertFromString<IReadOnlyHashTable::Key>(
                         kvPair.first.c_str()),
                     Utils::ConvertFromString<IReadOnlyHashTable::Value>(
                         kvPair.second.c_str()));
    }

    auto serializer = hashTable1.GetSerializer();
    serializer->Serialize(outStream, {});
  }

  // Deserialize the hash table.
  {
    htConfig.m_serializer.emplace(
        std::make_shared<std::istringstream>(outStream.str()));

    LocalMemory::HashTableManager htManager;
    const auto ht1Index = htManager.Add(htConfig, m_epochManager, m_allocator);

    auto& hashTable1 = htManager.GetHashTable("HashTable1");
    BOOST_CHECK_EQUAL(
        hashTable1.GetPerfData().Get(HashTablePerfCounter::RecordsCount),
        testData.size());

    for (const auto& kvPair : testData) {
      ValidateRecord(hashTable1, kvPair.first.c_str(), kvPair.second.c_str());
    }
  }
}

BOOST_AUTO_TEST_SUITE_END()

}  // namespace UnitTests
}  // namespace L4
@@ -1,163 +1,161 @@
#include <boost/optional.hpp>
#include <boost/test/unit_test.hpp>
#include <string>
#include <vector>
#include "L4/HashTable/Common/Record.h"
#include "Utils.h"

namespace L4 {
namespace UnitTests {

using namespace HashTable;

class HashTableRecordTestFixture {
 protected:
  void Run(bool isFixedKey, bool isFixedValue, bool useMetaValue) {
    BOOST_TEST_MESSAGE("Running with isFixedKey="
                       << isFixedKey << ", isFixedValue=" << isFixedValue
                       << ", useMetatValue=" << useMetaValue);

    const std::string key = "TestKey";
    const std::string value = "TestValue";
    const std::string metaValue = "TestMetavalue";

    const auto recordOverhead = (isFixedKey ? 0U : c_keyTypeSize) +
                                (isFixedValue ? 0U : c_valueTypeSize);

    Validate(
        RecordSerializer{
            isFixedKey ? static_cast<RecordSerializer::KeySize>(key.size())
                       : std::uint16_t(0),
            isFixedValue
                ? static_cast<RecordSerializer::ValueSize>(value.size())
                : 0U,
            useMetaValue
                ? static_cast<RecordSerializer::ValueSize>(metaValue.size())
                : 0U},
        key, value,
        recordOverhead + key.size() + value.size() +
            (useMetaValue ? metaValue.size() : 0U),
        recordOverhead,
        useMetaValue ? boost::optional<const std::string&>{metaValue}
                     : boost::none);
  }

 private:
  void Validate(const RecordSerializer& serializer,
                const std::string& keyStr,
                const std::string& valueStr,
                std::size_t expectedBufferSize,
                std::size_t expectedRecordOverheadSize,
                boost::optional<const std::string&> metadataStr = boost::none) {
    BOOST_CHECK_EQUAL(serializer.CalculateRecordOverhead(),
                      expectedRecordOverheadSize);

    const auto key = Utils::ConvertFromString<Record::Key>(keyStr.c_str());
    const auto value =
        Utils::ConvertFromString<Record::Value>(valueStr.c_str());

    const auto bufferSize = serializer.CalculateBufferSize(key, value);

    BOOST_REQUIRE_EQUAL(bufferSize, expectedBufferSize);
    std::vector<std::uint8_t> buffer(bufferSize);

    RecordBuffer* recordBuffer = nullptr;

    if (metadataStr) {
      auto metaValue =
          Utils::ConvertFromString<Record::Value>(metadataStr->c_str());
      recordBuffer = serializer.Serialize(key, value, metaValue, buffer.data(),
                                          bufferSize);
    } else {
      recordBuffer =
          serializer.Serialize(key, value, buffer.data(), bufferSize);
    }

    const auto record = serializer.Deserialize(*recordBuffer);

    // Make sure the data serialized is in different memory location.
    BOOST_CHECK(record.m_key.m_data != key.m_data);
    BOOST_CHECK(record.m_value.m_data != value.m_data);

    BOOST_CHECK(record.m_key == key);
    if (metadataStr) {
      const std::string newValueStr = *metadataStr + valueStr;
      const auto newValue =
          Utils::ConvertFromString<Record::Value>(newValueStr.c_str());
      BOOST_CHECK(record.m_value == newValue);
    } else {
      BOOST_CHECK(record.m_value == value);
    }
  }

  static constexpr std::size_t c_keyTypeSize = sizeof(Record::Key::size_type);
  static constexpr std::size_t c_valueTypeSize =
      sizeof(Record::Value::size_type);
};

BOOST_FIXTURE_TEST_SUITE(HashTableRecordTests, HashTableRecordTestFixture)

BOOST_AUTO_TEST_CASE(RunAll) {
  // Run all permutations for Run(), which takes three booleans.
  for (int i = 0; i < 8; ++i) {
    Run(!!((i >> 2) & 1), !!((i >> 1) & 1), !!((i)&1));
  }
}

BOOST_AUTO_TEST_CASE(InvalidSizeTest) {
  std::vector<std::uint8_t> buffer(100U);

  RecordSerializer serializer{4, 5};

  const std::string keyStr = "1234";
  const std::string invalidStr = "999999";
  const std::string valueStr = "12345";

  const auto key = Utils::ConvertFromString<Record::Key>(keyStr.c_str());
  const auto value = Utils::ConvertFromString<Record::Value>(valueStr.c_str());

  const auto invalidKey =
      Utils::ConvertFromString<Record::Key>(invalidStr.c_str());
  const auto invalidValue =
      Utils::ConvertFromString<Record::Value>(invalidStr.c_str());

  CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
      serializer.Serialize(invalidKey, value, buffer.data(), buffer.size()),
      "Invalid key or value sizes are given.");

  CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
      serializer.Serialize(key, invalidValue, buffer.data(), buffer.size()),
      "Invalid key or value sizes are given.");

  CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
      serializer.Serialize(invalidKey, invalidValue, buffer.data(),
                           buffer.size()),
      "Invalid key or value sizes are given.");

  // Normal case shouldn't thrown an exception.
  serializer.Serialize(key, value, buffer.data(), buffer.size());

  RecordSerializer serializerWithMetaValue{4, 5, 2};
  std::uint16_t metadata = 0;

  Record::Value metaValue{reinterpret_cast<std::uint8_t*>(&metadata),
                          sizeof(metadata)};

  // Normal case shouldn't thrown an exception.
  serializerWithMetaValue.Serialize(key, value, metaValue, buffer.data(),
                                    buffer.size());

  // Mismatching size is given.
  metaValue.m_size = 1;
  CHECK_EXCEPTION_THROWN_WITH_MESSAGE(
      serializerWithMetaValue.Serialize(key, value, metaValue, buffer.data(),
                                        buffer.size()),
      "Invalid meta value size is given.");
}

BOOST_AUTO_TEST_SUITE_END()

}  // namespace UnitTests
}  // namespace L4
@@ -1,52 +1,46 @@
#include <boost/test/unit_test.hpp>
#include <utility>
#include <vector>
#include "L4/LocalMemory/HashTableService.h"
#include "Mocks.h"
#include "Utils.h"

namespace L4 {
namespace UnitTests {

BOOST_AUTO_TEST_CASE(HashTableServiceTest) {
  std::vector<std::pair<std::string, std::string>> dataSet;
  for (std::uint16_t i = 0U; i < 100; ++i) {
    dataSet.emplace_back("key" + std::to_string(i),
                         "value" + std::to_string(i));
  }

  LocalMemory::HashTableService htService;
  htService.AddHashTable(
      HashTableConfig("Table1", HashTableConfig::Setting{100U}));
  htService.AddHashTable(HashTableConfig(
      "Table2", HashTableConfig::Setting{1000U},
      HashTableConfig::Cache{1024, std::chrono::seconds{1U}, false}));

  for (const auto& data : dataSet) {
    htService.GetContext()["Table1"].Add(
        Utils::ConvertFromString<IReadOnlyHashTable::Key>(data.first.c_str()),
        Utils::ConvertFromString<IReadOnlyHashTable::Value>(
            data.second.c_str()));
  }

  // Smoke tests for looking up the data .
  {
    auto context = htService.GetContext();
    for (const auto& data : dataSet) {
      IReadOnlyHashTable::Value val;
      BOOST_CHECK(context["Table1"].Get(
          Utils::ConvertFromString<IReadOnlyHashTable::Key>(data.first.c_str()),
          val));
      BOOST_CHECK(Utils::ConvertToString(val) == data.second);
    }
  }
}

}  // namespace UnitTests
}  // namespace L4
@@ -3,34 +3,23 @@
#include "L4/Epoch/IEpochActionManager.h"
#include "L4/Log/PerfLogger.h"

namespace L4 {
namespace UnitTests {

class MockPerfLogger : public IPerfLogger {
  virtual void Log(const IData& data) override { (void)data; }
};

struct MockEpochManager : public IEpochActionManager {
  MockEpochManager() : m_numRegisterActionsCalled(0) {}

  virtual void RegisterAction(Action&& action) override {
    ++m_numRegisterActionsCalled;
    action();
  };

  std::uint16_t m_numRegisterActionsCalled;
};

}  // namespace UnitTests
}  // namespace L4
@@ -2,103 +2,97 @@
#include <limits>
#include "L4/Log/PerfLogger.h"

namespace L4 {
namespace UnitTests {

void CheckMinCounters(const HashTablePerfData& htPerfData) {
  const auto maxValue = (std::numeric_limits<std::int64_t>::max)();
  /// Check if the min counter values are correctly initialized to max value.
  BOOST_CHECK_EQUAL(htPerfData.Get(HashTablePerfCounter::MinValueSize),
                    maxValue);
  BOOST_CHECK_EQUAL(htPerfData.Get(HashTablePerfCounter::MinKeySize), maxValue);
}

BOOST_AUTO_TEST_CASE(PerfCountersTest) {
  enum class TestCounter { Counter = 0, Count };

  PerfCounters<TestCounter> perfCounters;

  BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 0);

  perfCounters.Set(TestCounter::Counter, 10);
  BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10);

  perfCounters.Increment(TestCounter::Counter);
  BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 11);

  perfCounters.Decrement(TestCounter::Counter);
  BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10);

  perfCounters.Add(TestCounter::Counter, 5);
  BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 15);

  perfCounters.Subtract(TestCounter::Counter, 10);
  BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 5);

  perfCounters.Max(TestCounter::Counter, 10);
  BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10);

  perfCounters.Max(TestCounter::Counter, 9);
  BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 10);

  perfCounters.Min(TestCounter::Counter, 1);
  BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 1);

  perfCounters.Min(TestCounter::Counter, 10);
  BOOST_CHECK_EQUAL(perfCounters.Get(TestCounter::Counter), 1);
}

BOOST_AUTO_TEST_CASE(PerfDataTest) {
  PerfData testPerfData;

  BOOST_CHECK(testPerfData.GetHashTablesPerfData().empty());

  HashTablePerfData htPerfData1;
  HashTablePerfData htPerfData2;
  HashTablePerfData htPerfData3;

  CheckMinCounters(htPerfData1);
  CheckMinCounters(htPerfData2);
  CheckMinCounters(htPerfData3);

  testPerfData.AddHashTablePerfData("HT1", htPerfData1);
  testPerfData.AddHashTablePerfData("HT2", htPerfData2);
  testPerfData.AddHashTablePerfData("HT3", htPerfData3);

  /// Update counters and check if they are correctly updated.
  htPerfData1.Set(HashTablePerfCounter::TotalKeySize, 10);
  htPerfData2.Set(HashTablePerfCounter::TotalKeySize, 20);
  htPerfData3.Set(HashTablePerfCounter::TotalKeySize, 30);

  // Check if the hash table perf data is correctly registered.
  const auto& hashTablesPerfData = testPerfData.GetHashTablesPerfData();
  BOOST_CHECK_EQUAL(hashTablesPerfData.size(), 3U);

  {
    auto htPerfDataIt = hashTablesPerfData.find("HT1");
    BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end());
    BOOST_CHECK_EQUAL(
        htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 10);
  }
  {
    auto htPerfDataIt = hashTablesPerfData.find("HT2");
    BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end());
    BOOST_CHECK_EQUAL(
        htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 20);
  }
  {
    auto htPerfDataIt = hashTablesPerfData.find("HT3");
    BOOST_REQUIRE(htPerfDataIt != hashTablesPerfData.end());
    BOOST_CHECK_EQUAL(
        htPerfDataIt->second.get().Get(HashTablePerfCounter::TotalKeySize), 30);
  }
}

}  // namespace UnitTests
}  // namespace L4

@@ -1,18 +1,16 @@
#include <boost/test/unit_test.hpp>
#include <sstream>
#include <string>
#include <vector>
#include "L4/HashTable/ReadWrite/HashTable.h"
#include "L4/HashTable/ReadWrite/Serializer.h"
#include "L4/LocalMemory/Memory.h"
#include "L4/Log/PerfCounter.h"
#include "Mocks.h"
#include "Utils.h"

namespace L4 {
namespace UnitTests {

using namespace HashTable::ReadWrite;

@@ -32,148 +30,127 @@ void ValidateSerializer(
    const KeyValuePairs& keyValuePairs,
    const Utils::ExpectedCounterValues& expectedCounterValuesAfterLoad,
    const Utils::ExpectedCounterValues& expectedCounterValuesAfterSerialization,
    const Utils::ExpectedCounterValues&
        expectedCounterValuesAfterDeserialization) {
  Memory memory;
  MockEpochManager epochManager;

  auto hashTableHolder{memory.MakeUnique<HashTable>(HashTable::Setting{5},
                                                    memory.GetAllocator())};
  BOOST_CHECK(hashTableHolder != nullptr);

  WritableHashTable<Allocator> writableHashTable(*hashTableHolder,
                                                 epochManager);

  // Insert the given key/value pairs to the hash table.
  for (const auto& pair : keyValuePairs) {
    auto key =
        Utils::ConvertFromString<IReadOnlyHashTable::Key>(pair.first.c_str());
    auto val = Utils::ConvertFromString<IReadOnlyHashTable::Value>(
        pair.second.c_str());

    writableHashTable.Add(key, val);
  }

  const auto& perfData = writableHashTable.GetPerfData();

  Utils::ValidateCounters(perfData, expectedCounterValuesAfterLoad);

  // Now write the hash table to the stream.
  std::ostringstream outStream;
  serializer.Serialize(*hashTableHolder, outStream);
  Utils::ValidateCounters(perfData, expectedCounterValuesAfterSerialization);

  // Read in the hash table from the stream and validate it.
  std::istringstream inStream(outStream.str());

  // version == 0 means that it's run through the HashTableSerializer, thus the
  // following can be skipped.
  if (serializerVersion != 0) {
    std::uint8_t actualSerializerVersion = 0;
    DeserializerHelper(inStream).Deserialize(actualSerializerVersion);
    BOOST_CHECK(actualSerializerVersion == serializerVersion);
  } else {
    BOOST_REQUIRE(typeid(L4::HashTable::ReadWrite::Serializer<
                         HashTable, ReadOnlyHashTable>) == typeid(Serializer));
  }

  auto newHashTableHolder = deserializer.Deserialize(memory, inStream);
  BOOST_CHECK(newHashTableHolder != nullptr);

  WritableHashTable<Allocator> newWritableHashTable(*newHashTableHolder,
                                                    epochManager);

  const auto& newPerfData = newWritableHashTable.GetPerfData();

  Utils::ValidateCounters(newPerfData,
                          expectedCounterValuesAfterDeserialization);

  // Make sure all the key/value pairs exist after deserialization.
  for (const auto& pair : keyValuePairs) {
    auto key =
        Utils::ConvertFromString<IReadOnlyHashTable::Key>(pair.first.c_str());
    IReadOnlyHashTable::Value val;
    BOOST_CHECK(newWritableHashTable.Get(key, val));
    BOOST_CHECK(Utils::ConvertToString(val) == pair.second);
  }
}

BOOST_AUTO_TEST_CASE(CurrentSerializerTest) {
  ValidateSerializer(
      Current::Serializer<HashTable, ReadOnlyHashTable>{},
      Current::Deserializer<Memory, HashTable, WritableHashTable>{
          L4::Utils::Properties{}},
      Current::c_version,
      {{"hello1", " world1"}, {"hello2", " world2"}, {"hello3", " world3"}},
      {{HashTablePerfCounter::RecordsCount, 3},
       {HashTablePerfCounter::BucketsCount, 5},
       {HashTablePerfCounter::TotalKeySize, 18},
       {HashTablePerfCounter::TotalValueSize, 21},
       {HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0},
       {HashTablePerfCounter::RecordsCountSavedFromSerializer, 0}},
      {{HashTablePerfCounter::RecordsCount, 3},
       {HashTablePerfCounter::BucketsCount, 5},
       {HashTablePerfCounter::TotalKeySize, 18},
       {HashTablePerfCounter::TotalValueSize, 21},
       {HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0},
       {HashTablePerfCounter::RecordsCountSavedFromSerializer, 3}},
      {{HashTablePerfCounter::RecordsCount, 3},
       {HashTablePerfCounter::BucketsCount, 5},
       {HashTablePerfCounter::TotalKeySize, 18},
       {HashTablePerfCounter::TotalValueSize, 21},
       {HashTablePerfCounter::RecordsCountLoadedFromSerializer, 3},
       {HashTablePerfCounter::RecordsCountSavedFromSerializer, 0}});
}

BOOST_AUTO_TEST_CASE(HashTableSerializeTest) {
  // This test case tests end to end scenario using the HashTableSerializer.
  ValidateSerializer(
      Serializer<HashTable, ReadOnlyHashTable>{},
      Deserializer<Memory, HashTable, WritableHashTable>{
          L4::Utils::Properties{}},
      0U, {{"hello1", " world1"}, {"hello2", " world2"}, {"hello3", " world3"}},
      {{HashTablePerfCounter::RecordsCount, 3},
       {HashTablePerfCounter::BucketsCount, 5},
       {HashTablePerfCounter::TotalKeySize, 18},
       {HashTablePerfCounter::TotalValueSize, 21},
       {HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0},
       {HashTablePerfCounter::RecordsCountSavedFromSerializer, 0}},
      {{HashTablePerfCounter::RecordsCount, 3},
       {HashTablePerfCounter::BucketsCount, 5},
       {HashTablePerfCounter::TotalKeySize, 18},
       {HashTablePerfCounter::TotalValueSize, 21},
       {HashTablePerfCounter::RecordsCountLoadedFromSerializer, 0},
       {HashTablePerfCounter::RecordsCountSavedFromSerializer, 3}},
      {{HashTablePerfCounter::RecordsCount, 3},
       {HashTablePerfCounter::BucketsCount, 5},
       {HashTablePerfCounter::TotalKeySize, 18},
       {HashTablePerfCounter::TotalValueSize, 21},
       {HashTablePerfCounter::RecordsCountLoadedFromSerializer, 3},
       {HashTablePerfCounter::RecordsCountSavedFromSerializer, 0}});
}

BOOST_AUTO_TEST_SUITE_END()

}  // namespace UnitTests
}  // namespace L4

Diff not shown for this file because of its large size.

@@ -1,41 +1,37 @@
#include <boost/test/unit_test.hpp>
#include "CheckedAllocator.h"
#include "L4/HashTable/Common/Record.h"
#include "L4/HashTable/Common/SettingAdapter.h"

namespace L4 {
namespace UnitTests {

using SharedHashTable =
    HashTable::SharedHashTable<HashTable::RecordBuffer, CheckedAllocator<>>;

BOOST_AUTO_TEST_SUITE(SettingAdapterTests)

BOOST_AUTO_TEST_CASE(SettingAdapterTestWithDefaultValues) {
  HashTableConfig::Setting from{100U};
  const auto to = HashTable::SettingAdapter{}.Convert<SharedHashTable>(from);

  BOOST_CHECK_EQUAL(to.m_numBuckets, 100U);
  BOOST_CHECK_EQUAL(to.m_numBucketsPerMutex, 1U);
  BOOST_CHECK_EQUAL(to.m_fixedKeySize, 0U);
  BOOST_CHECK_EQUAL(to.m_fixedValueSize, 0U);
}

BOOST_AUTO_TEST_CASE(SettingAdapterTestWithNonDefaultValues) {
  HashTableConfig::Setting from{100U, 10U, 5U, 20U};
  const auto to = HashTable::SettingAdapter{}.Convert<SharedHashTable>(from);

  BOOST_CHECK_EQUAL(to.m_numBuckets, 100U);
  BOOST_CHECK_EQUAL(to.m_numBucketsPerMutex, 10U);
  BOOST_CHECK_EQUAL(to.m_fixedKeySize, 5U);
  BOOST_CHECK_EQUAL(to.m_fixedValueSize, 20U);
}

BOOST_AUTO_TEST_SUITE_END()

}  // namespace UnitTests
}  // namespace L4

@@ -1,37 +1,27 @@
#include "Utils.h"
#include <boost/test/unit_test.hpp>

namespace L4 {
namespace UnitTests {
namespace Utils {

void ValidateCounter(const HashTablePerfData& actual,
                     HashTablePerfCounter perfCounter,
                     PerfCounters<HashTablePerfCounter>::TValue expectedValue) {
  BOOST_CHECK_MESSAGE(
      actual.Get(perfCounter) == expectedValue,
      c_hashTablePerfCounterNames[static_cast<std::size_t>(perfCounter)]
          << " counter: " << actual.Get(perfCounter)
          << " (actual) != " << expectedValue << " (expected).");
}

void ValidateCounters(const HashTablePerfData& actual,
                      const ExpectedCounterValues& expected) {
  for (const auto& expectedCounter : expected) {
    ValidateCounter(actual, expectedCounter.first, expectedCounter.second);
  }
}

}  // namespace Utils
}  // namespace UnitTests
}  // namespace L4

@@ -1,111 +1,88 @@
#pragma once

#include <string.h>
#include <array>
#include <cstdint>
#include <string>
#include <vector>
#include "L4/Log/PerfCounter.h"
#include "L4/Utils/Exception.h"

namespace L4 {
namespace UnitTests {

// Macro CHECK_EXCEPTION_THROWN

#define CHECK_EXCEPTION_THROWN(statement) \
  do {                                    \
    bool isExceptionThrown = false;       \
    try {                                 \
      statement;                          \
    } catch (const RuntimeException&) {   \
      isExceptionThrown = true;           \
    }                                     \
    BOOST_CHECK(isExceptionThrown);       \
  } while (0)

#define CHECK_EXCEPTION_THROWN_WITH_MESSAGE(statement, message)  \
  do {                                                           \
    bool isExceptionThrown = false;                              \
    std::string exceptionMsg;                                    \
    try {                                                        \
      statement;                                                 \
    } catch (const RuntimeException& ex) {                       \
      isExceptionThrown = true;                                  \
      exceptionMsg = ex.what();                                  \
      BOOST_TEST_MESSAGE("Exception Message: " << exceptionMsg); \
    }                                                            \
    BOOST_CHECK(isExceptionThrown);                              \
    BOOST_CHECK(strcmp((message), exceptionMsg.c_str()) == 0);   \
  } while (0)

// This will validate the given message is a prefix of the exception message.
#define CHECK_EXCEPTION_THROWN_WITH_PREFIX_MESSAGE(statement, message)   \
  do {                                                                   \
    bool isExceptionThrown = false;                                      \
    std::string exceptionMsg;                                            \
    try {                                                                \
      statement;                                                         \
    } catch (const RuntimeException& ex) {                               \
      isExceptionThrown = true;                                          \
      exceptionMsg = ex.what();                                          \
      BOOST_TEST_MESSAGE("Exception Message: " << exceptionMsg);         \
    }                                                                    \
    BOOST_CHECK(isExceptionThrown);                                      \
    BOOST_CHECK(exceptionMsg.compare(0, strlen(message), message) == 0); \
  } while (0)

namespace Utils {

template <typename T>
T ConvertFromString(const char* str) {
  return T(reinterpret_cast<const std::uint8_t*>(str),
           static_cast<typename T::size_type>(strlen(str)));
}

template <typename T>
std::string ConvertToString(const T& t) {
  return std::string(reinterpret_cast<const char*>(t.m_data), t.m_size);
}

// Counter related validation util function.

using ExpectedCounterValues =
    std::vector<std::pair<HashTablePerfCounter,
                          typename PerfCounters<HashTablePerfCounter>::TValue>>;

// Validate the given perfData against the expected counter value.
void ValidateCounter(const HashTablePerfData& actual,
                     HashTablePerfCounter perfCounter,
                     PerfCounters<HashTablePerfCounter>::TValue expectedValue);

// Validate the given perfData against the expected counter values.
void ValidateCounters(const HashTablePerfData& actual,
                      const ExpectedCounterValues& expected);

}  // namespace Utils
}  // namespace UnitTests
}  // namespace L4

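As a usage sketch of the exception macros above (a hypothetical test case, not part of this change; it assumes this header and Boost.Test are included and sits, like the other tests, inside namespace L4::UnitTests):

namespace L4 {
namespace UnitTests {

BOOST_AUTO_TEST_CASE(ExceptionMacroUsageSketch) {
  // The statement is wrapped in a try/catch by the macro itself.
  CHECK_EXCEPTION_THROWN(throw RuntimeException("boom"));

  // Only the prefix of the message has to match for the prefix variant.
  CHECK_EXCEPTION_THROWN_WITH_PREFIX_MESSAGE(
      throw RuntimeException("Invalid config: missing table name"),
      "Invalid config");
}

}  // namespace UnitTests
}  // namespace L4
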
@@ -1,54 +1,53 @@
#include <array>
#include <boost/test/unit_test.hpp>
#include "L4/Utils/Math.h"

namespace L4 {
namespace UnitTests {

using namespace Utils;

BOOST_AUTO_TEST_CASE(MathTest) {
  // RoundUp tests.
  BOOST_CHECK_EQUAL(Math::RoundUp(5, 10), 10);
  BOOST_CHECK_EQUAL(Math::RoundUp(10, 10), 10);
  BOOST_CHECK_EQUAL(Math::RoundUp(11, 10), 20);
  BOOST_CHECK_EQUAL(Math::RoundUp(5, 0), 5);

  // RoundDown tests.
  BOOST_CHECK_EQUAL(Math::RoundDown(5, 10), 0);
  BOOST_CHECK_EQUAL(Math::RoundDown(10, 10), 10);
  BOOST_CHECK_EQUAL(Math::RoundDown(11, 10), 10);
  BOOST_CHECK_EQUAL(Math::RoundDown(5, 0), 5);

  // IsPowerOfTwo tests.
  BOOST_CHECK(Math::IsPowerOfTwo(2));
  BOOST_CHECK(Math::IsPowerOfTwo(4));
  BOOST_CHECK(!Math::IsPowerOfTwo(3));
  BOOST_CHECK(!Math::IsPowerOfTwo(0));

  // NextHighestPowerOfTwo tests.
  BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(0), 0U);
  BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(1), 1U);
  BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(2), 2U);
  BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(3), 4U);
  BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(4), 4U);
  BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(5), 8U);
  BOOST_CHECK_EQUAL(Math::NextHighestPowerOfTwo(200), 256U);
}

BOOST_AUTO_TEST_CASE(PointerArithmeticTest) {
  std::array<int, 3> elements;

  BOOST_CHECK(reinterpret_cast<int*>(Math::PointerArithmetic::Add(
                  &elements[0], sizeof(int))) == &elements[1]);
  BOOST_CHECK(reinterpret_cast<int*>(Math::PointerArithmetic::Subtract(
                  &elements[1], sizeof(int))) == &elements[0]);
  BOOST_CHECK(Math::PointerArithmetic::Distance(&elements[2], &elements[0]) ==
              sizeof(int) * 2U);
  BOOST_CHECK(Math::PointerArithmetic::Distance(&elements[0], &elements[2]) ==
              sizeof(int) * 2U);
}

}  // namespace UnitTests
}  // namespace L4

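A standalone illustration (not L4 code) of why NextHighestPowerOfTwo is useful: once a count has been rounded up to a power of two, "index & (count - 1)" is equivalent to "index % count", which is the masking trick the epoch action queues described later rely on. The helper name and values here are hypothetical.

#include <cstddef>
#include <cstdint>

// Picks a queue slot by round-robin counter; numQueues is assumed to already
// be a power of two (e.g. produced by Math::NextHighestPowerOfTwo).
std::size_t PickQueue(std::uint64_t roundRobinCounter, std::size_t numQueues) {
  return static_cast<std::size_t>(roundRobinCounter & (numQueues - 1));
}
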
@@ -1,32 +1,30 @@
#pragma once

#include <chrono>
#include <cstdint>

namespace L4 {

// EpochManagerConfig struct.
struct EpochManagerConfig {
  // "numActionQueues" indicates how many action containers there will be in
  // order to increase the throughput of registering an action.
  // "performActionsInParallelThreshold" indicates the threshold value above
  // which the actions are performed in parallel.
  // "maxNumThreadsToPerformActions" indicates how many threads will be used
  // when performing an action in parallel.
  explicit EpochManagerConfig(
      std::uint32_t epochQueueSize = 1000,
      std::chrono::milliseconds epochProcessingInterval =
          std::chrono::milliseconds{1000},
      std::uint8_t numActionQueues = 1)
      : m_epochQueueSize{epochQueueSize},
        m_epochProcessingInterval{epochProcessingInterval},
        m_numActionQueues{numActionQueues} {}

  std::uint32_t m_epochQueueSize;
  std::chrono::milliseconds m_epochProcessingInterval;
  std::uint8_t m_numActionQueues;
};

}  // namespace L4

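A minimal construction sketch, assuming the EpochManagerConfig declared above is in scope; the helper name and the parameter values are arbitrary and chosen only to show which argument is which.

#include <chrono>

L4::EpochManagerConfig MakeSampleEpochConfig() {
  return L4::EpochManagerConfig{
      /*epochQueueSize*/ 4096,
      /*epochProcessingInterval*/ std::chrono::milliseconds{250},
      /*numActionQueues*/ 2};
}
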
@@ -11,53 +11,51 @@
#include "IEpochActionManager.h"
#include "Utils/Lock.h"

namespace L4 {

// EpochActionManager provides functionalities to add actions at an epoch and to
// perform actions up to the given epoch.
class EpochActionManager {
 public:
  // "numActionQueues" indicates how many action containers there will be in
  // order to increase the throughput of registering an action. This will be
  // re-calculated to the next highest power of two so that the "&" operator can
  // be used for accessing the next queue.
  explicit EpochActionManager(std::uint8_t numActionQueues);

  // Adds an action at a given epoch counter.
  // This function is thread-safe.
  void RegisterAction(std::uint64_t epochCounter,
                      IEpochActionManager::Action&& action);

  // Perform actions whose associated epoch counter value is less than
  // the given epoch counter value, and returns the number of actions performed.
  std::uint64_t PerformActions(std::uint64_t epochCounter);

  EpochActionManager(const EpochActionManager&) = delete;
  EpochActionManager& operator=(const EpochActionManager&) = delete;

 private:
  using Mutex = Utils::CriticalSection;
  using Lock = std::lock_guard<Mutex>;

  using Actions = std::vector<IEpochActionManager::Action>;

  // The following structure needs to be sorted by the epoch counter.
  // If the performance of using std::map becomes an issue, we can revisit this.
  using EpochToActions = std::map<std::uint64_t, Actions>;

  using EpochToActionsWithLock =
      std::tuple<std::unique_ptr<Mutex>, EpochToActions>;

  // Run actions based on the configuration.
  void ApplyActions(Actions& actions);

  // Stores mapping from a epoch counter to actions to perform.
  std::vector<EpochToActionsWithLock> m_epochToActionsList;

  // Used to point to the next EpochToActions to simulate round-robin access.
  std::atomic<std::uint32_t> m_counter;
};

}  // namespace L4

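A usage sketch of the call pattern declared above (assumes the class is linked in; the epoch values, the function name, and the lambda body are arbitrary placeholders):

#include <cstdint>

void EpochActionManagerUsageSketch() {
  L4::EpochActionManager actionManager{2U};

  // Defer some cleanup until epoch 10 is known to be unreferenced.
  actionManager.RegisterAction(10U, [] { /* reclaim memory, close handles */ });

  // Later, once every reader has moved past epoch 10, run everything
  // registered below epoch 11.
  const auto numPerformed = actionManager.PerformActions(11U);
  (void)numPerformed;
}
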
@@ -7,159 +7,142 @@
#include "Utils/Exception.h"
#include "Utils/Lock.h"

namespace L4 {

// EpochQueue struct represents reference counts for each epoch.
// Each value of the queue (fixed-size array) is the reference counts at an
// index, where an index represents an epoch (time).
template <typename TSharableLock,
          typename TExclusiveLock,
          typename Allocator = std::allocator<void> >
struct EpochQueue {
  static_assert(std::is_same<typename TSharableLock::mutex_type,
                             typename TExclusiveLock::mutex_type>::value,
                "mutex type should be the same");

 public:
  EpochQueue(std::uint64_t epochCounter,
             std::uint32_t queueSize,
             Allocator allocator = Allocator())
      : m_frontIndex{epochCounter},
        m_backIndex{epochCounter},
        m_mutexForBackIndex{},
        m_refCounts{
            queueSize,
            typename Allocator::template rebind<RefCount>::other(allocator)} {
    if (queueSize == 0U) {
      throw RuntimeException("Zero queue size is not allowed.");
    }
  }

  using SharableLock = TSharableLock;
  using ExclusiveLock = TExclusiveLock;
  using RefCount = std::atomic<std::uint32_t>;
  using RefCounts = Interprocess::Container::
      Vector<RefCount, typename Allocator::template rebind<RefCount>::other>;

  // The followings (m_frontIndex and m_backIndex) are
  // accessed/updated only by the owner thread (only one thread), thus
  // they don't require any synchronization.
  std::size_t m_frontIndex;

  // Back index represents the latest epoch counter value. Note that
  // this is accessed/updated by multiple threads, thus requires
  // synchronization.
  std::size_t m_backIndex;

  // Read/Write lock for m_backIndex.
  typename SharableLock::mutex_type m_mutexForBackIndex;

  // Reference counts per epoch count.
  // The index represents the epoch counter value and the value represents the
  // reference counts.
  RefCounts m_refCounts;
};

// EpochRefManager provides functionality of adding/removing references
// to the epoch counter.
template <typename EpochQueue>
class EpochRefManager {
 public:
  explicit EpochRefManager(EpochQueue& epochQueue) : m_epochQueue(epochQueue) {}

  // Increment a reference to the current epoch counter.
  // This function is thread-safe.
  std::uint64_t AddRef() {
    // The synchronization is needed for EpochCounterManager::AddNewEpoch().
    typename EpochQueue::SharableLock lock(m_epochQueue.m_mutexForBackIndex);

    ++m_epochQueue.m_refCounts[m_epochQueue.m_backIndex %
                               m_epochQueue.m_refCounts.size()];

    return m_epochQueue.m_backIndex;
  }

  // Decrement a reference count for the given epoch counter.
  // This function is thread-safe.
  void RemoveRef(std::uint64_t epochCounter) {
    auto& refCounter =
        m_epochQueue
            .m_refCounts[epochCounter % m_epochQueue.m_refCounts.size()];

    if (refCounter == 0) {
      throw RuntimeException("Reference counter is invalid.");
    }

    --refCounter;
  }

  EpochRefManager(const EpochRefManager&) = delete;
  EpochRefManager& operator=(const EpochRefManager&) = delete;

 private:
  EpochQueue& m_epochQueue;
};

// EpochCounterManager provides functionality of updating the current epoch
// counter and getting the latest unreferenced epoch counter.
template <typename EpochQueue>
class EpochCounterManager {
 public:
  explicit EpochCounterManager(EpochQueue& epochQueue)
      : m_epochQueue(epochQueue) {}

  // Increments the current epoch count by one.
  // This function is thread-safe.
  void AddNewEpoch() {
    // The synchronization is needed for EpochRefManager::AddRef().
    typename EpochQueue::ExclusiveLock lock(m_epochQueue.m_mutexForBackIndex);

    ++m_epochQueue.m_backIndex;

    // TODO: check for the overwrap and throw.
  }

  // Returns the epoch count in the queue where it is the biggest epoch
  // count such that all other epoch counts' references are zeros.
  // Note that this function is NOT thread safe, and should be run on the
  // same thread as the one that calls AddNewEpoch().
  std::uint64_t RemoveUnreferenceEpochCounters() {
    while (m_epochQueue.m_backIndex > m_epochQueue.m_frontIndex) {
      if (m_epochQueue.m_refCounts[m_epochQueue.m_frontIndex %
                                   m_epochQueue.m_refCounts.size()] == 0U) {
        ++m_epochQueue.m_frontIndex;
      } else {
        // There are references to the front of the queue and will return this
        // front index.
        break;
      }
    }

    return m_epochQueue.m_frontIndex;
  }

  EpochCounterManager(const EpochCounterManager&) = delete;
  EpochCounterManager& operator=(const EpochCounterManager&) = delete;

 private:
  EpochQueue& m_epochQueue;
};

}  // namespace L4

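A deliberately simplified, single-threaded model of the protocol above (not the L4 types): AddRef/RemoveRef correspond to EpochRefManager, AddNewEpoch and the reclamation loop to EpochCounterManager. The class name is hypothetical, and the shared/exclusive locking and atomic reference counts of the real code are omitted here.

#include <cstdint>
#include <vector>

struct ToyEpochQueue {
  explicit ToyEpochQueue(std::size_t size) : refCounts(size, 0U) {}

  // Reader enters: pin the current back index and bump its reference count.
  std::uint64_t AddRef() {
    ++refCounts[backIndex % refCounts.size()];
    return backIndex;
  }

  // Reader leaves: drop the reference taken at AddRef().
  void RemoveRef(std::uint64_t epoch) { --refCounts[epoch % refCounts.size()]; }

  // Owner thread periodically opens a new epoch.
  void AddNewEpoch() { ++backIndex; }

  // Owner thread advances the front past every fully unreferenced epoch and
  // returns the first epoch that may still be observed by a reader.
  std::uint64_t RemoveUnreferencedEpochs() {
    while (backIndex > frontIndex &&
           refCounts[frontIndex % refCounts.size()] == 0U) {
      ++frontIndex;
    }
    return frontIndex;
  }

  std::uint64_t frontIndex = 0;
  std::uint64_t backIndex = 0;
  std::vector<std::uint32_t> refCounts;
};
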
@@ -1,42 +1,37 @@
#pragma once

#include <boost/integer_traits.hpp>
#include <cstdint>

namespace L4 {

// EpochRefPolicy class
template <typename EpochRefManager>
class EpochRefPolicy {
 public:
  explicit EpochRefPolicy(EpochRefManager& epochRefManager)
      : m_epochRefManager{epochRefManager},
        m_epochCounter{m_epochRefManager.AddRef()} {}

  EpochRefPolicy(EpochRefPolicy&& epochRefPolicy)
      : m_epochRefManager{epochRefPolicy.m_epochRefManager},
        m_epochCounter{epochRefPolicy.m_epochCounter} {
    epochRefPolicy.m_epochCounter =
        boost::integer_traits<std::uint64_t>::const_max;
  }

  ~EpochRefPolicy() {
    if (m_epochCounter != boost::integer_traits<std::uint64_t>::const_max) {
      m_epochRefManager.RemoveRef(m_epochCounter);
    }
  }

  EpochRefPolicy(const EpochRefPolicy&) = delete;
  EpochRefPolicy& operator=(const EpochRefPolicy&) = delete;

 private:
  EpochRefManager& m_epochRefManager;
  std::uint64_t m_epochCounter;
};

}  // namespace L4

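A RAII usage sketch for the policy above. The manager type here is a hypothetical stand-in: any type with AddRef() returning the pinned epoch and RemoveRef(epoch) satisfies the template parameter.

#include <cstdint>

struct ToyRefManager {
  std::uint64_t AddRef() { return 42U; }  // pretend current epoch
  void RemoveRef(std::uint64_t /*epoch*/) {}
};

void EpochRefPolicyUsageSketch() {
  ToyRefManager manager;
  {
    L4::EpochRefPolicy<ToyRefManager> pin{manager};
    // Reads performed here are covered by the pinned epoch; the reference is
    // released automatically when "pin" goes out of scope.
  }
}
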
@@ -2,21 +2,17 @@
#include <functional>

namespace L4 {

// IEpochActionManager interface exposes an API for registering an Action.
struct IEpochActionManager {
  using Action = std::function<void()>;

  virtual ~IEpochActionManager(){};

  // Register actions on the latest epoch in the queue and the action is
  // performed when the epoch is removed from the queue.
  virtual void RegisterAction(Action&& action) = 0;
};

}  // namespace L4

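A short sketch of what callers typically do with this interface (the function name and the buffer parameter are hypothetical): hand the manager a callback whose execution is deferred until no reader can still observe the resource.

#include <cstdint>

void DeferDelete(L4::IEpochActionManager& epochManager, std::uint8_t* buffer) {
  // The lambda runs only when the registered epoch is retired.
  epochManager.RegisterAction([buffer] { delete[] buffer; });
}
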
@@ -4,392 +4,352 @@

#include <cstdint>
#include <mutex>
#include <stdexcept>

#include "Epoch/IEpochActionManager.h"
#include "HashTable/Cache/Metadata.h"
#include "HashTable/IHashTable.h"
#include "HashTable/ReadWrite/HashTable.h"
#include "Utils/Clock.h"
#include "detail/ToRawPointer.h"

namespace L4 {
namespace HashTable {
namespace Cache {

// ReadOnlyHashTable class implements IReadOnlyHashTable interface and provides
// the functionality to read data given a key.
template <typename Allocator, typename Clock = Utils::EpochClock>
class ReadOnlyHashTable
    : public virtual ReadWrite::ReadOnlyHashTable<Allocator>,
      protected Clock {
 public:
  using Base = ReadWrite::ReadOnlyHashTable<Allocator>;
  using HashTable = typename Base::HashTable;

  using Key = typename Base::Key;
  using Value = typename Base::Value;
  using IIteratorPtr = typename Base::IIteratorPtr;

  class Iterator;

  ReadOnlyHashTable(HashTable& hashTable, std::chrono::seconds recordTimeToLive)
      : Base(hashTable,
             RecordSerializer{hashTable.m_setting.m_fixedKeySize,
                              hashTable.m_setting.m_fixedValueSize,
                              Metadata::c_metaDataSize}),
        m_recordTimeToLive{recordTimeToLive} {}

  virtual bool Get(const Key& key, Value& value) const override {
    const auto status = GetInternal(key, value);

    // Note that the following const_cast is safe and necessary to update cache
    // hit information.
    const_cast<HashTablePerfData&>(this->GetPerfData())
        .Increment(status ? HashTablePerfCounter::CacheHitCount
                          : HashTablePerfCounter::CacheMissCount);

    return status;
  }

  virtual IIteratorPtr GetIterator() const override {
    return std::make_unique<Iterator>(
        this->m_hashTable, this->m_recordSerializer, m_recordTimeToLive,
        this->GetCurrentEpochTime());
  }

  ReadOnlyHashTable(const ReadOnlyHashTable&) = delete;
  ReadOnlyHashTable& operator=(const ReadOnlyHashTable&) = delete;

 protected:
  bool GetInternal(const Key& key, Value& value) const {
    if (!Base::Get(key, value)) {
      return false;
    }

    assert(value.m_size > Metadata::c_metaDataSize);

    // If the record with the given key is found, check if the record is expired
    // or not. Note that the following const_cast is safe and necessary to
    // update the access status.
    Metadata metaData{const_cast<std::uint32_t*>(
        reinterpret_cast<const std::uint32_t*>(value.m_data))};
    if (metaData.IsExpired(this->GetCurrentEpochTime(), m_recordTimeToLive)) {
      return false;
    }

    metaData.UpdateAccessStatus(true);

    value.m_data += Metadata::c_metaDataSize;
    value.m_size -= Metadata::c_metaDataSize;

    return true;
  }

  std::chrono::seconds m_recordTimeToLive;
};

template <typename Allocator, typename Clock>
class ReadOnlyHashTable<Allocator, Clock>::Iterator : public Base::Iterator {
 public:
  using BaseIterator = typename Base::Iterator;

  Iterator(const HashTable& hashTable,
           const RecordSerializer& recordDeserializer,
           std::chrono::seconds recordTimeToLive,
           std::chrono::seconds currentEpochTime)
      : BaseIterator(hashTable, recordDeserializer),
        m_recordTimeToLive{recordTimeToLive},
        m_currentEpochTime{currentEpochTime} {}

  Iterator(Iterator&& other)
      : BaseIterator(std::move(other)),
        m_recordTimeToLive{std::move(other.m_recordTimeToLive)},
        m_currentEpochTime{std::move(other.m_currentEpochTime)} {}

  bool MoveNext() override {
    if (!BaseIterator::MoveNext()) {
      return false;
    }

    do {
      const Metadata metaData{
          const_cast<std::uint32_t*>(reinterpret_cast<const std::uint32_t*>(
              BaseIterator::GetValue().m_data))};

      if (!metaData.IsExpired(m_currentEpochTime, m_recordTimeToLive)) {
        return true;
      }
    } while (BaseIterator::MoveNext());

    return false;
  }

  Value GetValue() const override {
    auto value = BaseIterator::GetValue();
    value.m_data += Metadata::c_metaDataSize;
    value.m_size -= Metadata::c_metaDataSize;

    return value;
  }

 private:
  std::chrono::seconds m_recordTimeToLive;
  std::chrono::seconds m_currentEpochTime;
};
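To make the Get() path above concrete: every cached value is stored with a 4-byte Metadata header in front of the user payload, and the reader strips that header after the expiration check. A minimal sketch of the same layout handling on a plain buffer; the struct and names here are hypothetical stand-ins for the real Value type.

// Illustrative only: mirrors how GetInternal() skips the metadata header.
#include <cstdint>

struct RawValue {
  const std::uint8_t* m_data;
  std::uint32_t m_size;
};

constexpr std::uint32_t kMetaDataSize = sizeof(std::uint32_t);  // Metadata::c_metaDataSize

RawValue StripCacheMetadata(RawValue stored) {
  // The first 4 bytes hold the access bit and creation epoch time;
  // the caller only ever sees the bytes after them.
  stored.m_data += kMetaDataSize;
  stored.m_size -= kMetaDataSize;
  return stored;
}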
// The following warning is from the virtual inheritance and safe to disable in
// this case. https://msdn.microsoft.com/en-us/library/6b3sy7ae.aspx
#pragma warning(push)
#pragma warning(disable : 4250)

// WritableHashTable class implements IWritableHashTable interface and also
// provides the read only access (Get()) to the hash table.
template <typename Allocator, typename Clock = Utils::EpochClock>
class WritableHashTable : public ReadOnlyHashTable<Allocator, Clock>,
                          public ReadWrite::WritableHashTable<Allocator> {
 public:
  using ReadOnlyBase = ReadOnlyHashTable<Allocator, Clock>;
  using WritableBase = typename ReadWrite::WritableHashTable<Allocator>;
  using HashTable = typename ReadOnlyBase::HashTable;

  using Key = typename ReadOnlyBase::Key;
  using Value = typename ReadOnlyBase::Value;
  using ISerializerPtr = typename WritableBase::ISerializerPtr;

  WritableHashTable(HashTable& hashTable,
                    IEpochActionManager& epochManager,
                    std::uint64_t maxCacheSizeInBytes,
                    std::chrono::seconds recordTimeToLive,
                    bool forceTimeBasedEviction)
      : ReadOnlyBase::Base(
            hashTable,
            RecordSerializer{hashTable.m_setting.m_fixedKeySize,
                             hashTable.m_setting.m_fixedValueSize,
                             Metadata::c_metaDataSize}),
        ReadOnlyBase(hashTable, recordTimeToLive),
        WritableBase(hashTable, epochManager),
        m_maxCacheSizeInBytes{maxCacheSizeInBytes},
        m_forceTimeBasedEviction{forceTimeBasedEviction},
        m_currentEvictBucketIndex{0U} {}

  using ReadOnlyBase::Get;
  using ReadOnlyBase::GetPerfData;

  virtual void Add(const Key& key, const Value& value) override {
    if (m_forceTimeBasedEviction) {
      EvictBasedOnTime(key);
    }

    Evict(key.m_size + value.m_size + Metadata::c_metaDataSize);

    WritableBase::Add(CreateRecordBuffer(key, value));
  }

  virtual ISerializerPtr GetSerializer() const override {
    throw std::runtime_error("Not implemented yet.");
  }

 private:
  using Mutex = std::mutex;
  using Lock = std::lock_guard<Mutex>;

  void EvictBasedOnTime(const Key& key) {
    const auto bucketIndex = this->GetBucketInfo(key).first;

    auto* entry = &(this->m_hashTable.m_buckets[bucketIndex]);

    const auto curEpochTime = this->GetCurrentEpochTime();

    typename HashTable::Lock lock{this->m_hashTable.GetMutex(bucketIndex)};

    while (entry != nullptr) {
      for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) {
        const auto data = entry->m_dataList[i].Load(std::memory_order_relaxed);

        if (data != nullptr) {
          const Metadata metadata{
              const_cast<std::uint32_t*>(reinterpret_cast<const std::uint32_t*>(
                  this->m_recordSerializer.Deserialize(*data).m_value.m_data))};

          if (metadata.IsExpired(curEpochTime, this->m_recordTimeToLive)) {
            WritableBase::Remove(*entry, i);
            this->m_hashTable.m_perfData.Increment(
                HashTablePerfCounter::EvictedRecordsCount);
          }
        }
      }

      entry = entry->m_next.Load(std::memory_order_relaxed);
    }
  }

  // Evict uses CLOCK algorithm to evict records based on expiration and access
  // status until the number of bytes freed matches the given number of bytes
  // needed.
  void Evict(std::uint64_t bytesNeeded) {
    std::uint64_t numBytesToFree = CalculateNumBytesToFree(bytesNeeded);
    if (numBytesToFree == 0U) {
      return;
    }

    // Start evicting records with a lock.
    Lock evictLock{m_evictMutex};

    // Recalculate the number of bytes to free since another thread may have
    // already evicted.
    numBytesToFree = CalculateNumBytesToFree(bytesNeeded);
    if (numBytesToFree == 0U) {
      return;
    }

    const auto curEpochTime = this->GetCurrentEpochTime();

    // The max number of iterations we are going through per eviction is twice
    // the number of buckets so that it can clear the access status. Note that
    // this is the worst case scenario and the eviction process should exit much
    // quicker in a normal case.
    auto& buckets = this->m_hashTable.m_buckets;
    std::uint64_t numIterationsRemaining = buckets.size() * 2U;

    while (numBytesToFree > 0U && numIterationsRemaining-- > 0U) {
      const auto currentBucketIndex =
          m_currentEvictBucketIndex++ % buckets.size();
      auto& bucket = buckets[currentBucketIndex];

      // Lock the bucket since another thread can bypass Evict() since
      // TotalDataSize can be updated before the lock on m_evictMutex is
      // released.
      typename HashTable::UniqueLock lock{
          this->m_hashTable.GetMutex(currentBucketIndex)};
      typename HashTable::Entry* entry = &bucket;

      while (entry != nullptr) {
        for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) {
          const auto data =
              entry->m_dataList[i].Load(std::memory_order_relaxed);

          if (data != nullptr) {
            const auto record = this->m_recordSerializer.Deserialize(*data);
            const auto& value = record.m_value;

            Metadata metadata{const_cast<std::uint32_t*>(
                reinterpret_cast<const std::uint32_t*>(value.m_data))};

            // Evict this record if
            // 1: the record is expired, or
            // 2: the entry is not recently accessed (and unset the access bit
            // if set).
            if (metadata.IsExpired(curEpochTime, this->m_recordTimeToLive) ||
                !metadata.UpdateAccessStatus(false)) {
              const auto numBytesFreed = record.m_key.m_size + value.m_size;
              numBytesToFree = (numBytesFreed >= numBytesToFree)
                                   ? 0U
                                   : numBytesToFree - numBytesFreed;

              WritableBase::Remove(*entry, i);

              this->m_hashTable.m_perfData.Increment(
                  HashTablePerfCounter::EvictedRecordsCount);
            }
          }
        }

        entry = entry->m_next.Load(std::memory_order_relaxed);
      }
    }
  }

  // Given the number of bytes needed, it calculates the number of bytes
  // to free based on the max cache size.
  std::uint64_t CalculateNumBytesToFree(std::uint64_t bytesNeeded) const {
    const auto& perfData = GetPerfData();

    const std::uint64_t totalDataSize =
        perfData.Get(HashTablePerfCounter::TotalKeySize) +
        perfData.Get(HashTablePerfCounter::TotalValueSize) +
        perfData.Get(HashTablePerfCounter::TotalIndexSize);

    if ((bytesNeeded < m_maxCacheSizeInBytes) &&
        (totalDataSize + bytesNeeded <= m_maxCacheSizeInBytes)) {
      // There are enough free bytes.
      return 0U;
    }

    // (totalDataSize > m_maxCacheSizeInBytes) case is possible:
    // 1) If multiple threads are evicting and adding at the same time.
    //    For example, if thread A was evicting and thread B could have
    //    used the evicted bytes before thread A consumed them.
    // 2) If max cache size is set lower than expectation.
    return (totalDataSize > m_maxCacheSizeInBytes)
               ? (totalDataSize - m_maxCacheSizeInBytes + bytesNeeded)
               : bytesNeeded;
  }

  RecordBuffer* CreateRecordBuffer(const Key& key, const Value& value) {
    const auto bufferSize =
        this->m_recordSerializer.CalculateBufferSize(key, value);
    auto buffer = Detail::to_raw_pointer(
        this->m_hashTable.template GetAllocator<std::uint8_t>().allocate(
            bufferSize));

    std::uint32_t metaDataBuffer;
    Metadata{&metaDataBuffer, this->GetCurrentEpochTime()};

    // 4-byte Metadata is inserted between key and value buffer.
    return this->m_recordSerializer.Serialize(
        key, value,
        Value{reinterpret_cast<std::uint8_t*>(&metaDataBuffer),
              sizeof(metaDataBuffer)},
        buffer, bufferSize);
  }

  Mutex m_evictMutex;
  const std::uint64_t m_maxCacheSizeInBytes;
  const bool m_forceTimeBasedEviction;
  std::uint64_t m_currentEvictBucketIndex;
};

#pragma warning(pop)

} // namespace Cache
} // namespace HashTable
} // namespace L4
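Evict() above is a CLOCK (second-chance) sweep, as its comment states: a record is reclaimed only if it has expired or its access bit is already clear; otherwise the sweep clears the bit and moves on. The decision rule is restated below on a toy in-memory list; every type and name in the sketch is hypothetical and only illustrates the policy, not the real bucket traversal or locking.

// Illustrative only: the second-chance rule used by Evict(), applied to a toy list.
#include <cstdint>
#include <vector>

struct ToyRecord {
  std::uint64_t sizeInBytes;
  bool accessed;  // CLOCK bit: set on read, cleared by a sweep pass.
  bool expired;   // TTL already exceeded.
};

// Frees records until enough bytes are reclaimed; recently accessed,
// unexpired records get a second chance by having their access bit cleared.
std::uint64_t SweepOnce(std::vector<ToyRecord>& records,
                        std::uint64_t bytesToFree) {
  std::uint64_t freed = 0U;
  for (auto& record : records) {
    if (freed >= bytesToFree) {
      break;
    }
    if (record.expired || !record.accessed) {
      freed += record.sizeInBytes;
      record.sizeInBytes = 0U;  // "evicted"
    } else {
      record.accessed = false;  // second chance
    }
  }
  return freed;
}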
@@ -4,113 +4,91 @@

#include <chrono>
#include <cstdint>

namespace L4 {
namespace HashTable {
namespace Cache {

// Metadata class that stores caching related data.
// It stores an access bit to indicate whether a record is recently accessed
// as well as the epoch time when a record is created.
// Note that this works regardless of the alignment of the metadata passed in.
class Metadata {
 public:
  // Constructs Metadata with the current epoch time.
  Metadata(std::uint32_t* metadata, std::chrono::seconds curEpochTime)
      : Metadata{metadata} {
    *m_metadata = curEpochTime.count() & s_epochTimeMask;
  }

  explicit Metadata(std::uint32_t* metadata) : m_metadata{metadata} {
    assert(m_metadata != nullptr);
  }

  // Returns the stored epoch time.
  std::chrono::seconds GetEpochTime() const {
    // *m_metadata even on the not-aligned memory should be fine since
    // only the byte that contains the access bit is modified, and
    // byte read is atomic.
    return std::chrono::seconds{*m_metadata & s_epochTimeMask};
  }

  // Returns true if the stored epoch time is expired based
  // on the given current epoch time and time-to-live value.
  bool IsExpired(std::chrono::seconds curEpochTime,
                 std::chrono::seconds timeToLive) const {
    assert(curEpochTime >= GetEpochTime());
    return (curEpochTime - GetEpochTime()) > timeToLive;
  }

  // Returns true if the access status is on.
  bool IsAccessed() const { return !!(GetAccessByte() & s_accessSetMask); }

  // If "set" is true, turn on the access bit in the given metadata and store
  // it. If "set" is false, turn off the access bit. Returns true if the given
  // metadata's access bit was originally on.
  bool UpdateAccessStatus(bool set) {
    const auto isAccessBitOn = IsAccessed();

    // Set the bit only if the bit is not set, and vice versa.
    if (set != isAccessBitOn) {
      if (set) {
        GetAccessByte() |= s_accessSetMask;
      } else {
        GetAccessByte() &= s_accessUnsetMask;
      }
    }

    return isAccessBitOn;
  }

  static constexpr std::uint16_t c_metaDataSize = sizeof(std::uint32_t);

 private:
  std::uint8_t GetAccessByte() const {
    return reinterpret_cast<std::uint8_t*>(m_metadata)[s_accessBitByte];
  }

  std::uint8_t& GetAccessByte() {
    return reinterpret_cast<std::uint8_t*>(m_metadata)[s_accessBitByte];
  }

  // TODO: Create an endian test and assert it. (Works only on little endian).
  // The byte that contains the most significant bit.
  static constexpr std::uint8_t s_accessBitByte = 3U;

  // Most significant bit is set.
  static constexpr std::uint8_t s_accessSetMask = 1U << 7;
  static constexpr std::uint8_t s_accessUnsetMask = s_accessSetMask ^ 0xFF;

  // The rest of bits other than the most significant bit are set.
  static constexpr std::uint32_t s_epochTimeMask = 0x7FFFFFFF;

  // The most significant bit is a CLOCK bit. It is set to 1 upon access
  // and reset to 0 by the cache eviction.
  // The rest of the bits are used for storing the epoch time in seconds.
  std::uint32_t* m_metadata = nullptr;
};

} // namespace Cache
} // namespace HashTable
} // namespace L4
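The class above packs two things into one 32-bit word: the most significant bit is the CLOCK access bit and the remaining 31 bits store the creation epoch time in seconds. A small self-contained sketch of that bit arithmetic, using the same masks shown in the header (the concrete epoch value is only an example):

// Illustrative only: the bit layout Metadata relies on, spelled out on a raw
// 32-bit word (little-endian assumed, as the TODO above notes).
#include <cassert>
#include <cstdint>

int main() {
  const std::uint32_t epochSeconds = 1700000000U & 0x7FFFFFFFU;  // low 31 bits
  std::uint32_t word = epochSeconds;  // access bit starts cleared

  word |= 0x80000000U;  // a read marks the record as recently accessed
  assert((word & 0x7FFFFFFFU) == epochSeconds);  // epoch time is untouched

  word &= 0x7FFFFFFFU;  // an eviction sweep clears the CLOCK bit
  assert(word == epochSeconds);
  return 0;
}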
@@ -4,221 +4,190 @@

#include "HashTable/IHashTable.h"
#include "Utils/Exception.h"

namespace L4 {
namespace HashTable {

// Record struct consists of key and value pair.
struct Record {
  using Key = IReadOnlyHashTable::Key;
  using Value = IReadOnlyHashTable::Value;

  Record() = default;

  Record(const Key& key, const Value& value) : m_key{key}, m_value{value} {}

  Key m_key;
  Value m_value;
};

// RecordBuffer is a thin wrapper struct around a raw buffer array (pointer).
struct RecordBuffer {
  std::uint8_t m_buffer[1];
};

static_assert(sizeof(RecordBuffer) == 1,
              "RecordBuffer size should be 1 to be a thin wrapper.");

// RecordSerializer provides functionality to serialize/deserialize record
// information.
class RecordSerializer {
 public:
  using Key = Record::Key;
  using Value = Record::Value;
  using KeySize = Key::size_type;
  using ValueSize = Value::size_type;

  RecordSerializer(KeySize fixedKeySize,
                   ValueSize fixedValueSize,
                   ValueSize metadataSize = 0U)
      : m_fixedKeySize{fixedKeySize},
        m_fixedValueSize{fixedValueSize},
        m_metadataSize{metadataSize} {}

  // Returns the number of bytes needed for serializing the given key and value.
  std::size_t CalculateBufferSize(const Key& key, const Value& value) const {
    return ((m_fixedKeySize != 0) ? m_fixedKeySize
                                  : (key.m_size + sizeof(KeySize))) +
           ((m_fixedValueSize != 0)
                ? m_fixedValueSize + m_metadataSize
                : (value.m_size + sizeof(ValueSize) + m_metadataSize));
  }

  // Returns the number of bytes used for key and value sizes.
  std::size_t CalculateRecordOverhead() const {
    return (m_fixedKeySize != 0 ? 0U : sizeof(KeySize)) +
           (m_fixedValueSize != 0 ? 0U : sizeof(ValueSize));
  }

  // Serializes the given key and value to the given buffer.
  // Note that the buffer size is at least as big as the number of bytes
  // returned by CalculateBufferSize().
  RecordBuffer* Serialize(const Key& key,
                          const Value& value,
                          std::uint8_t* const buffer,
                          std::size_t bufferSize) const {
    Validate(key, value);

    assert(CalculateBufferSize(key, value) <= bufferSize);
    (void)bufferSize;

    const auto start = SerializeSizes(buffer, key.m_size, value.m_size);

#if defined(_MSC_VER)
    memcpy_s(buffer + start, key.m_size, key.m_data, key.m_size);
    memcpy_s(buffer + start + key.m_size, value.m_size, value.m_data,
             value.m_size);
#else
    memcpy(buffer + start, key.m_data, key.m_size);
    memcpy(buffer + start + key.m_size, value.m_data, value.m_size);
#endif
    return reinterpret_cast<RecordBuffer*>(buffer);
  }

  // Serializes the given key, value and meta value to the given buffer.
  // The meta value is serialized between key and value.
  // Note that the buffer size is at least as big as the number of bytes
  // returned by CalculateBufferSize().
  RecordBuffer* Serialize(const Key& key,
                          const Value& value,
                          const Value& metaValue,
                          std::uint8_t* const buffer,
                          std::size_t bufferSize) const {
    Validate(key, value, metaValue);

    assert(CalculateBufferSize(key, value) <= bufferSize);
    (void)bufferSize;

    const auto start =
        SerializeSizes(buffer, key.m_size, value.m_size + metaValue.m_size);

#if defined(_MSC_VER)
    memcpy_s(buffer + start, key.m_size, key.m_data, key.m_size);
    memcpy_s(buffer + start + key.m_size, metaValue.m_size, metaValue.m_data,
             metaValue.m_size);
    memcpy_s(buffer + start + key.m_size + metaValue.m_size, value.m_size,
             value.m_data, value.m_size);
#else
    memcpy(buffer + start, key.m_data, key.m_size);
    memcpy(buffer + start + key.m_size, metaValue.m_data, metaValue.m_size);
    memcpy(buffer + start + key.m_size + metaValue.m_size, value.m_data,
           value.m_size);
#endif
    return reinterpret_cast<RecordBuffer*>(buffer);
  }

  // Deserializes the given buffer and returns a Record object.
  Record Deserialize(const RecordBuffer& buffer) const {
    Record record;

    const auto* dataBuffer = buffer.m_buffer;

    auto& key = record.m_key;
    if (m_fixedKeySize != 0) {
      key.m_size = m_fixedKeySize;
    } else {
      key.m_size = *reinterpret_cast<const KeySize*>(dataBuffer);
      dataBuffer += sizeof(KeySize);
    }

    auto& value = record.m_value;
    if (m_fixedValueSize != 0) {
      value.m_size = m_fixedValueSize + m_metadataSize;
    } else {
      value.m_size = *reinterpret_cast<const ValueSize*>(dataBuffer);
      dataBuffer += sizeof(ValueSize);
    }

    key.m_data = dataBuffer;
    value.m_data = dataBuffer + key.m_size;

    return record;
  }

 private:
  // Validates key and value sizes when fixed sizes are set.
  // Throws an exception if invalid sizes are used.
  void Validate(const Key& key, const Value& value) const {
    if ((m_fixedKeySize != 0 && key.m_size != m_fixedKeySize) ||
        (m_fixedValueSize != 0 && value.m_size != m_fixedValueSize)) {
      throw RuntimeException("Invalid key or value sizes are given.");
    }
  }

  // Validates against the given meta value.
  void Validate(const Key& key,
                const Value& value,
                const Value& metaValue) const {
    Validate(key, value);

    if (m_metadataSize != metaValue.m_size) {
      throw RuntimeException("Invalid meta value size is given.");
    }
  }

  // Serializes size information to the given buffer.
  // It assumes that buffer has enough size for serialization.
  std::size_t SerializeSizes(std::uint8_t* const buffer,
                             KeySize keySize,
                             ValueSize valueSize) const {
    auto curBuffer = buffer;
    if (m_fixedKeySize == 0) {
      *reinterpret_cast<KeySize*>(curBuffer) = keySize;
      curBuffer += sizeof(keySize);
    }

    if (m_fixedValueSize == 0) {
      *reinterpret_cast<ValueSize*>(curBuffer) = valueSize;
      curBuffer += sizeof(valueSize);
    }

    return curBuffer - buffer;
  }

  const KeySize m_fixedKeySize;
  const ValueSize m_fixedValueSize;
  const ValueSize m_metadataSize;
};

} // namespace HashTable
} // namespace L4
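Reading Serialize() and Deserialize() together, the variable-size wire layout is [key size][value size][key bytes][metadata bytes][value bytes], and the size fields disappear when fixed sizes are configured. A short sketch of the CalculateBufferSize() arithmetic for the variable-size case; the 2-byte and 4-byte size-field widths below are assumptions standing in for Key::size_type and Value::size_type, and the payload lengths are only examples.

// Illustrative only: buffer-size arithmetic for a variable-size record that
// carries a 4-byte cache metadata prefix. Field widths are assumed here.
#include <cstddef>
#include <cstdint>
#include <iostream>

int main() {
  const std::size_t keySizeField = sizeof(std::uint16_t);    // assumed KeySize width
  const std::size_t valueSizeField = sizeof(std::uint32_t);  // assumed ValueSize width
  const std::size_t keyBytes = 4;                            // e.g. "key1"
  const std::size_t valueBytes = 6;                          // e.g. "value1"
  const std::size_t metadataBytes = 4;                       // Metadata::c_metaDataSize

  const std::size_t bufferSize =
      keySizeField + valueSizeField + keyBytes + metadataBytes + valueBytes;
  std::cout << "record buffer bytes: " << bufferSize << "\n";  // 20
  return 0;
}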
@@ -4,29 +4,27 @@

#include "HashTable/Common/SharedHashTable.h"
#include "HashTable/Config.h"

namespace L4 {
namespace HashTable {

// SettingAdapter class provides functionality to convert a
// HashTableConfig::Setting object to a SharedHashTable::Setting object.
class SettingAdapter {
 public:
  template <typename SharedHashTable>
  typename SharedHashTable::Setting Convert(
      const HashTableConfig::Setting& from) const {
    typename SharedHashTable::Setting to;

    to.m_numBuckets = from.m_numBuckets;
    to.m_numBucketsPerMutex =
        (std::max)(from.m_numBucketsPerMutex.get_value_or(1U), 1U);
    to.m_fixedKeySize = from.m_fixedKeySize.get_value_or(0U);
    to.m_fixedValueSize = from.m_fixedValueSize.get_value_or(0U);

    return to;
  }
};

} // namespace HashTable
} // namespace L4
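Convert() only has to fill in defaults for the optional config fields: one bucket per mutex when unset (never fewer than one), and zero for the fixed key/value sizes, which means variable-size records. A minimal restatement of that defaulting rule, using boost::optional the same way the adapter does; the free function is hypothetical.

// Illustrative only: the defaulting rule applied by SettingAdapter::Convert().
#include <algorithm>
#include <cstdint>
#include <boost/optional.hpp>

std::uint32_t BucketsPerMutexOrDefault(
    boost::optional<std::uint32_t> configured) {
  // Unset -> 1, and any configured value is clamped to at least 1.
  return (std::max)(configured.get_value_or(1U), 1U);
}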
@@ -10,197 +10,190 @@

#include "Utils/Exception.h"
#include "Utils/Lock.h"

namespace L4 {
namespace HashTable {

// SharedHashTable struct represents the hash table structure.
template <typename TData, typename TAllocator>
struct SharedHashTable {
  using Data = TData;
  using Allocator = TAllocator;

  // HashTable::Entry struct represents an entry in the chained bucket list.
  // Entry layout is as follows:
  //
  // | tag1 | tag2 | tag3 | tag4 | tag5 | tag6 | tag7 | tag 8 | 1
  // | tag9 | tag10 | tag11 | tag12 | tag13 | tag14 | tag15 | tag 16 | 2
  // | Data1 pointer | 3
  // | Data2 pointer | 4
  // | Data3 pointer | 5
  // | Data4 pointer | 6
  // | Data5 pointer | 7
  // | Data6 pointer | 8
  // | Data7 pointer | 9
  // | Data8 pointer | 10
  // | Data9 pointer | 11
  // | Data10 pointer | 12
  // | Data11 pointer | 13
  // | Data12 pointer | 14
  // | Data13 pointer | 15
  // | Data14 pointer | 16
  // | Data15 pointer | 17
  // | Data16 pointer | 18
  // | Entry pointer to the next Entry | 19
  // <----------------------8 bytes ---------------------------------->
  // , where tag1 is a tag for Data1, tag2 for Data2, and so on. A tag value can
  // be looked up first before going to the corresponding Data for a quick
  // check. Also note that a byte read is atomic in modern processors so that
  // tag is just std::uint8_t instead of being atomic. Even in the case where
  // the tag value read is garbage, this is acceptable because of the
  // followings:
  //   1) if the garbage value was a hit where it should have been a miss: the
  //      actual key comparison will fail,
  //   2) if the garbage value was a miss where it should have been a hit: the
  //      key value must have been changed since the tag was changed, so it
  //      will be looked up correctly after the tag value written is visible
  //      correctly. Note that we don't need to guarantee the timing of writing
  //      and reading (meaning the value written should be visible to the
  //      reader right away).
  //
  // Note about the CPU cache. In previous implementation, the Entry was 64
  // bytes to fit in the CPU cache. However, this resulted in lots of wasted
  // space. For example, when the ratio of the number of expected records to the
  // number of buckets was 2:1, only 85% buckets were occupied. After
  // experiments, if you have 10:1 ratio, you will have 99.98% utilization of
  // buckets. This required having more data per Entry, and the ideal number
  // (after experiments) turned out to be 16 records per Entry. Also, because of
  // how CPU fetches contiguous memory, this didn't have any impact on
  // micro-benchmarking.
  struct Entry {
    Entry() = default;

    // Release deallocates all the memory of the chained entries including
    // the data list in the current Entry.
    void Release(Allocator allocator) {
      auto dataDeleter = [allocator](auto& data) {
        auto dataToDelete = data.Load();
        if (dataToDelete != nullptr) {
          dataToDelete->~Data();
          typename Allocator::template rebind<Data>::other(allocator)
              .deallocate(dataToDelete, 1U);
        }
      };

      // Delete all the chained entries, not including itself.
      auto curEntry = m_next.Load();

      while (curEntry != nullptr) {
        auto entryToDelete = curEntry;

        // Copy m_next for the next iteration.
        curEntry = entryToDelete->m_next.Load();

        // Delete all the data within this entry.
        for (auto& data : entryToDelete->m_dataList) {
          dataDeleter(data);
        }

        // Clean the current entry itself.
        entryToDelete->~Entry();
        typename Allocator::template rebind<Entry>::other(allocator).deallocate(
            entryToDelete, 1U);
      }

      // Delete all the data from the head of chained entries.
      for (auto& data : m_dataList) {
        dataDeleter(data);
      }
    }

    static constexpr std::uint8_t c_numDataPerEntry = 16U;

    std::array<std::uint8_t, c_numDataPerEntry> m_tags{0U};

    std::array<Utils::AtomicOffsetPtr<Data>, c_numDataPerEntry> m_dataList{};

    Utils::AtomicOffsetPtr<Entry> m_next{};
  };

  static_assert(sizeof(Entry) == 152, "Entry should be 152 bytes.");

  struct Setting {
    using KeySize = IReadOnlyHashTable::Key::size_type;
    using ValueSize = IReadOnlyHashTable::Value::size_type;

    Setting() = default;

    explicit Setting(std::uint32_t numBuckets,
                     std::uint32_t numBucketsPerMutex = 1U,
                     KeySize fixedKeySize = 0U,
                     ValueSize fixedValueSize = 0U)
        : m_numBuckets{numBuckets},
          m_numBucketsPerMutex{numBucketsPerMutex},
          m_fixedKeySize{fixedKeySize},
          m_fixedValueSize{fixedValueSize} {}

    std::uint32_t m_numBuckets = 1U;
    std::uint32_t m_numBucketsPerMutex = 1U;
    KeySize m_fixedKeySize = 0U;
    ValueSize m_fixedValueSize = 0U;
  };

  SharedHashTable(const Setting& setting, Allocator allocator)
      : m_allocator{allocator},
        m_setting{setting},
        m_buckets{
            setting.m_numBuckets,
            typename Allocator::template rebind<Entry>::other(m_allocator)},
        m_mutexes{(std::max)(setting.m_numBuckets /
                                 (std::max)(setting.m_numBucketsPerMutex, 1U),
                             1U),
                  typename Allocator::template rebind<Mutex>::other(m_allocator)},
        m_perfData{} {
    m_perfData.Set(HashTablePerfCounter::BucketsCount, m_buckets.size());
    m_perfData.Set(HashTablePerfCounter::TotalIndexSize,
                   (m_buckets.size() * sizeof(Entry)) +
                       (m_mutexes.size() * sizeof(Mutex)) +
                       sizeof(SharedHashTable));
  }

  ~SharedHashTable() {
    for (auto& bucket : m_buckets) {
      bucket.Release(m_allocator);
    }
|
||||||
|
m_mutexes{
|
||||||
|
(std::max)(setting.m_numBuckets /
|
||||||
|
(std::max)(setting.m_numBucketsPerMutex, 1U),
|
||||||
|
1U),
|
||||||
|
typename Allocator::template rebind<Mutex>::other(m_allocator)},
|
||||||
|
m_perfData{} {
|
||||||
|
m_perfData.Set(HashTablePerfCounter::BucketsCount, m_buckets.size());
|
||||||
|
m_perfData.Set(HashTablePerfCounter::TotalIndexSize,
|
||||||
|
(m_buckets.size() * sizeof(Entry)) +
|
||||||
|
(m_mutexes.size() * sizeof(Mutex)) +
|
||||||
|
sizeof(SharedHashTable));
|
||||||
|
}
|
||||||
|
|
||||||
|
~SharedHashTable() {
|
||||||
|
for (auto& bucket : m_buckets) {
|
||||||
|
bucket.Release(m_allocator);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
using Mutex = Utils::ReaderWriterLockSlim;
|
using Mutex = Utils::ReaderWriterLockSlim;
|
||||||
using Lock = std::lock_guard<Mutex>;
|
using Lock = std::lock_guard<Mutex>;
|
||||||
using UniqueLock = std::unique_lock<Mutex>;
|
using UniqueLock = std::unique_lock<Mutex>;
|
||||||
|
|
||||||
using Buckets = Interprocess::Container::Vector<Entry, typename Allocator::template rebind<Entry>::other>;
|
using Buckets = Interprocess::Container::
|
||||||
using Mutexes = Interprocess::Container::Vector<Mutex, typename Allocator::template rebind<Mutex>::other>;
|
Vector<Entry, typename Allocator::template rebind<Entry>::other>;
|
||||||
|
using Mutexes = Interprocess::Container::
|
||||||
|
Vector<Mutex, typename Allocator::template rebind<Mutex>::other>;
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
auto GetAllocator() const
|
auto GetAllocator() const {
|
||||||
{
|
return typename Allocator::template rebind<T>::other(m_allocator);
|
||||||
return typename Allocator::template rebind<T>::other(m_allocator);
|
}
|
||||||
}
|
|
||||||
|
|
||||||
Mutex& GetMutex(std::size_t index)
|
Mutex& GetMutex(std::size_t index) {
|
||||||
{
|
return m_mutexes[index % m_mutexes.size()];
|
||||||
return m_mutexes[index % m_mutexes.size()];
|
}
|
||||||
}
|
|
||||||
|
|
||||||
Allocator m_allocator;
|
Allocator m_allocator;
|
||||||
|
|
||||||
const Setting m_setting;
|
const Setting m_setting;
|
||||||
|
|
||||||
Buckets m_buckets;
|
Buckets m_buckets;
|
||||||
|
|
||||||
Mutexes m_mutexes;
|
Mutexes m_mutexes;
|
||||||
|
|
||||||
HashTablePerfData m_perfData;
|
HashTablePerfData m_perfData;
|
||||||
|
|
||||||
SharedHashTable(const SharedHashTable&) = delete;
|
SharedHashTable(const SharedHashTable&) = delete;
|
||||||
SharedHashTable& operator=(const SharedHashTable&) = delete;
|
SharedHashTable& operator=(const SharedHashTable&) = delete;
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace HashTable
|
} // namespace HashTable
|
||||||
} // namespace L4
|
} // namespace L4
|
||||||
|
|
|
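A small illustration of the sizing guidance in the Entry comment above: with roughly a 10:1 ratio of expected records to buckets (and 16 data slots per Entry plus chaining), bucket utilization stays near 99.98%. The helper below is not part of L4; the ChooseNumBuckets name and the example record count are invented for the sketch.

#include <algorithm>
#include <cstdint>

// Hypothetical helper: derive a bucket count from an expected record count
// using the 10:1 records-to-buckets ratio described in the comment above.
std::uint32_t ChooseNumBuckets(std::uint32_t expectedRecords) {
  return (std::max)(expectedRecords / 10U, 1U);
}

// Example: ~5,000,000 expected records -> 500,000 buckets, each Entry holding
// up to 16 records before chaining to the next Entry.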
@ -2,89 +2,75 @@
#include <boost/optional.hpp>
#include <cassert>
#include <chrono>
#include <cstdint>
#include <memory>
#include "HashTable/IHashTable.h"
#include "Utils/Properties.h"

namespace L4 {

// HashTableConfig struct.
struct HashTableConfig {
  struct Setting {
    using KeySize = IReadOnlyHashTable::Key::size_type;
    using ValueSize = IReadOnlyHashTable::Value::size_type;

    explicit Setting(std::uint32_t numBuckets,
                     boost::optional<std::uint32_t> numBucketsPerMutex = {},
                     boost::optional<KeySize> fixedKeySize = {},
                     boost::optional<ValueSize> fixedValueSize = {})
        : m_numBuckets{numBuckets},
          m_numBucketsPerMutex{numBucketsPerMutex},
          m_fixedKeySize{fixedKeySize},
          m_fixedValueSize{fixedValueSize} {}

    std::uint32_t m_numBuckets;
    boost::optional<std::uint32_t> m_numBucketsPerMutex;
    boost::optional<KeySize> m_fixedKeySize;
    boost::optional<ValueSize> m_fixedValueSize;
  };

  struct Cache {
    Cache(std::uint64_t maxCacheSizeInBytes,
          std::chrono::seconds recordTimeToLive,
          bool forceTimeBasedEviction)
        : m_maxCacheSizeInBytes{maxCacheSizeInBytes},
          m_recordTimeToLive{recordTimeToLive},
          m_forceTimeBasedEviction{forceTimeBasedEviction} {}

    std::uint64_t m_maxCacheSizeInBytes;
    std::chrono::seconds m_recordTimeToLive;
    bool m_forceTimeBasedEviction;
  };

  struct Serializer {
    using Properties = Utils::Properties;

    Serializer(std::shared_ptr<std::istream> stream = {},
               boost::optional<Properties> properties = {})
        : m_stream{stream}, m_properties{properties} {}

    std::shared_ptr<std::istream> m_stream;
    boost::optional<Properties> m_properties;
  };

  HashTableConfig(std::string name,
                  Setting setting,
                  boost::optional<Cache> cache = {},
                  boost::optional<Serializer> serializer = {})
      : m_name{std::move(name)},
        m_setting{std::move(setting)},
        m_cache{cache},
        m_serializer{serializer} {
    assert(m_setting.m_numBuckets > 0U ||
           (m_serializer && (serializer->m_stream != nullptr)));
  }

  std::string m_name;
  Setting m_setting;
  boost::optional<Cache> m_cache;
  boost::optional<Serializer> m_serializer;
};

} // namespace L4
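As a usage sketch (not part of this commit), the struct above can be given an optional Cache to turn a table into a size- and TTL-bounded cache. The table name, size cap, and TTL below are arbitrary example values, and the snippet assumes the HashTableConfig header shown above has been included.

#include <chrono>
#include <cstdint>

// Assumes the HashTableConfig header above is included.
inline L4::HashTableConfig MakeExampleCacheConfig() {
  return L4::HashTableConfig(
      "ExampleCache",                         // Hash table name.
      L4::HashTableConfig::Setting{100000U},  // Number of buckets.
      L4::HashTableConfig::Cache{
          1024ULL * 1024ULL * 1024ULL,  // Max cache size: 1 GB.
          std::chrono::seconds{300},    // Per-record time to live.
          false});                      // Do not force time-based eviction.
}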
@ -5,92 +5,79 @@
#include "Log/PerfCounter.h"
#include "Utils/Properties.h"

namespace L4 {

// IReadOnlyHashTable interface for read-only access to the hash table.
struct IReadOnlyHashTable {
  // Blob struct that represents a memory blob.
  template <typename TSize>
  struct Blob {
    using size_type = TSize;

    explicit Blob(const std::uint8_t* data = nullptr, size_type size = 0U)
        : m_data{data}, m_size{size} {
      static_assert(std::numeric_limits<size_type>::is_integer,
                    "size_type is not an integer.");
    }

    bool operator==(const Blob& other) const {
      return (m_size == other.m_size) && !memcmp(m_data, other.m_data, m_size);
    }

    bool operator!=(const Blob& other) const { return !(*this == other); }

    const std::uint8_t* m_data;
    size_type m_size;
  };

  using Key = Blob<std::uint16_t>;
  using Value = Blob<std::uint32_t>;

  struct IIterator;

  using IIteratorPtr = std::unique_ptr<IIterator>;

  virtual ~IReadOnlyHashTable() = default;

  virtual bool Get(const Key& key, Value& value) const = 0;

  virtual IIteratorPtr GetIterator() const = 0;

  virtual const HashTablePerfData& GetPerfData() const = 0;
};

// IReadOnlyHashTable::IIterator interface for the hash table iterator.
struct IReadOnlyHashTable::IIterator {
  virtual ~IIterator() = default;

  virtual void Reset() = 0;

  virtual bool MoveNext() = 0;

  virtual Key GetKey() const = 0;

  virtual Value GetValue() const = 0;
};

// IWritableHashTable interface for write access to the hash table.
struct IWritableHashTable : public virtual IReadOnlyHashTable {
  struct ISerializer;

  using ISerializerPtr = std::unique_ptr<ISerializer>;

  virtual void Add(const Key& key, const Value& value) = 0;

  virtual bool Remove(const Key& key) = 0;

  virtual ISerializerPtr GetSerializer() const = 0;
};

// IWritableHashTable::ISerializer interface for serializing hash table.
struct IWritableHashTable::ISerializer {
  virtual ~ISerializer() = default;

  virtual void Serialize(std::ostream& stream,
                         const Utils::Properties& properties) = 0;
};

} // namespace L4
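A brief sketch of how the interfaces above compose: any IReadOnlyHashTable can be walked through the IIterator it hands out. The CollectKeys helper and the std::string copies are illustrative only and not part of the library.

#include <string>
#include <vector>

// Assumes the IHashTable header above is included.
std::vector<std::string> CollectKeys(const L4::IReadOnlyHashTable& hashTable) {
  std::vector<std::string> keys;
  auto iterator = hashTable.GetIterator();  // IIteratorPtr (std::unique_ptr).
  while (iterator->MoveNext()) {
    const auto key = iterator->GetKey();  // Key is Blob<std::uint16_t>.
    keys.emplace_back(reinterpret_cast<const char*>(key.m_data), key.m_size);
  }
  return keys;
}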
@ -3,583 +3,515 @@
#include <boost/optional.hpp>
#include <cstdint>
#include <mutex>
#include "Epoch/IEpochActionManager.h"
#include "HashTable/Common/Record.h"
#include "HashTable/Common/SharedHashTable.h"
#include "HashTable/IHashTable.h"
#include "HashTable/ReadWrite/Serializer.h"
#include "Log/PerfCounter.h"
#include "Utils/Exception.h"
#include "Utils/MurmurHash3.h"
#include "Utils/Properties.h"
#include "detail/ToRawPointer.h"

namespace L4 {

// ReadWriteHashTable is a general purpose hash table where the look up is lock
// free.
namespace HashTable {
namespace ReadWrite {

// ReadOnlyHashTable class implements IReadOnlyHashTable interface and provides
// the functionality to read data given a key.
template <typename Allocator>
class ReadOnlyHashTable : public virtual IReadOnlyHashTable {
 public:
  using HashTable = SharedHashTable<RecordBuffer, Allocator>;

  class Iterator;

  explicit ReadOnlyHashTable(
      HashTable& hashTable,
      boost::optional<RecordSerializer> recordSerializer = boost::none)
      : m_hashTable{hashTable},
        m_recordSerializer{
            recordSerializer
                ? *recordSerializer
                : RecordSerializer{m_hashTable.m_setting.m_fixedKeySize,
                                   m_hashTable.m_setting.m_fixedValueSize}} {}

  virtual bool Get(const Key& key, Value& value) const override {
    const auto bucketInfo = GetBucketInfo(key);
    const auto* entry = &m_hashTable.m_buckets[bucketInfo.first];

    while (entry != nullptr) {
      for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) {
        if (bucketInfo.second == entry->m_tags[i]) {
          // There could be a race condition where m_dataList[i] is updated
          // during access. Therefore, load it once and save it (it's safe to
          // store it b/c the memory will not be deleted until ref count becomes
          // 0).
          const auto data =
              entry->m_dataList[i].Load(std::memory_order_acquire);

          if (data != nullptr) {
            const auto record = m_recordSerializer.Deserialize(*data);
            if (record.m_key == key) {
              value = record.m_value;
              return true;
            }
          }
        }
      }

      entry = entry->m_next.Load(std::memory_order_acquire);
    }

    return false;
  }

  virtual IIteratorPtr GetIterator() const override {
    return std::make_unique<Iterator>(m_hashTable, m_recordSerializer);
  }

  virtual const HashTablePerfData& GetPerfData() const override {
    // Synchronizes with any std::memory_order_release if there exists, so that
    // HashTablePerfData has the latest values at the moment when GetPerfData()
    // is called.
    std::atomic_thread_fence(std::memory_order_acquire);
    return m_hashTable.m_perfData;
  }

  ReadOnlyHashTable(const ReadOnlyHashTable&) = delete;
  ReadOnlyHashTable& operator=(const ReadOnlyHashTable&) = delete;

 protected:
  // GetBucketInfo returns a pair, where the first is the index to the bucket
  // and the second is the tag value for the given key.
  // In this hash table, we treat tag value of 0 as empty (see
  // WritableHashTable::Remove()), so in the worst case scenario, where an entry
  // has an empty data list and the tag value returned for the key is 0, the
  // look up cost is up to 6 checks. We can do something smarter by using the
  // unused two bytes per Entry, but since an Entry object fits into CPU cache,
  // the extra overhead should be minimal.
  std::pair<std::uint32_t, std::uint8_t> GetBucketInfo(const Key& key) const {
    std::array<std::uint64_t, 2> hash;
    MurmurHash3_x64_128(key.m_data, key.m_size, 0U, hash.data());

    return {static_cast<std::uint32_t>(hash[0] % m_hashTable.m_buckets.size()),
            static_cast<std::uint8_t>(hash[1])};
  }

  HashTable& m_hashTable;

  RecordSerializer m_recordSerializer;
};

// ReadOnlyHashTable::Iterator class implements IIterator interface and provides
// read-only iterator for the ReadOnlyHashTable.
template <typename Allocator>
class ReadOnlyHashTable<Allocator>::Iterator : public IIterator {
 public:
  Iterator(const HashTable& hashTable,
           const RecordSerializer& recordDeserializer)
      : m_hashTable{hashTable},
        m_recordSerializer{recordDeserializer},
        m_currentBucketIndex{-1},
        m_currentRecordIndex{0U},
        m_currentEntry{nullptr} {}

  Iterator(Iterator&& iterator)
      : m_hashTable{std::move(iterator.m_hashTable)},
        m_recordSerializer{std::move(iterator.recordDeserializer)},
        m_currentBucketIndex{std::move(iterator.m_currentBucketIndex)},
        m_currentRecordIndex{std::move(iterator.m_currentRecordIndex)},
        m_currentEntry{std::move(iterator.m_currentEntry)} {}

  void Reset() override {
    m_currentBucketIndex = -1;
    m_currentRecordIndex = 0U;
    m_currentEntry = nullptr;
  }

  bool MoveNext() override {
    if (IsEnd()) {
      return false;
    }

    if (m_currentEntry != nullptr) {
      MoveToNextData();
    }

    assert(m_currentRecordIndex < HashTable::Entry::c_numDataPerEntry);

    while ((m_currentEntry == nullptr) ||
           (m_currentRecord =
                m_currentEntry->m_dataList[m_currentRecordIndex].Load()) ==
               nullptr) {
      if (m_currentEntry == nullptr) {
        ++m_currentBucketIndex;
        m_currentRecordIndex = 0U;

        if (IsEnd()) {
          return false;
        }

        m_currentEntry = &m_hashTable.m_buckets[m_currentBucketIndex];
      } else {
        MoveToNextData();
      }
    }

    assert(m_currentEntry != nullptr);
    assert(m_currentRecord != nullptr);

    return true;
  }

  Key GetKey() const override {
    if (!IsValid()) {
      throw RuntimeException("HashTableIterator is not correctly used.");
    }

    return m_recordSerializer.Deserialize(*m_currentRecord).m_key;
  }

  Value GetValue() const override {
    if (!IsValid()) {
      throw RuntimeException("HashTableIterator is not correctly used.");
    }

    return m_recordSerializer.Deserialize(*m_currentRecord).m_value;
  }

  Iterator(const Iterator&) = delete;
  Iterator& operator=(const Iterator&) = delete;

 private:
  bool IsValid() const {
    return !IsEnd() && (m_currentEntry != nullptr) &&
           (m_currentRecord != nullptr);
  }

  bool IsEnd() const {
    return m_currentBucketIndex ==
           static_cast<std::int64_t>(m_hashTable.m_buckets.size());
  }

  void MoveToNextData() {
    if (++m_currentRecordIndex >= HashTable::Entry::c_numDataPerEntry) {
      m_currentRecordIndex = 0U;
      m_currentEntry = m_currentEntry->m_next.Load();
    }
  }

  const HashTable& m_hashTable;
  const RecordSerializer& m_recordSerializer;

  std::int64_t m_currentBucketIndex;
  std::uint8_t m_currentRecordIndex;

  const typename HashTable::Entry* m_currentEntry;
  const RecordBuffer* m_currentRecord;
};
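To make the GetBucketInfo() contract above concrete, the sketch below mirrors how the 128-bit MurmurHash3 result is split into a bucket index and a one-byte tag. It is a standalone illustration, not library code: numBuckets is passed in instead of being read from m_buckets.size(), and the BucketAndTag name is invented.

#include <array>
#include <cstdint>
#include <utility>
#include "Utils/MurmurHash3.h"

// Illustrative stand-alone version of the bucket/tag derivation used by
// ReadOnlyHashTable::GetBucketInfo() above.
std::pair<std::uint32_t, std::uint8_t> BucketAndTag(const std::uint8_t* data,
                                                    std::uint16_t size,
                                                    std::uint32_t numBuckets) {
  std::array<std::uint64_t, 2> hash;
  MurmurHash3_x64_128(data, size, 0U, hash.data());

  // hash[0] picks the bucket; the low byte of hash[1] becomes the tag that is
  // compared against Entry::m_tags before the record itself is touched.
  return {static_cast<std::uint32_t>(hash[0] % numBuckets),
          static_cast<std::uint8_t>(hash[1])};
}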
// The following warning is from the virtual inheritance and safe to disable in
// this case. https://msdn.microsoft.com/en-us/library/6b3sy7ae.aspx
#pragma warning(push)
#pragma warning(disable : 4250)

// WritableHashTable class implements IWritableHashTable interface and also
// provides the read only access (Get()) to the hash table. Note the virtual
// inheritance on ReadOnlyHashTable<Allocator> so that any derived class can
// have only one ReadOnlyHashTable base class instance.
template <typename Allocator>
class WritableHashTable : public virtual ReadOnlyHashTable<Allocator>,
                          public IWritableHashTable {
 public:
  using Base = ReadOnlyHashTable<Allocator>;
  using HashTable = typename Base::HashTable;

  WritableHashTable(HashTable& hashTable, IEpochActionManager& epochManager)
      : Base(hashTable), m_epochManager{epochManager} {}

  virtual void Add(const Key& key, const Value& value) override {
    Add(CreateRecordBuffer(key, value));
  }

  virtual bool Remove(const Key& key) override {
    const auto bucketInfo = this->GetBucketInfo(key);

    auto* entry = &(this->m_hashTable.m_buckets[bucketInfo.first]);

    typename HashTable::Lock lock{this->m_hashTable.GetMutex(bucketInfo.first)};

    // Note that similar to Add(), the following block is performed inside a
    // critical section, therefore, it is safe to do "Load"s with
    // memory_order_relaxed.
    while (entry != nullptr) {
      for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) {
        if (bucketInfo.second == entry->m_tags[i]) {
          const auto data =
              entry->m_dataList[i].Load(std::memory_order_relaxed);

          if (data != nullptr) {
            const auto record = this->m_recordSerializer.Deserialize(*data);
            if (record.m_key == key) {
              Remove(*entry, i);
              return true;
            }
          }
        }
      }

      entry = entry->m_next.Load(std::memory_order_relaxed);
    }

    return false;
  }

  virtual ISerializerPtr GetSerializer() const override {
    return std::make_unique<WritableHashTable::Serializer>(this->m_hashTable);
  }

 protected:
  void Add(RecordBuffer* recordToAdd) {
    assert(recordToAdd != nullptr);

    const auto newRecord = this->m_recordSerializer.Deserialize(*recordToAdd);
    const auto& newKey = newRecord.m_key;
    const auto& newValue = newRecord.m_value;

    Stat stat{newKey.m_size, newValue.m_size};

    const auto bucketInfo = this->GetBucketInfo(newKey);

    auto* curEntry = &(this->m_hashTable.m_buckets[bucketInfo.first]);

    typename HashTable::Entry* entryToUpdate = nullptr;
    std::uint8_t curDataIndex = 0U;

    typename HashTable::UniqueLock lock{
        this->m_hashTable.GetMutex(bucketInfo.first)};

    // Note that the following block is performed inside a critical section,
    // therefore, it is safe to do "Load"s with memory_order_relaxed.
    while (curEntry != nullptr) {
      ++stat.m_chainIndex;

      for (std::uint8_t i = 0; i < HashTable::Entry::c_numDataPerEntry; ++i) {
        const auto data =
            curEntry->m_dataList[i].Load(std::memory_order_relaxed);

        if (data == nullptr) {
          if (entryToUpdate == nullptr) {
            // Found an entry with no data set, but still need to go through the
            // end of the list to see if an entry with the given key exists.
            entryToUpdate = curEntry;
            curDataIndex = i;
          }
        } else if (curEntry->m_tags[i] == bucketInfo.second) {
          const auto oldRecord = this->m_recordSerializer.Deserialize(*data);
          if (newKey == oldRecord.m_key) {
            // Will overwrite this entry data.
            entryToUpdate = curEntry;
            curDataIndex = i;
            stat.m_oldValueSize = oldRecord.m_value.m_size;
            break;
          }
        }
      }

      // Found the entry data to replace.
      if (stat.m_oldValueSize != 0U) {
        break;
      }

      // Check if this is the end of the chaining. If so, create a new entry if
      // we haven't found any entry to update along the way.
      if (entryToUpdate == nullptr &&
          curEntry->m_next.Load(std::memory_order_relaxed) == nullptr) {
        curEntry->m_next.Store(
            new (Detail::to_raw_pointer(
                this->m_hashTable
                    .template GetAllocator<typename HashTable::Entry>()
                    .allocate(1U))) typename HashTable::Entry(),
            std::memory_order_release);

        stat.m_isNewEntryAdded = true;
      }

      curEntry = curEntry->m_next.Load(std::memory_order_relaxed);
    }

    assert(entryToUpdate != nullptr);

    auto recordToDelete = UpdateRecord(*entryToUpdate, curDataIndex,
                                       recordToAdd, bucketInfo.second);

    lock.unlock();

    UpdatePerfDataForAdd(stat);

    ReleaseRecord(recordToDelete);
  }

  // The chainIndex is the 1-based index for the given entry in the chained
  // bucket list. It is assumed that this function is called under a lock.
  void Remove(typename HashTable::Entry& entry, std::uint8_t index) {
    auto recordToDelete = UpdateRecord(entry, index, nullptr, 0U);

    assert(recordToDelete != nullptr);

    const auto record = this->m_recordSerializer.Deserialize(*recordToDelete);

    UpdatePerfDataForRemove(
        Stat{record.m_key.m_size, record.m_value.m_size, 0U});

    ReleaseRecord(recordToDelete);
  }

 private:
  struct Stat;

  class Serializer;

  RecordBuffer* CreateRecordBuffer(const Key& key, const Value& value) {
    const auto bufferSize =
        this->m_recordSerializer.CalculateBufferSize(key, value);
    auto buffer = Detail::to_raw_pointer(
        this->m_hashTable.template GetAllocator<std::uint8_t>().allocate(
            bufferSize));

    return this->m_recordSerializer.Serialize(key, value, buffer, bufferSize);
  }

  RecordBuffer* UpdateRecord(typename HashTable::Entry& entry,
                             std::uint8_t index,
                             RecordBuffer* newRecord,
                             std::uint8_t newTag) {
    // This function should be called under a lock, so calling with
    // memory_order_relaxed for Load() is safe.
    auto& recordHolder = entry.m_dataList[index];
    auto oldRecord = recordHolder.Load(std::memory_order_relaxed);

    recordHolder.Store(newRecord, std::memory_order_release);
    entry.m_tags[index] = newTag;

    return oldRecord;
  }

  void ReleaseRecord(RecordBuffer* record) {
    if (record == nullptr) {
      return;
    }

    m_epochManager.RegisterAction([this, record]() {
      record->~RecordBuffer();
      this->m_hashTable.template GetAllocator<RecordBuffer>().deallocate(record,
                                                                         1U);
    });
  }

  void UpdatePerfDataForAdd(const Stat& stat) {
    auto& perfData = this->m_hashTable.m_perfData;

    if (stat.m_oldValueSize != 0U) {
      // Updating the existing record. Therefore, no change in the key size.
      perfData.Add(HashTablePerfCounter::TotalValueSize,
                   static_cast<HashTablePerfData::TValue>(stat.m_valueSize) -
                       stat.m_oldValueSize);
    } else {
      // We are adding a new data instead of replacing.
      perfData.Add(HashTablePerfCounter::TotalKeySize, stat.m_keySize);
      perfData.Add(HashTablePerfCounter::TotalValueSize, stat.m_valueSize);
      perfData.Add(
          HashTablePerfCounter::TotalIndexSize,
          // Record overhead.
          this->m_recordSerializer.CalculateRecordOverhead()
              // Entry overhead if created.
              + (stat.m_isNewEntryAdded ? sizeof(typename HashTable::Entry)
                                        : 0U));

      perfData.Min(HashTablePerfCounter::MinKeySize, stat.m_keySize);
      perfData.Max(HashTablePerfCounter::MaxKeySize, stat.m_keySize);

      perfData.Increment(HashTablePerfCounter::RecordsCount);

      if (stat.m_isNewEntryAdded) {
        perfData.Increment(HashTablePerfCounter::ChainingEntriesCount);

        if (stat.m_chainIndex > 1U) {
          perfData.Max(HashTablePerfCounter::MaxBucketChainLength,
                       stat.m_chainIndex);
        }
      }
    }

    perfData.Min(HashTablePerfCounter::MinValueSize, stat.m_valueSize);
    perfData.Max(HashTablePerfCounter::MaxValueSize, stat.m_valueSize);
  }

  void UpdatePerfDataForRemove(const Stat& stat) {
    auto& perfData = this->m_hashTable.m_perfData;

    perfData.Decrement(HashTablePerfCounter::RecordsCount);
    perfData.Subtract(HashTablePerfCounter::TotalKeySize, stat.m_keySize);
    perfData.Subtract(HashTablePerfCounter::TotalValueSize, stat.m_valueSize);
    perfData.Subtract(HashTablePerfCounter::TotalIndexSize,
                      this->m_recordSerializer.CalculateRecordOverhead());
  }

  IEpochActionManager& m_epochManager;
};

#pragma warning(pop)

// WritableHashTable::Stat struct encapsulates stats for Add()/Remove().
template <typename Allocator>
struct WritableHashTable<Allocator>::Stat {
  using KeySize = Key::size_type;
  using ValueSize = Value::size_type;

  explicit Stat(KeySize keySize = 0U,
                ValueSize valueSize = 0U,
                ValueSize oldValueSize = 0U,
                std::uint32_t chainIndex = 0U,
                bool isNewEntryAdded = false)
      : m_keySize{keySize},
        m_valueSize{valueSize},
        m_oldValueSize{oldValueSize},
        m_chainIndex{chainIndex},
        m_isNewEntryAdded{isNewEntryAdded} {}

  KeySize m_keySize;
  ValueSize m_valueSize;
  ValueSize m_oldValueSize;
  std::uint32_t m_chainIndex;
  bool m_isNewEntryAdded;
};

// WritableHashTable::Serializer class that implements ISerializer, which
// provides the functionality to serialize the WritableHashTable.
template <typename Allocator>
class WritableHashTable<Allocator>::Serializer
    : public IWritableHashTable::ISerializer {
 public:
  explicit Serializer(HashTable& hashTable) : m_hashTable{hashTable} {}

  Serializer(const Serializer&) = delete;
  Serializer& operator=(const Serializer&) = delete;

  void Serialize(std::ostream& stream,
                 const Utils::Properties& /* properties */) override {
    ReadWrite::Serializer<HashTable, ReadWrite::ReadOnlyHashTable>{}.Serialize(
        m_hashTable, stream);
  }

 private:
  HashTable& m_hashTable;
};

} // namespace ReadWrite
} // namespace HashTable
} // namespace L4
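The ReleaseRecord() path above never frees a retired record inline, because lock-free readers may still hold a pointer they loaded before UpdateRecord() swapped it out; the action handed to the epoch manager runs only after those readers have moved on. The sketch below shows that retire/drain idea in isolation. It is not the L4 EpochManager, and all names in it are invented for illustration.

#include <cstdint>
#include <functional>
#include <queue>

// Conceptual illustration only (not L4 code): records retired in epoch E may
// only be reclaimed once every reader has left epochs <= E.
struct RetiredRecord {
  std::uint64_t retiredEpoch;
  std::function<void()> reclaim;  // Destroys and deallocates the record.
};

void DrainRetired(std::queue<RetiredRecord>& retired,
                  std::uint64_t oldestActiveReaderEpoch) {
  while (!retired.empty() &&
         retired.front().retiredEpoch < oldestActiveReaderEpoch) {
    retired.front().reclaim();
    retired.pop();
  }
}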
@ -1,7 +1,7 @@
#pragma once

#include <boost/format.hpp>
#include <cstdint>
#include <iosfwd>
#include "Epoch/IEpochActionManager.h"
#include "Log/PerfCounter.h"
@ -9,27 +9,21 @@
#include "Utils/Exception.h"
#include "Utils/Properties.h"

namespace L4 {
namespace HashTable {
namespace ReadWrite {

// Note that the HashTable template parameter in this file is
// HashTable::ReadWrite::ReadOnlyHashTable<Allocator>::HashTable.
// However, due to the cyclic dependency, it needs to be passed as a template
// type.

// All the deprecated (previous versions) serializers should be put inside the
// Deprecated namespace. Removing any of the Deprecated serializers from the
// source code will require the major package version change.
namespace Deprecated {} // namespace Deprecated

namespace Current {

constexpr std::uint8_t c_version = 1U;
@ -40,189 +34,185 @@ constexpr std::uint8_t c_version = 1U;
// <Key size> <Key bytes> <Value size> <Value bytes>
// Otherwise, end of the records.
template <typename HashTable, template <typename> class ReadOnlyHashTable>
class Serializer {
 public:
  Serializer() = default;

  Serializer(const Serializer&) = delete;
  Serializer& operator=(const Serializer&) = delete;

  void Serialize(HashTable& hashTable, std::ostream& stream) const {
    auto& perfData = hashTable.m_perfData;
    perfData.Set(HashTablePerfCounter::RecordsCountSavedFromSerializer, 0);

    SerializerHelper helper(stream);

    helper.Serialize(c_version);

    helper.Serialize(&hashTable.m_setting, sizeof(hashTable.m_setting));

    ReadOnlyHashTable<typename HashTable::Allocator> readOnlyHashTable(
        hashTable);

    auto iterator = readOnlyHashTable.GetIterator();
    while (iterator->MoveNext()) {
      helper.Serialize(true);  // Indicates record exists.
      const auto key = iterator->GetKey();
      const auto value = iterator->GetValue();

      helper.Serialize(key.m_size);
      helper.Serialize(key.m_data, key.m_size);

      helper.Serialize(value.m_size);
      helper.Serialize(value.m_data, value.m_size);

      perfData.Increment(HashTablePerfCounter::RecordsCountSavedFromSerializer);
    }

    helper.Serialize(false);  // Indicates the end of records.

    // Flush perf counter so that the values are up to date when GetPerfData()
    // is called.
    std::atomic_thread_fence(std::memory_order_release);
  }
};
||||||
// Current Deserializer used for deserializing hash tables.
|
// Current Deserializer used for deserializing hash tables.
|
||||||
template <typename Memory, typename HashTable, template <typename> class WritableHashTable>
|
template <typename Memory,
|
||||||
class Deserializer
|
typename HashTable,
|
||||||
{
|
template <typename>
|
||||||
public:
|
class WritableHashTable>
|
||||||
explicit Deserializer(const Utils::Properties& /* properties */)
|
class Deserializer {
|
||||||
{}
|
public:
|
||||||
|
explicit Deserializer(const Utils::Properties& /* properties */) {}
|
||||||
|
|
||||||
Deserializer(const Deserializer&) = delete;
|
Deserializer(const Deserializer&) = delete;
|
||||||
Deserializer& operator=(const Deserializer&) = delete;
|
Deserializer& operator=(const Deserializer&) = delete;
|
||||||
|
|
||||||
typename Memory::template UniquePtr<HashTable> Deserialize(
|
typename Memory::template UniquePtr<HashTable> Deserialize(
|
||||||
Memory& memory,
|
Memory& memory,
|
||||||
std::istream& stream) const
|
std::istream& stream) const {
|
||||||
{
|
DeserializerHelper helper(stream);
|
||||||
DeserializerHelper helper(stream);
|
|
||||||
|
|
||||||
typename HashTable::Setting setting;
|
typename HashTable::Setting setting;
|
||||||
helper.Deserialize(setting);
|
helper.Deserialize(setting);
|
||||||
|
|
||||||
auto hashTable{ memory.template MakeUnique<HashTable>(
|
auto hashTable{
|
||||||
setting,
|
memory.template MakeUnique<HashTable>(setting, memory.GetAllocator())};
|
||||||
memory.GetAllocator()) };
|
|
||||||
|
|
||||||
EpochActionManager epochActionManager;
|
EpochActionManager epochActionManager;
|
||||||
|
|
||||||
WritableHashTable<typename HashTable::Allocator> writableHashTable(
|
WritableHashTable<typename HashTable::Allocator> writableHashTable(
|
||||||
*hashTable,
|
*hashTable, epochActionManager);
|
||||||
epochActionManager);
|
|
||||||
|
|
||||||
auto& perfData = hashTable->m_perfData;
|
auto& perfData = hashTable->m_perfData;
|
||||||
|
|
||||||
std::vector<std::uint8_t> keyBuffer;
|
std::vector<std::uint8_t> keyBuffer;
|
||||||
std::vector<std::uint8_t> valueBuffer;
|
std::vector<std::uint8_t> valueBuffer;
|
||||||
|
|
||||||
bool hasMoreData = false;
|
bool hasMoreData = false;
|
||||||
helper.Deserialize(hasMoreData);
|
helper.Deserialize(hasMoreData);
|
||||||
|
|
||||||
while (hasMoreData)
|
while (hasMoreData) {
|
||||||
{
|
IReadOnlyHashTable::Key key;
|
||||||
IReadOnlyHashTable::Key key;
|
IReadOnlyHashTable::Value value;
|
||||||
IReadOnlyHashTable::Value value;
|
|
||||||
|
|
||||||
helper.Deserialize(key.m_size);
|
helper.Deserialize(key.m_size);
|
||||||
keyBuffer.resize(key.m_size);
|
keyBuffer.resize(key.m_size);
|
||||||
helper.Deserialize(keyBuffer.data(), key.m_size);
|
helper.Deserialize(keyBuffer.data(), key.m_size);
|
||||||
key.m_data = keyBuffer.data();
|
key.m_data = keyBuffer.data();
|
||||||
|
|
||||||
helper.Deserialize(value.m_size);
|
helper.Deserialize(value.m_size);
|
||||||
valueBuffer.resize(value.m_size);
|
valueBuffer.resize(value.m_size);
|
||||||
helper.Deserialize(valueBuffer.data(), value.m_size);
|
helper.Deserialize(valueBuffer.data(), value.m_size);
|
||||||
value.m_data = valueBuffer.data();
|
value.m_data = valueBuffer.data();
|
||||||
|
|
||||||
writableHashTable.Add(key, value);
|
writableHashTable.Add(key, value);
|
||||||
|
|
||||||
helper.Deserialize(hasMoreData);
|
helper.Deserialize(hasMoreData);
|
||||||
|
|
||||||
perfData.Increment(HashTablePerfCounter::RecordsCountLoadedFromSerializer);
|
perfData.Increment(
|
||||||
}
|
HashTablePerfCounter::RecordsCountLoadedFromSerializer);
|
||||||
|
|
||||||
// Flush perf counter so that the values are up to date when GetPerfData() is called.
|
|
||||||
std::atomic_thread_fence(std::memory_order_release);
|
|
||||||
|
|
||||||
return hashTable;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
// Flush perf counter so that the values are up to date when GetPerfData()
|
||||||
// Deserializer internally uses WritableHashTable for deserialization, therefore
|
// is called.
|
||||||
// an implementation of IEpochActionManager is needed. Since all the keys in the hash table
|
std::atomic_thread_fence(std::memory_order_release);
|
||||||
// are expected to be unique, no RegisterAction() should be called.
|
|
||||||
class EpochActionManager : public IEpochActionManager
|
return hashTable;
|
||||||
{
|
}
|
||||||
public:
|
|
||||||
void RegisterAction(Action&& /* action */) override
|
private:
|
||||||
{
|
// Deserializer internally uses WritableHashTable for deserialization,
|
||||||
// Since it is assumed that the serializer is loading from the stream generated by the same serializer,
|
// therefore an implementation of IEpochActionManager is needed. Since all the
|
||||||
// it is guaranteed that all the keys are unique (a property of a hash table). Therefore, RegisterAction()
|
// keys in the hash table are expected to be unique, no RegisterAction()
|
||||||
// should not be called by the WritableHashTable.
|
// should be called.
|
||||||
throw RuntimeException("RegisterAction() should not be called from the serializer.");
|
class EpochActionManager : public IEpochActionManager {
|
||||||
}
|
public:
|
||||||
};
|
void RegisterAction(Action&& /* action */) override {
|
||||||
|
// Since it is assumed that the serializer is loading from the stream
|
||||||
|
// generated by the same serializer, it is guaranteed that all the keys
|
||||||
|
// are unique (a property of a hash table). Therefore, RegisterAction()
|
||||||
|
// should not be called by the WritableHashTable.
|
||||||
|
throw RuntimeException(
|
||||||
|
"RegisterAction() should not be called from the serializer.");
|
||||||
|
}
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace Current
|
} // namespace Current
|
||||||
|
|
||||||
|
|
||||||
// Serializer is the main driver for serializing a hash table.
|
// Serializer is the main driver for serializing a hash table.
|
||||||
// It always uses the Current::Serializer for serializing a hash table.
|
// It always uses the Current::Serializer for serializing a hash table.
|
||||||
template <typename HashTable, template <typename> class ReadOnlyHashTable>
|
template <typename HashTable, template <typename> class ReadOnlyHashTable>
|
||||||
class Serializer
|
class Serializer {
|
||||||
{
|
public:
|
||||||
public:
|
Serializer() = default;
|
||||||
Serializer() = default;
|
Serializer(const Serializer&) = delete;
|
||||||
Serializer(const Serializer&) = delete;
|
Serializer& operator=(const Serializer&) = delete;
|
||||||
Serializer& operator=(const Serializer&) = delete;
|
|
||||||
|
|
||||||
void Serialize(HashTable& hashTable, std::ostream& stream) const
|
void Serialize(HashTable& hashTable, std::ostream& stream) const {
|
||||||
{
|
Current::Serializer<HashTable, ReadOnlyHashTable>{}.Serialize(hashTable,
|
||||||
Current::Serializer<HashTable, ReadOnlyHashTable>{}.Serialize(hashTable, stream);
|
stream);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
// Deserializer is the main driver for deserializing the input stream to create a hash table.
|
// Deserializer is the main driver for deserializing the input stream to create
|
||||||
template <typename Memory, typename HashTable, template <typename> class WritableHashTable>
|
// a hash table.
|
||||||
class Deserializer
|
template <typename Memory,
|
||||||
{
|
typename HashTable,
|
||||||
public:
|
template <typename>
|
||||||
explicit Deserializer(const Utils::Properties& properties)
|
class WritableHashTable>
|
||||||
: m_properties(properties)
|
class Deserializer {
|
||||||
{}
|
public:
|
||||||
|
explicit Deserializer(const Utils::Properties& properties)
|
||||||
|
: m_properties(properties) {}
|
||||||
|
|
||||||
Deserializer(const Deserializer&) = delete;
|
Deserializer(const Deserializer&) = delete;
|
||||||
Deserializer& operator=(const Deserializer&) = delete;
|
Deserializer& operator=(const Deserializer&) = delete;
|
||||||
|
|
||||||
typename Memory::template UniquePtr<HashTable> Deserialize(
|
typename Memory::template UniquePtr<HashTable> Deserialize(
|
||||||
Memory& memory,
|
Memory& memory,
|
||||||
std::istream& stream) const
|
std::istream& stream) const {
|
||||||
{
|
std::uint8_t version = 0U;
|
||||||
std::uint8_t version = 0U;
|
DeserializerHelper(stream).Deserialize(version);
|
||||||
DeserializerHelper(stream).Deserialize(version);
|
|
||||||
|
|
||||||
switch (version)
|
switch (version) {
|
||||||
{
|
case Current::c_version:
|
||||||
case Current::c_version:
|
return Current::Deserializer<Memory, HashTable, WritableHashTable>{
|
||||||
return Current::Deserializer<Memory, HashTable, WritableHashTable>{ m_properties }.Deserialize(memory, stream);
|
m_properties}
|
||||||
default:
|
.Deserialize(memory, stream);
|
||||||
boost::format err("Unsupported version '%1%' is given.");
|
default:
|
||||||
err % version;
|
boost::format err("Unsupported version '%1%' is given.");
|
||||||
throw RuntimeException(err.str());
|
err % version;
|
||||||
}
|
throw RuntimeException(err.str());
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
const Utils::Properties& m_properties;
|
const Utils::Properties& m_properties;
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace ReadWrite
|
} // namespace ReadWrite
|
||||||
} // namespace HashTable
|
} // namespace HashTable
|
||||||
} // namespace L4
|
} // namespace L4
|
||||||
|
|
||||||
|
|
|
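For reference, the stream written by Current::Serializer above is: a version byte, the raw bytes of the hash table Setting, then a repeated (record-exists flag, key size, key bytes, value size, value bytes) sequence terminated by a false flag. The sketch below only illustrates that framing with plain std::ostream writes; WriteRecord is not part of L4, and the raw byte writes plus the 16-bit key size and 32-bit value size are assumptions standing in for whatever SerializerHelper and Key/Value actually use.

// Illustrative framing only; not the L4 SerializerHelper API.
#include <cstdint>
#include <ostream>
#include <string>

void WriteRecord(std::ostream& stream,
                 const std::string& key,
                 const std::string& value) {
  const bool hasRecord = true;  // Indicates record exists.
  stream.write(reinterpret_cast<const char*>(&hasRecord), sizeof(hasRecord));

  // Assumed widths for illustration.
  const std::uint16_t keySize = static_cast<std::uint16_t>(key.size());
  stream.write(reinterpret_cast<const char*>(&keySize), sizeof(keySize));
  stream.write(key.data(), keySize);

  const std::uint32_t valueSize = static_cast<std::uint32_t>(value.size());
  stream.write(reinterpret_cast<const char*>(&valueSize), sizeof(valueSize));
  stream.write(value.data(), valueSize);
}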
@@ -1,6 +1,7 @@
#pragma once

#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <mutex>
@@ -8,12 +9,9 @@
#include "Interprocess/Connection/EndPointInfo.h"
#include "Interprocess/Utils/Handle.h"

namespace L4 {
namespace Interprocess {
namespace Connection {

// ConnectionMonitor monitors any registered end points.
// ConnectionMonitor creates a kernel event for local end point,
@@ -22,91 +20,84 @@ namespace Connection
// is closed, the callback registered is triggered and the remote endpoint
// is removed from the ConnectionMonitor after the callback is finished..
class ConnectionMonitor
    : public std::enable_shared_from_this<ConnectionMonitor> {
 public:
  using Callback = std::function<void(const EndPointInfo&)>;

  ConnectionMonitor();
  ~ConnectionMonitor();

  const EndPointInfo& GetLocalEndPointInfo() const;

  std::size_t GetRemoteConnectionsCount() const;

  void Register(const EndPointInfo& remoteEndPoint, Callback callback);

  void UnRegister(const EndPointInfo& remoteEndPoint);

  ConnectionMonitor(const ConnectionMonitor&) = delete;
  ConnectionMonitor& operator=(const ConnectionMonitor&) = delete;

 private:
  class HandleMonitor;

  // UnRegister() removes the unregistered end points from m_remoteEvents.
  void UnRegister() const;

  const EndPointInfo m_localEndPoint;

  Utils::Handle m_localEvent;

  mutable std::map<EndPointInfo, std::unique_ptr<HandleMonitor>>
      m_remoteMonitors;

  mutable std::mutex m_mutexOnRemoteMonitors;

  mutable std::vector<EndPointInfo> m_unregisteredEndPoints;

  mutable std::mutex m_mutexOnUnregisteredEndPoints;
};

// ConnectionMonitor::HandleMonitor opens the given endpoint's process
// and event handles and waits for any event triggers.
class ConnectionMonitor::HandleMonitor {
 public:
  HandleMonitor(const EndPointInfo& remoteEndPoint, Callback callback);

  HandleMonitor(const HandleMonitor&) = delete;
  HandleMonitor& operator=(const HandleMonitor&) = delete;

 private:
  class Waiter;

  std::unique_ptr<Waiter> m_eventWaiter;
  std::unique_ptr<Waiter> m_processWaiter;
};

// ConnectionMonitor::HandleMonitor::Waiter waits on the given handle and calls
// the given callback when an event is triggered on the handle.
class ConnectionMonitor::HandleMonitor::Waiter {
 public:
  using Callback = std::function<void()>;

  Waiter(Utils::Handle handle, Callback callback);

  ~Waiter();

  Waiter(const Waiter&) = delete;
  Waiter& operator=(const Waiter&) = delete;

 private:
  static VOID CALLBACK OnEvent(PTP_CALLBACK_INSTANCE instance,
                               PVOID context,
                               PTP_WAIT wait,
                               TP_WAIT_RESULT waitResult);

  Utils::Handle m_handle;
  Callback m_callback;
  std::unique_ptr<TP_WAIT, decltype(&::CloseThreadpoolWait)> m_wait;
};

}  // namespace Connection
}  // namespace Interprocess
}  // namespace L4
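A minimal usage sketch of the monitor declared above, assuming only the public API shown: the endpoint value comes from the peer process and the callback body is illustrative.

#include <iostream>
#include <memory>

using namespace L4::Interprocess::Connection;

void MonitorRemote(const EndPointInfo& remoteEndPoint) {
  auto monitor = std::make_shared<ConnectionMonitor>();

  // The callback fires when the remote endpoint's process or event handle is
  // signaled; the endpoint is then removed from the monitor.
  monitor->Register(remoteEndPoint, [](const EndPointInfo& closed) {
    std::cout << "Remote endpoint closed, pid=" << closed.m_pid << std::endl;
  });
}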
@@ -1,41 +1,31 @@
#pragma once

#include <boost/uuid/uuid.hpp>
#include <cstdint>

namespace L4 {
namespace Interprocess {
namespace Connection {

// EndPointInfo struct encapsulates the connection end point
// information across process boundaries.
struct EndPointInfo {
  explicit EndPointInfo(std::uint32_t pid = 0U,
                        const boost::uuids::uuid& uuid = {})
      : m_pid{pid}, m_uuid{uuid} {}

  bool operator==(const EndPointInfo& other) const {
    return (m_pid == other.m_pid) && (m_uuid == other.m_uuid);
  }

  bool operator<(const EndPointInfo& other) const {
    return m_uuid < other.m_uuid;
  }

  std::uint32_t m_pid;
  boost::uuids::uuid m_uuid;
};

}  // namespace Connection
}  // namespace Interprocess
}  // namespace L4
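Because EndPointInfo defines operator<, it can key an ordered container directly, which is how ConnectionMonitor stores per-endpoint state. A small sketch (the label map is illustrative):

#include <map>
#include <string>

using L4::Interprocess::Connection::EndPointInfo;

std::map<EndPointInfo, std::string> labels;

void Label(const EndPointInfo& endPoint, std::string label) {
  labels[endPoint] = std::move(label);  // Ordered by m_uuid via operator<.
}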
@@ -3,29 +3,23 @@
#include <string>
#include "Interprocess/Connection/EndPointInfo.h"

namespace L4 {
namespace Interprocess {
namespace Connection {

// EndPointInfoFactory creates an EndPointInfo object with the current
// process id and a random uuid.
class EndPointInfoFactory {
 public:
  EndPointInfo Create() const;
};

// StringConverter provides a functionality to convert EndPointInfo to a string.
class StringConverter {
 public:
  std::string operator()(const EndPointInfo& endPoint) const;
};

}  // namespace Connection
}  // namespace Interprocess
}  // namespace L4
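A sketch of the two helpers together, for example to build a log line for the local endpoint; the logging itself is illustrative.

#include <iostream>

using namespace L4::Interprocess::Connection;

void LogLocalEndPoint() {
  // Factory fills in the current process id and a random uuid.
  const EndPointInfo localEndPoint = EndPointInfoFactory().Create();
  std::cout << "Local endpoint: " << StringConverter()(localEndPoint)
            << std::endl;
}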
@@ -2,18 +2,13 @@

#include <boost/interprocess/containers/list.hpp>

namespace L4 {
namespace Interprocess {
namespace Container {

template <typename T, typename Allocator>
using List = boost::interprocess::list<T, Allocator>;

}  // namespace Container
}  // namespace Interprocess
}  // namespace L4
@@ -2,18 +2,14 @@

#include <boost/interprocess/containers/string.hpp>

namespace L4 {
namespace Interprocess {
namespace Container {

template <typename Allocator>
using String =
    boost::interprocess::basic_string<char, std::char_traits<char>, Allocator>;

}  // namespace Container
}  // namespace Interprocess
}  // namespace L4
@@ -2,18 +2,13 @@

#include <boost/interprocess/containers/vector.hpp>

namespace L4 {
namespace Interprocess {
namespace Container {

template <typename T, typename Allocator>
using Vector = boost::interprocess::vector<T, Allocator>;

}  // namespace Container
}  // namespace Interprocess
}  // namespace L4
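These aliases only pay off when paired with an interprocess allocator. A sketch placing a Vector in a boost managed shared memory segment; the segment name, size, and object name are illustrative, not part of L4.

#include <boost/interprocess/allocators/allocator.hpp>
#include <boost/interprocess/managed_shared_memory.hpp>

namespace bip = boost::interprocess;
using ShmAllocator =
    bip::allocator<int, bip::managed_shared_memory::segment_manager>;
using ShmVector = L4::Interprocess::Container::Vector<int, ShmAllocator>;

void Example() {
  bip::managed_shared_memory segment(bip::open_or_create, "L4Example", 65536);
  auto* numbers = segment.find_or_construct<ShmVector>("numbers")(
      ShmAllocator(segment.get_segment_manager()));
  numbers->push_back(42);  // Visible to other processes mapping the segment.
}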
@@ -4,34 +4,31 @@
#include <type_traits>
#include "Utils/Windows.h"

namespace L4 {
namespace Interprocess {
namespace Utils {

// Handle is a RAII class that manages the life time of the given HANDLE.
class Handle {
 public:
  // If verifyHandle is true, it checks whether a given handle is valid.
  explicit Handle(HANDLE handle, bool verifyHandle = false);

  Handle(Handle&& other);

  explicit operator HANDLE() const;

  Handle(const Handle&) = delete;
  Handle& operator=(const Handle&) = delete;
  Handle& operator=(Handle&&) = delete;

 private:
  HANDLE Verify(HANDLE handle, bool verifyHandle) const;

  std::unique_ptr<std::remove_pointer_t<HANDLE>, decltype(&::CloseHandle)>
      m_handle;
};

}  // namespace Utils
}  // namespace Interprocess
}  // namespace L4
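A usage sketch of the RAII wrapper with an ordinary Win32 event; passing verifyHandle as true asks the wrapper to validate the handle it is given.

#include "Utils/Windows.h"

using L4::Interprocess::Utils::Handle;

Handle CreateLocalEvent() {
  // CloseHandle() runs automatically when the returned Handle goes out of
  // scope; the move constructor allows returning it by value.
  return Handle{::CreateEventW(nullptr, TRUE, FALSE, nullptr), true};
}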
@@ -4,52 +4,42 @@
#include "EpochManager.h"
#include "HashTableManager.h"

namespace L4 {
namespace LocalMemory {

class Context : private EpochRefPolicy<EpochManager::TheEpochRefManager> {
 public:
  Context(HashTableManager& hashTableManager,
          EpochManager::TheEpochRefManager& epochRefManager)
      : EpochRefPolicy<EpochManager::TheEpochRefManager>(epochRefManager),
        m_hashTableManager{hashTableManager} {}

  Context(Context&& context)
      : EpochRefPolicy<EpochManager::TheEpochRefManager>(std::move(context)),
        m_hashTableManager{context.m_hashTableManager} {}

  const IReadOnlyHashTable& operator[](const char* name) const {
    return m_hashTableManager.GetHashTable(name);
  }

  IWritableHashTable& operator[](const char* name) {
    return m_hashTableManager.GetHashTable(name);
  }

  const IReadOnlyHashTable& operator[](std::size_t index) const {
    return m_hashTableManager.GetHashTable(index);
  }

  IWritableHashTable& operator[](std::size_t index) {
    return m_hashTableManager.GetHashTable(index);
  }

  Context(const Context&) = delete;
  Context& operator=(const Context&) = delete;

 private:
  HashTableManager& m_hashTableManager;
};

}  // namespace LocalMemory
}  // namespace L4
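Besides index-based lookup, the context also resolves tables by name through the operator[] overloads above. A brief sketch; the table name "Table1" is illustrative.

using namespace L4;

const IReadOnlyHashTable& FindTable(const LocalMemory::Context& context) {
  // Name-based lookup goes through HashTableManager::GetHashTable(name).
  return context["Table1"];
}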
@@ -10,119 +10,113 @@
#include "Utils/Lock.h"
#include "Utils/RunningThread.h"

namespace L4 {
namespace LocalMemory {

// EpochManager aggregates epoch-related functionalities such as adding/removing
// client epoch queues, registering/performing actions, and updating the epoch
// counters.
class EpochManager : public IEpochActionManager {
 public:
  using TheEpochQueue =
      EpochQueue<boost::shared_lock_guard<Utils::ReaderWriterLockSlim>,
                 std::lock_guard<Utils::ReaderWriterLockSlim>>;

  using TheEpochRefManager = EpochRefManager<TheEpochQueue>;

  EpochManager(const EpochManagerConfig& config, ServerPerfData& perfData)
      : m_perfData{perfData},
        m_config{config},
        m_currentEpochCounter{0U},
        m_epochQueue{m_currentEpochCounter, m_config.m_epochQueueSize},
        m_epochRefManager{m_epochQueue},
        m_epochCounterManager{m_epochQueue},
        m_epochActionManager{config.m_numActionQueues},
        m_processingThread{m_config.m_epochProcessingInterval, [this] {
                             this->Remove();
                             this->Add();
                           }} {}

  TheEpochRefManager& GetEpochRefManager() { return m_epochRefManager; }

  void RegisterAction(Action&& action) override {
    m_epochActionManager.RegisterAction(m_currentEpochCounter,
                                        std::move(action));
    m_perfData.Increment(ServerPerfCounter::PendingActionsCount);
  }

  EpochManager(const EpochManager&) = delete;
  EpochManager& operator=(const EpochManager&) = delete;

 private:
  using TheEpochCounterManager = EpochCounterManager<TheEpochQueue>;

  using ProcessingThread = Utils::RunningThread<std::function<void()>>;

  // Enqueues a new epoch whose counter value is last counter + 1.
  // This is called from the server side.
  void Add() {
    // Incrementing the global epoch counter before incrementing per-connection
    // epoch counter is safe (not so the other way around). If the server
    // process is registering an action at the m_currentEpochCounter in
    // RegisterAction(), it is happening in the "future," and this means that if
    // the client is referencing the memory to be deleted in the "future," it
    // will be safe.
    ++m_currentEpochCounter;

    m_epochCounterManager.AddNewEpoch();
  }

  // Dequeues any epochs whose ref counter is 0, meaning there is no reference
  // at that time.
  void Remove() {
    const auto oldestEpochCounter =
        m_epochCounterManager.RemoveUnreferenceEpochCounters();

    const auto numActionsPerformed =
        m_epochActionManager.PerformActions(oldestEpochCounter);

    m_perfData.Subtract(ServerPerfCounter::PendingActionsCount,
                        numActionsPerformed);
    m_perfData.Set(ServerPerfCounter::LastPerformedActionsCount,
                   numActionsPerformed);
    m_perfData.Set(ServerPerfCounter::OldestEpochCounterInQueue,
                   oldestEpochCounter);
    m_perfData.Set(ServerPerfCounter::LatestEpochCounterInQueue,
                   m_currentEpochCounter);
  }

  // Reference to the performance data.
  ServerPerfData& m_perfData;

  // Configuration related to epoch manager.
  EpochManagerConfig m_config;

  // The global current epoch counter.
#if defined(_MSC_VER)
  std::atomic_uint64_t m_currentEpochCounter;
#else
  std::atomic<std::uint64_t> m_currentEpochCounter;
#endif

  // Epoch queue.
  TheEpochQueue m_epochQueue;

  // Handles adding/decrementing ref counts.
  TheEpochRefManager m_epochRefManager;

  // Handles adding new epoch and finding the epoch counts that have zero ref
  // counts.
  TheEpochCounterManager m_epochCounterManager;

  // Handles registering/performing actions.
  EpochActionManager m_epochActionManager;

  // Thread responsible for updating the current epoch counter,
  // removing the unreferenced epoch counter, etc.
  // Should be the last member so that it gets destroyed first.
  ProcessingThread m_processingThread;
};

}  // namespace LocalMemory
}  // namespace L4
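The typical caller of RegisterAction() is a writable hash table that must defer freeing a record until no reader epoch can still reference it. A sketch of that pattern, assuming Action accepts a plain void() callable; the record buffer and its deleter are illustrative.

#include <cstdint>

using namespace L4;

void RetireRecord(IEpochActionManager& epochActionManager,
                  std::uint8_t* recordBuffer) {
  // The action runs only after every epoch at or before the current counter
  // has been released by all readers.
  epochActionManager.RegisterAction(
      [recordBuffer]() { delete[] recordBuffer; });
}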
@@ -3,104 +3,98 @@
#include <boost/any.hpp>
#include <memory>
#include <vector>
#include "Epoch/IEpochActionManager.h"
#include "HashTable/Cache/HashTable.h"
#include "HashTable/Config.h"
#include "HashTable/ReadWrite/HashTable.h"
#include "HashTable/ReadWrite/Serializer.h"
#include "LocalMemory/Memory.h"
#include "Utils/Containers.h"
#include "Utils/Exception.h"

namespace L4 {
namespace LocalMemory {

class HashTableManager {
 public:
  template <typename Allocator>
  std::size_t Add(const HashTableConfig& config,
                  IEpochActionManager& epochActionManager,
                  Allocator allocator) {
    if (m_hashTableNameToIndex.find(config.m_name) !=
        m_hashTableNameToIndex.end()) {
      throw RuntimeException("Same hash table name already exists.");
    }

    const auto& cacheConfig = config.m_cache;
    const auto& serializerConfig = config.m_serializer;

    if (cacheConfig && serializerConfig) {
      throw RuntimeException(
          "Constructing cache hash table via serializer is not supported.");
    }

    using namespace HashTable;

    using InternalHashTable =
        typename ReadWrite::WritableHashTable<Allocator>::HashTable;
    using Memory = typename LocalMemory::Memory<Allocator>;

    Memory memory{allocator};

    std::shared_ptr<InternalHashTable> internalHashTable =
        (serializerConfig && serializerConfig->m_stream != nullptr)
            ? ReadWrite::Deserializer<Memory, InternalHashTable,
                                      ReadWrite::WritableHashTable>(
                  serializerConfig->m_properties.get_value_or(
                      HashTableConfig::Serializer::Properties()))
                  .Deserialize(memory, *(serializerConfig->m_stream))
            : memory.template MakeUnique<InternalHashTable>(
                  typename InternalHashTable::Setting{
                      config.m_setting.m_numBuckets,
                      (std::max)(
                          config.m_setting.m_numBucketsPerMutex.get_value_or(
                              1U),
                          1U),
                      config.m_setting.m_fixedKeySize.get_value_or(0U),
                      config.m_setting.m_fixedValueSize.get_value_or(0U)},
                  memory.GetAllocator());

    auto hashTable =
        cacheConfig ? std::make_unique<Cache::WritableHashTable<Allocator>>(
                          *internalHashTable, epochActionManager,
                          cacheConfig->m_maxCacheSizeInBytes,
                          cacheConfig->m_recordTimeToLive,
                          cacheConfig->m_forceTimeBasedEviction)
                    : std::make_unique<ReadWrite::WritableHashTable<Allocator>>(
                          *internalHashTable, epochActionManager);

    m_internalHashTables.emplace_back(std::move(internalHashTable));
    m_hashTables.emplace_back(std::move(hashTable));

    const auto newIndex = m_hashTables.size() - 1;

    m_hashTableNameToIndex.emplace(config.m_name, newIndex);

    return newIndex;
  }

  IWritableHashTable& GetHashTable(const char* name) {
    assert(m_hashTableNameToIndex.find(name) != m_hashTableNameToIndex.cend());
    return GetHashTable(m_hashTableNameToIndex.find(name)->second);
  }

  IWritableHashTable& GetHashTable(std::size_t index) {
    assert(index < m_hashTables.size());
    return *m_hashTables[index];
  }

 private:
  Utils::StdStringKeyMap<std::size_t> m_hashTableNameToIndex;

  std::vector<boost::any> m_internalHashTables;
  std::vector<std::unique_ptr<IWritableHashTable>> m_hashTables;
};

}  // namespace LocalMemory
}  // namespace L4
@@ -5,42 +5,35 @@
#include "HashTable/Config.h"
#include "Log/PerfCounter.h"

namespace L4 {
namespace LocalMemory {

class HashTableService {
 public:
  explicit HashTableService(
      const EpochManagerConfig& epochManagerConfig = EpochManagerConfig())
      : m_epochManager{epochManagerConfig, m_serverPerfData} {}

  template <typename Allocator = std::allocator<void>>
  std::size_t AddHashTable(const HashTableConfig& config,
                           Allocator allocator = Allocator()) {
    return m_hashTableManager.Add(config, m_epochManager, allocator);
  }

  Context GetContext() {
    return Context(m_hashTableManager, m_epochManager.GetEpochRefManager());
  }

 private:
  ServerPerfData m_serverPerfData;

  HashTableManager m_hashTableManager;

  // Make sure HashTableManager is destroyed before EpochManager b/c
  // it is possible that EpochManager could be processing Epoch Actions
  // on hash tables.
  EpochManager m_epochManager;
};

}  // namespace LocalMemory
}  // namespace L4
@@ -1,50 +1,40 @@
#pragma once

namespace L4 {
namespace LocalMemory {

// Simple local memory model that stores the given allocator object.
template <typename Alloc>
class Memory {
 public:
  using Allocator = Alloc;

  template <typename T>
  using UniquePtr = std::unique_ptr<T>;

  template <typename T>
  using Deleter = typename std::default_delete<T>;

  explicit Memory(Allocator allocator = Allocator()) : m_allocator{allocator} {}

  template <typename T, typename... Args>
  auto MakeUnique(Args&&... args) {
    return std::make_unique<T>(std::forward<Args>(args)...);
  }

  Allocator GetAllocator() { return Allocator(m_allocator); }

  template <typename T>
  auto GetDeleter() {
    return Deleter<T>();
  }

  Memory(const Memory&) = delete;
  Memory& operator=(const Memory&) = delete;

 private:
  Allocator m_allocator;
};

}  // namespace LocalMemory
}  // namespace L4
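A small sketch of the memory model in isolation; for local memory the default std::allocator<void> is enough, and the example values are illustrative.

#include <memory>

using namespace L4;

void Example() {
  LocalMemory::Memory<std::allocator<void>> memory;
  // UniquePtr<int> is std::unique_ptr<int> in this memory model.
  auto number = memory.MakeUnique<int>(42);
  *number += 1;
}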
@@ -3,36 +3,29 @@
#include <map>
#include "PerfCounter.h"

namespace L4 {

// IPerfLogger interface.
struct IPerfLogger {
  struct IData;

  virtual ~IPerfLogger() = default;

  virtual void Log(const IData& data) = 0;
};

// IPerfLogger::IData interface that provides access to ServerPerfData and the
// aggregated HashTablePerfData. Note that the user of IPerfLogger only needs to
// implement IPerfLogger since IPerfLogger::IData is implemented internally.
struct IPerfLogger::IData {
  using HashTablesPerfData =
      std::map<std::string, std::reference_wrapper<const HashTablePerfData>>;

  virtual ~IData() = default;

  virtual const ServerPerfData& GetServerPerfData() const = 0;

  virtual const HashTablesPerfData& GetHashTablesPerfData() const = 0;
};

}  // namespace L4
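A minimal IPerfLogger implementation sketch that only walks the per-table map; which individual counters to print from ServerPerfData or HashTablePerfData is left out, since their accessors are defined elsewhere. ConsolePerfLogger is an illustrative name, not part of L4.

#include <iostream>

using namespace L4;

class ConsolePerfLogger : public IPerfLogger {
 public:
  void Log(const IData& data) override {
    // GetHashTablesPerfData() maps table names to their HashTablePerfData.
    for (const auto& entry : data.GetHashTablesPerfData()) {
      std::cout << "Hash table: " << entry.first << std::endl;
    }
  }
};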
@@ -1,223 +1,191 @@
#pragma once

#include <algorithm>
#include <array>
#include <atomic>
#include <cstdint>
#include <limits>

namespace L4 {

enum class ServerPerfCounter : std::uint16_t {
  // Connection Manager
  ClientConnectionsCount = 0U,

  // EpochManager
  OldestEpochCounterInQueue,
  LatestEpochCounterInQueue,
  PendingActionsCount,
  LastPerformedActionsCount,

  Count
};

const std::array<const char*,
                 static_cast<std::uint16_t>(ServerPerfCounter::Count)>
    c_serverPerfCounterNames = {
        // Connection Manager
        "ClientConnectionsCount",

        // EpochManager
        "OldestEpochCounterInQueue", "LatestEpochCounterInQueue",
        "PendingActionsCount", "LastPerformedActionsCount"};

enum class HashTablePerfCounter : std::uint16_t {
  RecordsCount = 0U,
  BucketsCount,
  TotalKeySize,
  TotalValueSize,
  TotalIndexSize,
  ChainingEntriesCount,

  // Max/Min counters are always increasing. In other words, we don't keep track
  // of the next max record size, when the max record is deleted.
  MinKeySize,
  MaxKeySize,
  MinValueSize,
  MaxValueSize,
  MaxBucketChainLength,

  RecordsCountLoadedFromSerializer,
  RecordsCountSavedFromSerializer,

  // CacheHashTable specific counters.
  CacheHitCount,
  CacheMissCount,
  EvictedRecordsCount,

  Count
};

const std::array<const char*,
                 static_cast<std::uint16_t>(HashTablePerfCounter::Count)>
    c_hashTablePerfCounterNames = {"RecordsCount",
                                   "BucketsCount",
                                   "TotalKeySize",
                                   "TotalValueSize",
                                   "TotalIndexSize",
                                   "ChainingEntriesCount",
                                   "MinKeySize",
                                   "MaxKeySize",
                                   "MinValueSize",
                                   "MaxValueSize",
                                   "MaxBucketChainLength",
                                   "RecordsCountLoadedFromSerializer",
                                   "RecordsCountSavedFromSerializer",
                                   "CacheHitCount",
                                   "CacheMissCount",
                                   "EvictedRecordsCount"};

template <typename TCounterEnum>
class PerfCounters {
 public:
  typedef std::int64_t TValue;
  typedef std::atomic<TValue> TCounter;

  PerfCounters() {
    std::for_each(std::begin(m_counters), std::end(m_counters),
                  [](TCounter& counter) { counter = 0; });
  }

  // Note that since the ordering doesn't matter when the counter is updated,
  // memory_order_relaxed is used for all perf counter updates. More from
  // http://en.cppreference.com/w/cpp/atomic/memory_order: Typical use for
  // relaxed memory ordering is updating counters, such as the reference
  // counters of std::shared_ptr, since this only requires atomicity, but not
  // ordering or synchronization.
  TValue Get(TCounterEnum counterEnum) const {
    return m_counters[static_cast<std::uint16_t>(counterEnum)].load(
        std::memory_order_relaxed);
  }

  void Set(TCounterEnum counterEnum, TValue value) {
    m_counters[static_cast<std::uint16_t>(counterEnum)].store(
        value, std::memory_order_relaxed);
  }

  void Increment(TCounterEnum counterEnum) {
    m_counters[static_cast<std::uint16_t>(counterEnum)].fetch_add(
        1, std::memory_order_relaxed);
  }

  void Decrement(TCounterEnum counterEnum) {
    m_counters[static_cast<std::uint16_t>(counterEnum)].fetch_sub(
        1, std::memory_order_relaxed);
  }

  void Add(TCounterEnum counterEnum, TValue value) {
    if (value != 0) {
      m_counters[static_cast<std::uint16_t>(counterEnum)].fetch_add(
          value, std::memory_order_relaxed);
    }
  }

  void Subtract(TCounterEnum counterEnum, TValue value) {
    if (value != 0) {
      m_counters[static_cast<std::uint16_t>(counterEnum)].fetch_sub(
          value, std::memory_order_relaxed);
    }
  }

  void Max(TCounterEnum counterEnum, TValue value) {
    auto& counter = m_counters[static_cast<std::uint16_t>(counterEnum)];

    TValue startValue = counter.load(std::memory_order_acquire);

    do {
      // "load()" from counter is needed only once since the value of Max is
      // monotonically increasing. If startValue is changed by other threads,
      // compare_exchange_strong will return false and startValue will be
      // written to the latest value, thus returning to this code path.
      if (startValue > value) {
        return;
      }
    } while (!counter.compare_exchange_strong(startValue, value,
                                              std::memory_order_release,
                                              std::memory_order_acquire));
  }

  void Min(TCounterEnum counterEnum, TValue value) {
    auto& counter = m_counters[static_cast<std::uint16_t>(counterEnum)];

    TValue startValue = counter.load(std::memory_order_acquire);
    do {
      // Check the comment in Max() and Min() is monotonically decreasing.
      if (startValue < value) {
|
||||||
m_counters[static_cast<std::uint16_t>(counterEnum)].fetch_sub(value, std::memory_order_relaxed);
|
return;
|
||||||
}
|
}
|
||||||
}
|
} while (!counter.compare_exchange_strong(startValue, value,
|
||||||
|
std::memory_order_release,
|
||||||
|
std::memory_order_acquire));
|
||||||
|
}
|
||||||
|
|
||||||
void Max(TCounterEnum counterEnum, TValue value)
|
private:
|
||||||
{
|
|
||||||
auto& counter = m_counters[static_cast<std::uint16_t>(counterEnum)];
|
|
||||||
|
|
||||||
TValue startValue = counter.load(std::memory_order_acquire);
|
|
||||||
|
|
||||||
do
|
|
||||||
{
|
|
||||||
// "load()" from counter is needed only once since the value of Max is
|
|
||||||
// monotonically increasing. If startValue is changed by other threads,
|
|
||||||
// compare_exchange_strong will return false and startValue will be
|
|
||||||
// written to the latest value, thus returning to this code path.
|
|
||||||
if (startValue > value)
|
|
||||||
{
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
while (!counter.compare_exchange_strong(
|
|
||||||
startValue,
|
|
||||||
value,
|
|
||||||
std::memory_order_release,
|
|
||||||
std::memory_order_acquire));
|
|
||||||
}
|
|
||||||
|
|
||||||
void Min(TCounterEnum counterEnum, TValue value)
|
|
||||||
{
|
|
||||||
auto& counter = m_counters[static_cast<std::uint16_t>(counterEnum)];
|
|
||||||
|
|
||||||
TValue startValue = counter.load(std::memory_order_acquire);
|
|
||||||
do
|
|
||||||
{
|
|
||||||
// Check the comment in Max() and Min() is monotonically decreasing.
|
|
||||||
if (startValue < value)
|
|
||||||
{
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
while (!counter.compare_exchange_strong(
|
|
||||||
startValue,
|
|
||||||
value,
|
|
||||||
std::memory_order_release,
|
|
||||||
std::memory_order_acquire));
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
|
||||||
#if defined(_MSC_VER)
|
#if defined(_MSC_VER)
|
||||||
__declspec(align(8)) TCounter m_counters[TCounterEnum::Count];
|
__declspec(align(8)) TCounter m_counters[TCounterEnum::Count];
|
||||||
#else
|
#else
|
||||||
#if defined(__GNUC__)
|
#if defined(__GNUC__)
|
||||||
TCounter m_counters[static_cast<size_t>(TCounterEnum::Count)]
|
TCounter m_counters[static_cast<size_t>(TCounterEnum::Count)]
|
||||||
__attribute__((aligned(8)));
|
__attribute__((aligned(8)));
|
||||||
#endif
|
#endif
|
||||||
#endif
|
#endif
|
||||||
};
|
};
|
||||||
|
|
||||||
typedef PerfCounters<ServerPerfCounter> ServerPerfData;
|
typedef PerfCounters<ServerPerfCounter> ServerPerfData;
|
||||||
|
|
||||||
struct HashTablePerfData : public PerfCounters<HashTablePerfCounter>
|
struct HashTablePerfData : public PerfCounters<HashTablePerfCounter> {
|
||||||
{
|
HashTablePerfData() {
|
||||||
HashTablePerfData()
|
// Initialize any min counters to the max value.
|
||||||
{
|
const auto maxValue =
|
||||||
// Initialize any min counters to the max value.
|
(std::numeric_limits<HashTablePerfData::TValue>::max)();
|
||||||
const auto maxValue = (std::numeric_limits<HashTablePerfData::TValue>::max)();
|
|
||||||
|
|
||||||
Set(HashTablePerfCounter::MinValueSize, maxValue);
|
Set(HashTablePerfCounter::MinValueSize, maxValue);
|
||||||
Set(HashTablePerfCounter::MinKeySize, maxValue);
|
Set(HashTablePerfCounter::MinKeySize, maxValue);
|
||||||
|
|
||||||
// MaxBucketChainLength starts with 1 since bucket already
|
// MaxBucketChainLength starts with 1 since bucket already
|
||||||
// contains the entry which stores the data.
|
// contains the entry which stores the data.
|
||||||
Set(HashTablePerfCounter::MaxBucketChainLength, 1);
|
Set(HashTablePerfCounter::MaxBucketChainLength, 1);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace L4
|
} // namespace L4
|
||||||
|
|
|
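For reference, a minimal usage sketch of the counter API defined above; the instance and the literal sizes are illustrative only, while the enum values and member functions are the ones declared in this header.

// Illustrative only: exercising the PerfCounters API from above.
HashTablePerfData perfData;
perfData.Increment(HashTablePerfCounter::RecordsCount);
perfData.Add(HashTablePerfCounter::TotalKeySize, 16);  // e.g. a 16-byte key
perfData.Max(HashTablePerfCounter::MaxKeySize, 16);    // monotonically increasing
perfData.Min(HashTablePerfCounter::MinKeySize, 16);    // monotonically decreasing
const auto recordsCount =
    perfData.Get(HashTablePerfCounter::RecordsCount);  // == 1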
@ -2,55 +2,48 @@
#include "IPerfLogger.h"

namespace L4 {

struct PerfLoggerManagerConfig;

// PerfData class, which holds the ServerPerfData and HashTablePerfData for each
// hash table. Note that PerfData owns the ServerPerfData but has only the const
// references to HashTablePerfData, which is owned by the HashTable.
class PerfData : public IPerfLogger::IData {
 public:
  PerfData() = default;

  ServerPerfData& GetServerPerfData();

  const ServerPerfData& GetServerPerfData() const override;

  const HashTablesPerfData& GetHashTablesPerfData() const override;

  void AddHashTablePerfData(const char* hashTableName,
                            const HashTablePerfData& perfData);

  PerfData(const PerfData&) = delete;
  PerfData& operator=(const PerfData&) = delete;

 private:
  ServerPerfData m_serverPerfData;
  HashTablesPerfData m_hashTablesPerfData;
};

// PerfData inline implementations.

inline ServerPerfData& PerfData::GetServerPerfData() {
  return m_serverPerfData;
}

inline const ServerPerfData& PerfData::GetServerPerfData() const {
  return m_serverPerfData;
}

inline const PerfData::HashTablesPerfData& PerfData::GetHashTablesPerfData()
    const {
  return m_hashTablesPerfData;
}

}  // namespace L4
@ -3,62 +3,48 @@
#include <cstdint>
#include <iosfwd>

namespace L4 {

// SerializerHelper provides help functions to write to IStreamWriter.
class SerializerHelper {
 public:
  SerializerHelper(std::ostream& stream) : m_stream{stream} {}

  SerializerHelper(const SerializerHelper&) = delete;
  SerializerHelper& operator=(const SerializerHelper&) = delete;

  template <typename T>
  void Serialize(const T& obj) {
    m_stream.write(reinterpret_cast<const char*>(&obj), sizeof(obj));
  }

  void Serialize(const void* data, std::uint32_t dataSize) {
    m_stream.write(static_cast<const char*>(data), dataSize);
  }

 private:
  std::ostream& m_stream;
};

// DeserializerHelper provides help functions to read from IStreamReader.
class DeserializerHelper {
 public:
  DeserializerHelper(std::istream& stream) : m_stream{stream} {}

  DeserializerHelper(const DeserializerHelper&) = delete;
  DeserializerHelper& operator=(const DeserializerHelper&) = delete;

  template <typename T>
  void Deserialize(T& obj) {
    m_stream.read(reinterpret_cast<char*>(&obj), sizeof(obj));
  }

  void Deserialize(void* data, std::uint32_t dataSize) {
    m_stream.read(static_cast<char*>(data), dataSize);
  }

 private:
  std::istream& m_stream;
};

}  // namespace L4
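As a side note, a minimal round trip through the two helpers above; the std::stringstream (from <sstream>) is only a stand-in for whatever stream the caller actually provides.

// Illustrative round trip: write a value out, then read it back.
std::stringstream stream;

L4::SerializerHelper serializer{stream};
std::uint32_t written = 42U;
serializer.Serialize(written);

L4::DeserializerHelper deserializer{stream};
std::uint32_t read = 0U;
deserializer.Deserialize(read);  // read == 42U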
@ -1,60 +1,56 @@
#pragma once

#include <boost/interprocess/offset_ptr.hpp>
#include <boost/version.hpp>
#include <cstdint>

namespace L4 {
namespace Utils {

// AtomicOffsetPtr provides a way to atomically update the offset pointer.
// The current boost::interprocess::offset_ptr cannot be used with std::atomic<>
// because the class is not trivially copyable. AtomicOffsetPtr borrows the same
// concept to calculate the pointer address based on the offset
// (boost::interprocess::ipcdetail::offset_ptr_to* functions are reused). Note
// that ->, *, copy/assignment operators are not implemented intentionally so
// that the user (inside this library) is aware of what they intend to do
// without accidentally incurring any performance hits.
template <typename T>
class AtomicOffsetPtr {
 public:
  AtomicOffsetPtr() : m_offset(1) {}

  AtomicOffsetPtr(const AtomicOffsetPtr&) = delete;
  AtomicOffsetPtr& operator=(const AtomicOffsetPtr&) = delete;

  T* Load(std::memory_order memoryOrder = std::memory_order_seq_cst) const {
    return static_cast<T*>(
        boost::interprocess::ipcdetail::offset_ptr_to_raw_pointer(
            this, m_offset.load(memoryOrder)));
  }

  void Store(T* ptr,
             std::memory_order memoryOrder = std::memory_order_seq_cst) {
#if defined(_MSC_VER)
    m_offset.store(
        boost::interprocess::ipcdetail::offset_ptr_to_offset(ptr, this),
        memoryOrder);
#else
    m_offset.store(
        boost::interprocess::ipcdetail::offset_ptr_to_offset<std::uintptr_t>(
            ptr, this),
        memoryOrder);
#endif
  }

 private:
#if defined(_MSC_VER)
  std::atomic_uint64_t m_offset;
#else
  std::atomic<std::uint64_t> m_offset;
#endif
};

}  // namespace Utils
}  // namespace L4
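A small usage sketch for AtomicOffsetPtr; the Node type and the acquire/release orders below are illustrative choices, not something the class prescribes.

// Illustrative: publish a pointer and read it back through the offset.
struct Node {
  int value;
};

Node node{10};
L4::Utils::AtomicOffsetPtr<Node> nodePtr;
nodePtr.Store(&node, std::memory_order_release);

Node* loaded = nodePtr.Load(std::memory_order_acquire);
// loaded->value == 10; only the offset relative to the AtomicOffsetPtr object
// is stored internally, which is what makes it usable across shared memory.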
@ -2,23 +2,16 @@
#include <chrono>

namespace L4 {
namespace Utils {

class EpochClock {
 public:
  std::chrono::seconds GetCurrentEpochTime() const {
    return std::chrono::duration_cast<std::chrono::seconds>(
        std::chrono::high_resolution_clock::now().time_since_epoch());
  }
};

}  // namespace Utils
}  // namespace L4
@ -1,72 +1,62 @@
#pragma once

#include <boost/functional/hash.hpp>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <string>

#if defined(__GNUC__)
#define _stricmp strcasecmp
#endif

namespace L4 {
namespace Utils {

// CaseInsensitiveStdStringComparer is a STL-compatible case-insensitive ANSI
// std::string comparer.
struct CaseInsensitiveStdStringComparer {
  bool operator()(const std::string& str1, const std::string& str2) const {
    return _stricmp(str1.c_str(), str2.c_str()) == 0;
  }
};

// CaseInsensitiveStringComparer is a STL-compatible case-insensitive ANSI
// string comparer.
struct CaseInsensitiveStringComparer {
  bool operator()(const char* const str1, const char* const str2) const {
    return _stricmp(str1, str2) == 0;
  }
};

// CaseInsensitiveStdStringHasher is a STL-compatible case-insensitive ANSI
// std::string hasher.
struct CaseInsensitiveStdStringHasher {
  std::size_t operator()(const std::string& str) const {
    std::size_t seed = 0;

    for (auto c : str) {
      boost::hash_combine(seed, std::toupper(c));
    }

    return seed;
  }
};

// CaseInsensitiveStringHasher is a STL-compatible case-insensitive ANSI string
// hasher.
struct CaseInsensitiveStringHasher {
  std::size_t operator()(const char* str) const {
    assert(str != nullptr);

    std::size_t seed = 0;

    while (*str) {
      boost::hash_combine(seed, std::toupper(*str++));
    }

    return seed;
  }
};

}  // namespace Utils
}  // namespace L4
@ -1,45 +1,37 @@
#pragma once

#include <boost/functional/hash.hpp>
#include <cstdint>
#include <string>
#include <unordered_map>

#include "Utils/ComparerHasher.h"

namespace L4 {
namespace Utils {

// StdStringKeyMap is an unordered_map where the key is std::string. It is
// slower than StringKeyMap below, but it owns the memory of the string, so it's
// easier to use.
template <typename TValue>
using StdStringKeyMap =
    std::unordered_map<std::string,
                       TValue,
                       Utils::CaseInsensitiveStdStringHasher,
                       Utils::CaseInsensitiveStdStringComparer>;

// StringKeyMap is an unordered_map where the key is const char*.
// The memory of the key is not owned by StringKeyMap,
// but it is faster (than StdStringKeyMap above) for look up.
template <typename TValue>
using StringKeyMap = std::unordered_map<const char*,
                                        TValue,
                                        Utils::CaseInsensitiveStringHasher,
                                        Utils::CaseInsensitiveStringComparer>;

// IntegerKeyMap using boost::hash and std::equal_to comparer and hasher.
template <typename TKey, typename TValue>
using IntegerKeyMap =
    std::unordered_map<TKey, TValue, boost::hash<TKey>, std::equal_to<TKey>>;

}  // namespace Utils
}  // namespace L4
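A short sketch of the aliases in use; the key and value below are made up, but the case-insensitive lookup follows from the hasher/comparer types pulled in from Utils/ComparerHasher.h.

// Illustrative: lookups ignore ASCII case thanks to the custom hasher/comparer.
L4::Utils::StdStringKeyMap<int> settings;
settings.emplace("BucketCount", 1000);

const auto it = settings.find("bucketcount");  // found despite the case change
const bool found = (it != settings.end());     // true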
@ -1,22 +1,18 @@
#pragma once

#include <stdexcept>
#include <string>

namespace L4 {

// RuntimeException class used across L4 library.
class RuntimeException : public std::runtime_error {
 public:
  explicit RuntimeException(const std::string& message)
      : std::runtime_error(message.c_str()) {}

  explicit RuntimeException(const char* message)
      : std::runtime_error(message) {}
};

}  // namespace L4
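It is thrown and caught like any std::runtime_error; a trivial, purely illustrative sketch:

// Illustrative: RuntimeException participates in standard exception handling.
try {
  throw L4::RuntimeException("Duplicate end point found.");
} catch (const std::exception& e) {
  // e.what() == "Duplicate end point found."
}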
@ -12,150 +12,96 @@
#endif
#endif

namespace L4 {
namespace Utils {

#if defined(_MSC_VER)

// Represents a RAII wrapper for Win32 CRITICAL_SECTION.
class CriticalSection : protected ::CRITICAL_SECTION {
 public:
  // Constructs and initializes the critical section.
  CriticalSection() { ::InitializeCriticalSection(this); }

  CriticalSection(const CriticalSection& other) = delete;
  CriticalSection& operator=(const CriticalSection& other) = delete;

  // Destructs the critical section.
  ~CriticalSection() { ::DeleteCriticalSection(this); }

  // Waits for ownership of the critical section.
  void lock() { ::EnterCriticalSection(this); }

  // Releases ownership of the critical section.
  void unlock() { ::LeaveCriticalSection(this); }
};

// Represents a RAII wrapper for Win32 SRW lock.
class ReaderWriterLockSlim {
 public:
  // Constructs and initializes an SRW lock.
  ReaderWriterLockSlim() { ::InitializeSRWLock(&m_lock); }

  ReaderWriterLockSlim(const ReaderWriterLockSlim& other) = delete;
  ReaderWriterLockSlim& operator=(const ReaderWriterLockSlim& other) = delete;

  // Acquires an SRW lock in shared mode.
  void lock_shared() { ::AcquireSRWLockShared(&m_lock); }

  // Acquires an SRW lock in exclusive mode.
  void lock() { ::AcquireSRWLockExclusive(&m_lock); }

  // Releases an SRW lock that was opened in shared mode.
  void unlock_shared() { ::ReleaseSRWLockShared(&m_lock); }

  // Releases an SRW lock that was opened in exclusive mode.
  void unlock() { ::ReleaseSRWLockExclusive(&m_lock); }

 private:
  // Stores the Win32 SRW lock.
  ::SRWLOCK m_lock;
};

#else
#if defined(__GNUC__)

class CriticalSection {
 public:
  CriticalSection() : m_mutex{} {}

  CriticalSection(const CriticalSection& other) = delete;
  CriticalSection& operator=(const CriticalSection& other) = delete;

  ~CriticalSection() = default;

  void lock() { pthread_mutex_lock(&m_mutex); }

  void unlock() { pthread_mutex_unlock(&m_mutex); }

 private:
  pthread_mutex_t m_mutex;
};

class ReaderWriterLockSlim {
 public:
  ReaderWriterLockSlim() = default;
  ReaderWriterLockSlim(const ReaderWriterLockSlim& other) = delete;
  ReaderWriterLockSlim& operator=(const ReaderWriterLockSlim& other) = delete;

  void lock_shared() { pthread_rwlock_rdlock(&m_lock); }

  void lock() { pthread_rwlock_wrlock(&m_lock); }

  void unlock_shared() { pthread_rwlock_unlock(&m_lock); }

  void unlock() { unlock_shared(); }

 private:
  pthread_rwlock_t m_lock = PTHREAD_RWLOCK_INITIALIZER;
};

#endif
#endif

}  // namespace Utils
}  // namespace L4
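Both wrappers deliberately use the standard lock()/unlock() and lock_shared()/unlock_shared() names, so the exclusive one drops straight into std::lock_guard; the snippet below is an illustrative sketch (requires <mutex>).

// Illustrative: CriticalSection models the standard Lockable requirements.
L4::Utils::CriticalSection criticalSection;
{
  std::lock_guard<L4::Utils::CriticalSection> guard{criticalSection};
  // ... exclusive section ...
}

// Shared locking is done through the explicit *_shared() calls.
L4::Utils::ReaderWriterLockSlim rwLock;
rwLock.lock_shared();
// ... readers may overlap here ...
rwLock.unlock_shared();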
@ -1,79 +1,64 @@
#pragma once

#include <complex>
#include <cstddef>
#include <cstdint>

namespace L4 {
namespace Utils {
namespace Math {

// Rounds up the number to the nearest multiple of base.
inline std::uint64_t RoundUp(std::uint64_t number, std::uint64_t base) {
  return base ? (((number + base - 1) / base) * base) : number;
}

// Rounds down the number to the nearest multiple of base.
inline std::uint64_t RoundDown(std::uint64_t number, std::uint64_t base) {
  return base ? ((number / base) * base) : number;
}

// Returns true if the given number is a power of 2.
inline bool IsPowerOfTwo(std::uint64_t number) {
  return number && ((number & (number - 1)) == 0);
}

// Returns the next highest power of two from the given value.
// http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2.
inline std::uint32_t NextHighestPowerOfTwo(std::uint32_t val) {
  --val;
  val |= val >> 1;
  val |= val >> 2;
  val |= val >> 4;
  val |= val >> 8;
  val |= val >> 16;
  return ++val;
}

// Provides utility functions doing pointer related arithmetics.
namespace PointerArithmetic {

// Returns a new pointer after adding an offset.
template <typename T>
inline T* Add(T* ptr, std::size_t offset) {
  return reinterpret_cast<T*>(reinterpret_cast<std::uintptr_t>(ptr) + offset);
}

// Returns a new pointer after subtracting an offset.
template <typename T>
inline T* Subtract(T* ptr, std::size_t offset) {
  return reinterpret_cast<T*>(reinterpret_cast<std::uintptr_t>(ptr) - offset);
}

// Returns the absolute value of difference in the number of bytes between two
// pointers.
inline std::size_t Distance(const void* lhs, const void* rhs) {
  return std::abs(reinterpret_cast<std::ptrdiff_t>(lhs) -
                  reinterpret_cast<std::ptrdiff_t>(rhs));
}

}  // namespace PointerArithmetic
}  // namespace Math
}  // namespace Utils
}  // namespace L4
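A few hand-checked values for the helpers above, mainly as a reminder of the base == 0 and power-of-two edge cases:

// RoundUp(10, 4)  == 12     RoundDown(10, 4) == 8
// RoundUp(10, 0)  == 10     RoundDown(10, 0) == 10   (base == 0 passes through)
// IsPowerOfTwo(1) == true   IsPowerOfTwo(0)  == false
// NextHighestPowerOfTwo(5) == 8    NextHighestPowerOfTwo(8) == 8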
@ -4,53 +4,41 @@
#include <boost/lexical_cast.hpp>

namespace L4 {
namespace Utils {

// Properties class represents a string to string map (case insensitive).
// It can be used where the configurations should be generic.
class Properties : public StdStringKeyMap<std::string> {
 public:
  using Base = Utils::StdStringKeyMap<std::string>;
  using Value = Base::value_type;

  Properties() = default;

  // Expose a constructor with initializer_list for convenience.
  Properties(std::initializer_list<Value> values) : Base(values) {}

  // Returns true if the given key exists and the value associated with
  // the key can be converted to the TValue type. If the conversion fails, the
  // value of the given val is guaranteed to remain the same.
  template <typename TValue>
  bool TryGet(const std::string& key, TValue& val) const {
    const auto it = find(key);
    if (it == end()) {
      return false;
    }

    TValue tmp;
    if (!boost::conversion::try_lexical_convert(it->second, tmp)) {
      return false;
    }

    val = tmp;

    return true;
  }
};

}  // namespace Utils
}  // namespace L4
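A small sketch of TryGet() in action; the keys, values, and target types are illustrative.

// Illustrative: the stored string is converted only if the conversion succeeds.
L4::Utils::Properties properties{{"bucketCount", "1000"}, {"name", "Table1"}};

std::uint32_t bucketCount = 0U;
bool converted = properties.TryGet("bucketCount", bucketCount);  // true, 1000

std::uint32_t notANumber = 7U;
converted = properties.TryGet("name", notANumber);  // false, notANumber stays 7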
@ -1,79 +1,60 @@
#pragma once

#include <atomic>
#include <chrono>
#include <cstdint>
#include <thread>

namespace L4 {
namespace Utils {

// NoOp is a function object that doesn't do anything.
struct NoOp {
  void operator()(...) {}
};

// RunningThread wraps around std::thread and repeatedly runs a given function
// after yielding for the given interval. Note that the destructor waits for the
// thread to stop.
template <typename CoreFunc, typename PrepFunc = NoOp>
class RunningThread {
 public:
  RunningThread(std::chrono::milliseconds interval,
                CoreFunc coreFunc,
                PrepFunc prepFunc = PrepFunc())
      : m_isRunning(),
        m_thread(&RunningThread::Start, this, interval, coreFunc, prepFunc) {}

  ~RunningThread() {
    m_isRunning.store(false);

    if (m_thread.joinable()) {
      m_thread.join();
    }
  }

  RunningThread(const RunningThread&) = delete;
  RunningThread& operator=(const RunningThread&) = delete;

 private:
  void Start(std::chrono::milliseconds interval,
             CoreFunc coreFunc,
             PrepFunc prepFunc) {
    m_isRunning.store(true);

    prepFunc();

    while (m_isRunning.load()) {
      coreFunc();

      std::this_thread::sleep_for(interval);
    }
  }

  std::atomic_bool m_isRunning;

  std::thread m_thread;
};

}  // namespace Utils
}  // namespace L4
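A usage sketch for RunningThread; the lambda, the interval, and the sleep are made up. The thread starts in the constructor and the destructor blocks until the loop exits and the thread joins, as the comment above notes.

// Illustrative: run a heartbeat roughly every 100 ms until destruction.
std::atomic<int> ticks{0};
{
  auto heartbeat = [&ticks] { ++ticks; };
  L4::Utils::RunningThread<decltype(heartbeat)> runner{
      std::chrono::milliseconds(100), heartbeat};

  std::this_thread::sleep_for(std::chrono::milliseconds(350));
}  // ~RunningThread() stops the loop and joins here.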
@ -2,38 +2,38 @@
// Allow macro redefinition.
#pragma warning(push)
#pragma warning(disable : 4005)

// Explicitly excluding API groups
//#define NOGDICAPMASKS      // - CC_*, LC_*, PC_*, CP_*, TC_*, RC_
#define NOVIRTUALKEYCODES  // - VK_*
//#define NOWINMESSAGES      // - WM_*, EM_*, LB_*, CB_*
#define NOWINSTYLES        // - WS_*, CS_*, ES_*, LBS_*, SBS_*, CBS_*
#define NOSYSMETRICS       // - SM_*
#define NOMENUS            // - MF_*
#define NOICONS            // - IDI_*
#define NOKEYSTATES        // - MK_*
#define NOSYSCOMMANDS      // - SC_*
#define NORASTEROPS        // - Binary and Tertiary raster ops
#define NOSHOWWINDOW       // - SW_*
#define OEMRESOURCE        // - OEM Resource values
#define NOATOM             // - Atom Manager routines
#define NOCLIPBOARD        // - Clipboard routines
#define NOCOLOR            // - Screen colors
//#define NOCTLMGR           // - Control and Dialog routines
#define NODRAWTEXT         // - DrawText() and DT_*
#define NOGDI              // - All GDI defines and routines
#define NOKERNEL           // - All KERNEL defines and routines
#define NONLS              // - All NLS (natural language interfaces) defines and routines
#define NOMB               // - MB_* and MessageBox()
#define NOMEMMGR           // - GMEM_*, LMEM_*, GHND, LHND, associated routines
#define NOMETAFILE         // - typedef METAFILEPICT
#define NOMINMAX           // - Macros min(a,b) and max(a,b)
//#define NOMSG              // - typedef MSG and associated routines
#define NOOPENFILE         // - OpenFile(), OemToAnsi, AnsiToOem, and OF_*
#define NOSCROLL           // - SB_* and scrolling routines
#define NOSERVICE          // - All Service Controller routines, SERVICE_ equates, etc.
#define NOSOUND            // - Sound driver routines
#define NOTEXTMETRIC       // - typedef TEXTMETRIC and associated routines
#define NOWH               // - SetWindowsHook and WH_*
#define NOWINOFFSETS       // - GWL_*, GCL_*, associated routines
@ -44,14 +44,15 @@
#define NODEFERWINDOWPOS   // - DeferWindowPos routines
#define NOMCX              // - Modem Configuration Extensions

// Enabling STRICT redefines certain data types so that the compiler does not
// permit assignment from one type to another without an explicit cast.
#define STRICT

// Define WIN32_LEAN_AND_MEAN to exclude APIs such as Cryptography, DDE, RPC,
// Shell, and Windows Sockets. Cryptography is needed due to
// <boost/uuids/random_generator.hpp>
//#define WIN32_LEAN_AND_MEAN

#pragma warning(pop)

#include <Windows.h>
@ -2,14 +2,10 @@
#include <boost/interprocess/detail/utilities.hpp>

namespace L4 {
namespace Detail {

using boost::interprocess::ipcdetail::to_raw_pointer;

}  // namespace Detail
}  // namespace L4
@ -5,83 +5,73 @@
#include <memory>
#include <thread>

namespace L4 {

// EpochActionManager class implementation.

EpochActionManager::EpochActionManager(std::uint8_t numActionQueues)
    : m_epochToActionsList{}, m_counter{} {
  // Calculate numActionQueues as the next highest power of two.
  std::uint16_t newNumActionQueues = numActionQueues;
  if (numActionQueues == 0U) {
    newNumActionQueues =
        static_cast<std::uint16_t>(std::thread::hardware_concurrency());
  }
  newNumActionQueues = static_cast<std::uint16_t>(
      Utils::Math::NextHighestPowerOfTwo(newNumActionQueues));

  assert(newNumActionQueues != 0U &&
         Utils::Math::IsPowerOfTwo(newNumActionQueues));

  // Initialize m_epochToActionsList.
  m_epochToActionsList.resize(newNumActionQueues);
  for (auto& epochToActions : m_epochToActionsList) {
    std::get<0>(epochToActions) = std::make_unique<Mutex>();
  }
}

void EpochActionManager::RegisterAction(std::uint64_t epochCounter,
                                        IEpochActionManager::Action&& action) {
  std::uint32_t index = ++m_counter & (m_epochToActionsList.size() - 1);
  auto& epochToActions = m_epochToActionsList[index];

  Lock lock(*std::get<0>(epochToActions));
  std::get<1>(epochToActions)[epochCounter].emplace_back(std::move(action));
}

std::uint64_t EpochActionManager::PerformActions(std::uint64_t epochCounter) {
  // Actions will be moved here and performed without a lock.
  Actions actionsToPerform;

  for (auto& epochToActionsWithLock : m_epochToActionsList) {
    Lock lock(*std::get<0>(epochToActionsWithLock));

    // lower_bound() so that it is deleted up to but not including epochCounter.
    auto& epochToActions = std::get<1>(epochToActionsWithLock);
    const auto endIt = epochToActions.lower_bound(epochCounter);

    auto it = epochToActions.begin();

    while (it != endIt) {
      actionsToPerform.insert(actionsToPerform.end(),
                              std::make_move_iterator(it->second.begin()),
                              std::make_move_iterator(it->second.end()));

      // The following post increment is intentional to avoid iterator
      // invalidation issue.
      epochToActions.erase(it++);
    }
  }

  ApplyActions(actionsToPerform);

  return actionsToPerform.size();
}

void EpochActionManager::ApplyActions(Actions& actions) {
  for (auto& action : actions) {
    action();
  }
}

}  // namespace L4
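A sketch of the calling pattern implemented above; the queue count, epoch values, and lambdas are illustrative, and the callback type is whatever IEpochActionManager::Action resolves to.

// Illustrative calling pattern.
EpochActionManager actionManager{2U};

actionManager.RegisterAction(5U, [] { /* reclaim resources tied to epoch 5 */ });
actionManager.RegisterAction(6U, [] { /* reclaim resources tied to epoch 6 */ });

// Performs the epoch-5 action but not the epoch-6 one, because the map is
// erased up to but not including the given counter (lower_bound above).
const auto performedCount = actionManager.PerformActions(6U);  // == 1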
@ -1,175 +1,149 @@
#include "Interprocess/Connection/ConnectionMonitor.h"

#include <atomic>

#include "Interprocess/Connection/EndPointInfoUtils.h"
#include "Utils/Exception.h"
#include "Utils/Windows.h"

namespace L4 {
namespace Interprocess {
namespace Connection {

// ConnectionMonitor class implementation.

ConnectionMonitor::ConnectionMonitor()
    : m_localEndPoint{EndPointInfoFactory().Create()},
      m_localEvent{::CreateEvent(
          NULL,
          TRUE,  // Manual reset in order to notify all end points registered.
          FALSE,
          StringConverter()(m_localEndPoint).c_str())} {}

ConnectionMonitor::~ConnectionMonitor() {
  // Notify the remote endpoints.
  ::SetEvent(static_cast<HANDLE>(m_localEvent));
}

const EndPointInfo& ConnectionMonitor::GetLocalEndPointInfo() const {
  return m_localEndPoint;
}

std::size_t ConnectionMonitor::GetRemoteConnectionsCount() const {
  UnRegister();

  std::lock_guard<std::mutex> lock(m_mutexOnRemoteMonitors);
  return m_remoteMonitors.size();
}

void ConnectionMonitor::Register(const EndPointInfo& remoteEndPoint,
                                 Callback callback) {
  UnRegister();

  // The following is needed to prevent the case where the callback is trying
  // to call UnRegister() when the ConnectionMonitor is already destroyed.
  std::weak_ptr<ConnectionMonitor> thisWeakPtr = this->shared_from_this();

  // The following ensures that only one callback is triggered from one
  // endpoint even if we are waiting for two handles (process and event).
  auto isCalled = std::make_shared<std::atomic_bool>(false);

  std::lock_guard<std::mutex> lock(m_mutexOnRemoteMonitors);

  // Note that the following call may throw since opening handles may fail, but
  // it is exception safe (std::map::emplace has a strong guarantee on it).
  if (!m_remoteMonitors
           .emplace(remoteEndPoint,
                    std::make_unique<HandleMonitor>(
                        remoteEndPoint,
                        [thisWeakPtr, callback,
                         isCalled](const auto& remoteEndPoint) {
                          if (isCalled->exchange(true)) {
                            return;
                          }

                          callback(remoteEndPoint);
                          auto connectionMonitor = thisWeakPtr.lock();
                          if (connectionMonitor != nullptr) {
                            // Cannot call UnRegister() because it will
                            // self-destruct. Instead, call the UnRegister(const
                            // EndPointInfo&) and queue up the end point that
                            // will be removed from m_remoteEvents at a later
                            // time.
                            connectionMonitor->UnRegister(remoteEndPoint);
                          }
                        }))
           .second) {
    throw RuntimeException("Duplicate end point found.");
  }
}

void ConnectionMonitor::UnRegister(const EndPointInfo& remoteEndPoint) {
  std::lock_guard<std::mutex> lock(m_mutexOnUnregisteredEndPoints);
  m_unregisteredEndPoints.emplace_back(remoteEndPoint);
}

void ConnectionMonitor::UnRegister() const {
  std::vector<EndPointInfo> unregisteredEndPoints;
  {
    // It is possible that the erase() in the following block can
    // wait for the callback to finish (::WaitForThreadpoolWaitCallbacks).
    // Since the callback calls UnRegister(const EndPointInfo&), it can
    // deadlock if this function holds the lock while calling erase(). Thus,
    // copy m_unregisteredEndPoints and release the lock before calling
    // erase() below.
    std::lock_guard<std::mutex> lock(m_mutexOnUnregisteredEndPoints);
    unregisteredEndPoints.swap(m_unregisteredEndPoints);
  }

  std::lock_guard<std::mutex> lock(m_mutexOnRemoteMonitors);
  for (const auto& endPoint : unregisteredEndPoints) {
    m_remoteMonitors.erase(endPoint);
  }
}

// ConnectionMonitor::HandleMonitor::HandleMonitor class implementation.

ConnectionMonitor::HandleMonitor::HandleMonitor(
    const EndPointInfo& remoteEndPoint,
    Callback callback)
    : m_eventWaiter{std::make_unique<Waiter>(
          Utils::Handle{::OpenEvent(SYNCHRONIZE,
                                    FALSE,
                                    StringConverter()(remoteEndPoint).c_str())},
          [callback, endPoint = remoteEndPoint] { callback(endPoint); })},
      m_processWaiter{std::make_unique<Waiter>(
          Utils::Handle{
              ::OpenProcess(SYNCHRONIZE, FALSE, remoteEndPoint.m_pid)},
          [callback, endPoint = remoteEndPoint] { callback(endPoint); })} {}

// ConnectionMonitor::HandleMonitor::Waiter class implementation.

ConnectionMonitor::HandleMonitor::Waiter::Waiter(Utils::Handle handle,
                                                 Callback callback)
    : m_handle{std::move(handle)},
      m_callback{callback},
      m_wait{::CreateThreadpoolWait(OnEvent, this, NULL),
             ::CloseThreadpoolWait} {
  ::SetThreadpoolWait(m_wait.get(), static_cast<HANDLE>(m_handle), NULL);
}

ConnectionMonitor::HandleMonitor::Waiter::~Waiter() {
  ::SetThreadpoolWait(m_wait.get(), NULL, NULL);
  ::WaitForThreadpoolWaitCallbacks(m_wait.get(), TRUE);
}

VOID CALLBACK ConnectionMonitor::HandleMonitor::Waiter::OnEvent(
    PTP_CALLBACK_INSTANCE /*instance*/,
    PVOID context,
    PTP_WAIT /*wait*/,
    TP_WAIT_RESULT waitResult) {
  if (waitResult == WAIT_OBJECT_0) {
    static_cast<Waiter*>(context)->m_callback();
  } else {
    throw std::runtime_error{"Unexpected wait result is received."};
  }
}

} // namespace Connection
} // namespace Interprocess
} // namespace L4

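For readers less familiar with the Win32 thread pool, the Waiter class above is the core mechanism: it parks a callback on a kernel handle and tears the wait down safely in its destructor. The sketch below reproduces that lifecycle outside L4; the event name, context, and console output are invented for illustration, and only the Win32 calls and the teardown order match the code above.

// Stand-alone illustration of the thread-pool wait pattern that Waiter wraps.
// Win32-only; compile against the Windows SDK.
#include <windows.h>

#include <cstdio>

VOID CALLBACK OnSignaled(PTP_CALLBACK_INSTANCE /*instance*/,
                         PVOID /*context*/,
                         PTP_WAIT /*wait*/,
                         TP_WAIT_RESULT waitResult) {
  if (waitResult == WAIT_OBJECT_0) {
    std::printf("peer signaled or exited\n");
  }
}

int main() {
  // Manual-reset event so every registered waiter observes the signal,
  // mirroring the ConnectionMonitor constructor above.
  HANDLE event = ::CreateEventA(NULL, TRUE, FALSE, "Local\\example-endpoint");
  PTP_WAIT wait = ::CreateThreadpoolWait(OnSignaled, NULL, NULL);
  ::SetThreadpoolWait(wait, event, NULL);

  ::SetEvent(event);  // Simulate the remote side announcing its teardown.
  ::Sleep(100);       // Give the callback a moment to start (demo only).

  // Same teardown order as Waiter::~Waiter(): stop accepting new waits, then
  // wait for in-flight callbacks; TRUE also cancels callbacks that were
  // queued but have not started yet.
  ::SetThreadpoolWait(wait, NULL, NULL);
  ::WaitForThreadpoolWaitCallbacks(wait, TRUE);
  ::CloseThreadpoolWait(wait);
  ::CloseHandle(event);
  return 0;
}
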
@ -1,35 +1,26 @@
#include "Interprocess/Connection/EndPointInfoUtils.h"

#include <boost/uuid/random_generator.hpp>
#include <boost/uuid/uuid_io.hpp>

#include "Utils/Windows.h"

namespace L4 {
namespace Interprocess {
namespace Connection {

// EndPointInfoFactory class implementation.

EndPointInfo EndPointInfoFactory::Create() const {
  return EndPointInfo{GetCurrentProcessId(),
                      boost::uuids::random_generator()()};
}

// StringConverter class implementation.

std::string StringConverter::operator()(const EndPointInfo& endPoint) const {
  return "[pid:" + std::to_string(endPoint.m_pid) + "," +
         "uuid:" + boost::uuids::to_string(endPoint.m_uuid) + "]";
}

} // namespace Connection
} // namespace Interprocess
} // namespace L4

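Because clang-format re-wraps the concatenation in StringConverter::operator(), the resulting format is easy to misread; the string it builds is also the name that ConnectionMonitor passes to ::CreateEvent and ::OpenEvent. A quick stand-alone check follows, assuming only Boost.UUID; the pid and uuid literals are invented for illustration.

#include <boost/uuid/string_generator.hpp>
#include <boost/uuid/uuid_io.hpp>

#include <iostream>
#include <string>

int main() {
  const unsigned long pid = 4120;  // stand-in for GetCurrentProcessId()
  const auto uuid =
      boost::uuids::string_generator()("01234567-89ab-cdef-0123-456789abcdef");

  // Same layout as StringConverter::operator() above.
  const std::string name = "[pid:" + std::to_string(pid) + "," +
                           "uuid:" + boost::uuids::to_string(uuid) + "]";

  // Prints: [pid:4120,uuid:01234567-89ab-cdef-0123-456789abcdef]
  std::cout << name << "\n";
}
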
@ -1,48 +1,35 @@
#include "Interprocess/Utils/Handle.h"

#include <boost/format.hpp>

#include "Utils/Exception.h"

namespace L4 {
namespace Interprocess {
namespace Utils {

// Handle class implementation.

Handle::Handle(HANDLE handle, bool verifyHandle)
    : m_handle{Verify(handle, verifyHandle), ::CloseHandle} {}

Handle::Handle(Handle&& other) : m_handle{std::move(other.m_handle)} {}

Handle::operator HANDLE() const {
  return m_handle.get();
}

HANDLE Handle::Verify(HANDLE handle, bool verifyHandle) const {
  if (handle == NULL || handle == INVALID_HANDLE_VALUE || verifyHandle) {
    auto error = ::GetLastError();
    if (error != ERROR_SUCCESS) {
      boost::format err("Invalid handle: %1%.");
      err % error;
      throw RuntimeException(err.str());
    }
  }
  return handle;
}

} // namespace Utils
} // namespace Interprocess
} // namespace L4

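A minimal usage sketch for the RAII wrapper follows, assuming the project headers shown and a Windows SDK build; the single-argument construction mirrors how HandleMonitor opens its event and process handles above, and nothing here is a prescribed L4 usage pattern.

#include "Interprocess/Utils/Handle.h"
#include "Utils/Windows.h"

int main() {
  // Open a handle to the current process. If OpenProcess returns NULL and
  // GetLastError() reports an error, Verify() throws a RuntimeException.
  L4::Interprocess::Utils::Handle process{
      ::OpenProcess(SYNCHRONIZE, FALSE, ::GetCurrentProcessId())};

  // The wrapper converts back to a raw HANDLE where a Win32 API needs one.
  ::WaitForSingleObject(static_cast<HANDLE>(process), 0);

  // No explicit ::CloseHandle(): the stored deleter closes it on scope exit.
  return 0;
}
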
@ -1,25 +1,21 @@
#include "Log/PerfLogger.h"

#include <boost/format.hpp>

#include "Utils/Exception.h"

namespace L4 {

// PerfData class implementation.

void PerfData::AddHashTablePerfData(const char* hashTableName,
                                    const HashTablePerfData& perfData) {
  auto result = m_hashTablesPerfData.insert(
      std::make_pair(hashTableName, HashTablesPerfData::mapped_type(perfData)));

  if (!result.second) {
    boost::format err("Duplicate hash table name found: '%1%'.");
    err % hashTableName;
    throw RuntimeException(err.str());
  }
}

} // namespace L4

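The duplicate check above hinges on std::map::insert() returning an {iterator, bool} pair. The stand-alone sketch below (table names invented) shows the same pattern, including how boost::format assembles the message that PerfData would throw as a RuntimeException.

#include <boost/format.hpp>

#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> perfDataByTable;

  for (const char* name : {"Table1", "Table1"}) {
    auto result = perfDataByTable.insert(std::make_pair(name, 0));
    if (!result.second) {  // second == false: the key already existed.
      boost::format err("Duplicate hash table name found: '%1%'.");
      err % name;
      std::cout << err.str() << "\n";  // PerfData throws this text instead.
    }
  }
}
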