Mirror of https://github.com/microsoft/CCF.git
Fix issues with snapshot generation for CHAMP (#4730)
This commit is contained in:
Parent
c3b910ff0c
Commit
16a5defab0
@@ -1,4 +1,4 @@
 ___ ___
-(- *) (O o) | Y D O
+(- *) (O o) | Y ^ O
 ( V ) < V > O +---'---'
 /--x-m- /--m-m---xXx--/--yy---
@@ -29,6 +29,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
 ### Fixed

 - Session consistency is now provided even across elections. If session consistency would be broken, the inconsistent request will return an error and the TLS session will be terminated.
+- Fixed issue where invalid snapshots could be generated depending on the pattern of additions/removals of keys in a given key-value map (#4730).

 ## [4.0.0-dev0]
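The fix summarised in the changelog entry above comes down to size bookkeeping: the CHAMP map keeps a running total of its serialised size, and the overwrite and remove paths (in particular for hash-collision bins) accounted for entries without the padding the snapshot writer actually emits, so the precomputed buffer size could disagree with the bytes written. The following sketch is illustrative only, not CCF code; the 8-byte padding rule and the `SizeTrackingMap` type are assumptions made purely to show how the running total stays consistent when the padded size of the replaced entry is used.

```cpp
// Illustrative sketch only, not CCF code. A fixed 8-byte padding rule and the
// SizeTrackingMap type are assumptions made purely for this example.
#include <cassert>
#include <cstddef>
#include <map>
#include <string>

static size_t get_padding(size_t size, size_t alignment = 8)
{
  // Bytes needed to round `size` up to the next multiple of `alignment`
  return (alignment - (size % alignment)) % alignment;
}

static size_t padded_size(const std::string& field)
{
  return field.size() + get_padding(field.size());
}

struct SizeTrackingMap
{
  std::map<std::string, std::string> entries;
  size_t serialized_size = 0; // running total used to pre-size snapshots

  void put(const std::string& k, const std::string& v)
  {
    auto it = entries.find(k);
    if (it != entries.end())
    {
      // On overwrite, subtract the *padded* size of the entry being replaced.
      // Subtracting unpadded sizes here is the kind of drift the commit fixes.
      serialized_size -= padded_size(it->first) + padded_size(it->second);
    }
    entries[k] = v;
    serialized_size += padded_size(k) + padded_size(v);
  }

  void remove(const std::string& k)
  {
    auto it = entries.find(k);
    if (it != entries.end())
    {
      serialized_size -= padded_size(it->first) + padded_size(it->second);
      entries.erase(it);
    }
  }

  // What a snapshot writer would actually emit
  size_t recount() const
  {
    size_t total = 0;
    for (const auto& [k, v] : entries)
    {
      total += padded_size(k) + padded_size(v);
    }
    return total;
  }
};

int main()
{
  SizeTrackingMap m;
  m.put("key", "value");
  m.put("key", "a much longer value"); // overwrite
  m.remove("key");
  m.put("other", "v");
  assert(m.serialized_size == m.recount());
  return 0;
}
```

If the overwrite and remove branches used unpadded sizes instead, the running total would drift from `recount()` for any key or value whose length is not a multiple of the alignment, which is the "pattern of additions/removals" failure described in the entry.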
@@ -271,6 +271,7 @@ namespace logger
   static inline void default_init()
   {
+    get_loggers().clear();
     add_text_console_logger();
   }
 #endif
@@ -126,8 +126,10 @@ namespace champ
         const auto& entry = bin[i];
         if (k == entry->key)
         {
+          const auto diff = map::get_serialized_size_with_padding(entry->key) +
+            map::get_serialized_size_with_padding(entry->value);
           bin[i] = std::make_shared<Entry<K, V>>(k, v);
-          return map::get_size(k) + map::get_size(v);
+          return diff;
         }
       }
       bin.push_back(std::make_shared<Entry<K, V>>(k, v));
@@ -143,8 +145,8 @@ namespace champ
         const auto& entry = bin[i];
         if (k == entry->key)
         {
-          const auto diff =
-            map::get_size(entry->key) + map::get_size(entry->value);
+          const auto diff = map::get_serialized_size_with_padding(entry->key) +
+            map::get_serialized_size_with_padding(entry->value);
           bin.erase(bin.begin() + i);
           return diff;
         }
@@ -211,6 +213,7 @@ namespace champ
       return node_as<SubNodes<K, V, H>>(c_idx)->getp(depth + 1, hash, k);
     }

+    // Returns serialised size of overwritten (k,v) if k exists, 0 otherwise
     size_t put_mut(SmallIndex depth, Hash hash, const K& k, const V& v)
     {
       const auto idx = mask(hash, depth);
@@ -246,8 +249,8 @@ namespace champ
         const auto& entry0 = node_as<Entry<K, V>>(c_idx);
         if (k == entry0->key)
         {
-          auto current_size = map::get_size_with_padding(entry0->key) +
-            map::get_size_with_padding(entry0->value);
+          auto current_size = map::get_serialized_size_with_padding(entry0->key) +
+            map::get_serialized_size_with_padding(entry0->value);
           nodes[c_idx] = std::make_shared<Entry<K, V>>(k, v);
           return current_size;
         }
@@ -297,6 +300,7 @@ namespace champ
         std::make_shared<SubNodes<K, V, H>>(std::move(node)), r);
     }

+    // Returns serialised size of removed (k,v) if k exists, 0 otherwise
     size_t remove_mut(SmallIndex depth, Hash hash, const K& k)
     {
       const auto idx = mask(hash, depth);
@@ -311,8 +315,8 @@ namespace champ
       if (entry->key != k)
         return 0;

-      const auto diff = map::get_size_with_padding(entry->key) +
-        map::get_size_with_padding(entry->value);
+      const auto diff = map::get_serialized_size_with_padding(entry->key) +
+        map::get_serialized_size_with_padding(entry->value);
       nodes.erase(nodes.begin() + c_idx);
       data_map = data_map.clear(idx);
       return diff;
@@ -437,9 +441,10 @@ namespace champ
       if (r.second == 0)
         size_++;

-      const auto size_change =
-        (map::get_size_with_padding(key) + map::get_size_with_padding(value)) -
+      const auto size_change = (map::get_serialized_size_with_padding(key) +
+        map::get_serialized_size_with_padding(value)) -
         r.second;

       return Map(std::move(r.first), size_, size_change + serialized_size);
     }
@@ -492,17 +497,14 @@ namespace champ
     {
       std::vector<KVTuple> ordered_state;
       ordered_state.reserve(map.size());
-      size_t size = 0;
+      size_t serialized_size = 0;

-      map.foreach([&](auto& key, auto& value) {
+      map.foreach([&ordered_state, &serialized_size](auto& key, auto& value) {
         K* k = &key;
         V* v = &value;
-        uint32_t ks = map::get_size(key);
-        uint32_t vs = map::get_size(value);
-        uint32_t key_size = ks + map::get_padding(ks);
-        uint32_t value_size = vs + map::get_padding(vs);
-
-        size += (key_size + value_size);
+        uint32_t key_size = map::get_serialized_size_with_padding(key);
+        uint32_t value_size = map::get_serialized_size_with_padding(value);
+        serialized_size += (key_size + value_size);

         ordered_state.emplace_back(k, static_cast<Hash>(H()(key)), v);

@@ -517,9 +519,10 @@ namespace champ
       });

       CCF_ASSERT_FMT(
-        size == map.get_serialized_size(),
-        "size:{}, map->size:{} ==> count:{}, vect:{}",
-        size,
+        serialized_size == map.get_serialized_size(),
+        "Serialized size:{}, map.get_serialized_size():{} (map count:{}, "
+        "ordered state count:{})",
+        serialized_size,
         map.get_serialized_size(),
         map.size(),
         ordered_state.size());
@@ -527,15 +530,19 @@ namespace champ
       for (const auto& p : ordered_state)
       {
         // Serialize the key
-        uint32_t key_size = map::serialize(*p.k, data, size);
-        map::add_padding(key_size, data, size);
+        uint32_t key_size = map::serialize(*p.k, data, serialized_size);
+        map::add_padding(key_size, data, serialized_size);

         // Serialize the value
-        uint32_t value_size = map::serialize(*p.v, data, size);
-        map::add_padding(value_size, data, size);
+        uint32_t value_size = map::serialize(*p.v, data, serialized_size);
+        map::add_padding(value_size, data, serialized_size);
       }

-      CCF_ASSERT_FMT(size == 0, "buffer not filled, remaining:{}", size);
+      CCF_ASSERT_FMT(
+        serialized_size == 0,
+        "Serialization buffer is not complete, remaining:{}/{}",
+        serialized_size,
+        map.get_serialized_size());
     }
   };
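The serialisation path above sizes the output buffer from the map's running total and then counts the remaining space down to zero as it writes each padded key and value, asserting at the end that the buffer was filled exactly. A standalone sketch of that pattern follows; it is illustrative only, with the `padded` and `write_padded` helpers and the 8-byte alignment assumed, standing in for `map::serialize`, `map::add_padding` and `map::get_padding` in the real code.

```cpp
// Illustrative sketch of sizing a buffer up front and counting it down while
// writing; not CCF code. Helper names and the 8-byte alignment are assumed.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>
#include <utility>
#include <vector>

static size_t padded(size_t n, size_t alignment = 8)
{
  return n + (alignment - (n % alignment)) % alignment;
}

// Writes `field` plus zero padding, advancing `data` and decrementing `remaining`
static void write_padded(
  const std::string& field, uint8_t*& data, size_t& remaining)
{
  const size_t total = padded(field.size());
  assert(remaining >= total);
  std::memcpy(data, field.data(), field.size());
  std::memset(data + field.size(), 0, total - field.size());
  data += total;
  remaining -= total;
}

int main()
{
  const std::vector<std::pair<std::string, std::string>> entries = {
    {"key", "value"}, {"longer-key", "v"}};

  // Pre-compute the exact serialised size, as the snapshot code does
  size_t serialized_size = 0;
  for (const auto& [k, v] : entries)
  {
    serialized_size += padded(k.size()) + padded(v.size());
  }

  std::vector<uint8_t> buffer(serialized_size);
  uint8_t* data = buffer.data();
  size_t remaining = serialized_size;
  for (const auto& [k, v] : entries)
  {
    write_padded(k, data, remaining);
    write_padded(v, data, remaining);
  }

  // If the size bookkeeping is correct, the buffer is filled exactly
  assert(remaining == 0);
  return 0;
}
```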
@@ -38,7 +38,7 @@ namespace map
   }

   template <class T>
-  static size_t get_size_with_padding(const T& t)
+  static size_t get_serialized_size_with_padding(const T& t)
   {
     const uint32_t t_size = get_size(t);
     return t_size + get_padding(t_size);
@@ -38,8 +38,8 @@ namespace rb
       _rgt(rgt)
     {
       total_size = 1;
-      total_serialized_size =
-        map::get_size_with_padding(key) + map::get_size_with_padding(val);
+      total_serialized_size = map::get_serialized_size_with_padding(key) +
+        map::get_serialized_size_with_padding(val);
       if (lft)
       {
         total_size += lft->size();
@@ -2,8 +2,12 @@
 // Licensed under the Apache 2.0 License.
 #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
 #include "ccf/byte_vector.h"
+#include "ccf/ds/logger.h"
+#include "ccf/kv/serialisers/serialised_entry.h"
 #include "ds/champ_map.h"
 #include "ds/rb_map.h"
+#include "ds/std_formatters.h"
+#include "kv/untyped_change_set.h"

 #include <doctest/doctest.h>
 #include <random>
@@ -18,8 +22,9 @@ struct CollisionHash
   }
 };

-using K = uint64_t;
-using V = uint64_t;
+using K = kv::serialisers::SerialisedEntry;
+using V = kv::serialisers::SerialisedEntry;
+constexpr static size_t max_key_value_size = 128;

 namespace map
 {
@@ -103,7 +108,7 @@ struct Put : public Op<M>
   std::string str()
   {
     auto ss = std::stringstream();
-    ss << "Put(" << H<K>()(k) << ", " << v << ")";
+    ss << "Put(" << H<K>()(k) << ", value of size " << v.size() << ")";
     return ss.str();
   }
 };
@@ -157,8 +162,9 @@ std::vector<std::unique_ptr<Op<M>>> gen_ops(size_t n)

   std::vector<std::unique_ptr<Op<M>>> ops;
   std::vector<K> keys;
-  for (V v = 0; v < n; ++v)
+  for (size_t i = 0; i < n; ++i)
   {
+    V v(gen() % max_key_value_size, 'v');
     std::unique_ptr<Op<M>> op;
     auto op_i = keys.empty() ? 0 : gen_op(gen);
     switch (op_i)
@@ -166,7 +172,7 @@ std::vector<std::unique_ptr<Op<M>>> gen_ops(size_t n)
       case 0:
       case 1: // insert
      {
-        auto k = gen();
+        K k(gen() % max_key_value_size, 'k');
        keys.push_back(k);
        op = std::make_unique<Put<M>>(k, v);

@@ -254,128 +260,75 @@ template <class M>
 static const M gen_map(size_t size)
 {
   M map;
-  for (size_t i = 0; i < size; ++i)
+  Model model;
+
+  auto ops = gen_ops<M>(size);
+  for (auto& op : ops)
   {
-    map = map.put(i, i);
+    auto r = op->apply(model, map);
+    map = r.second;
   }
   return map;
 }

-TEST_CASE_TEMPLATE("Snapshot map", M, RBMap, ChampMap)
+TEST_CASE_TEMPLATE("Snapshot map", M, ChampMap, RBMap)
 {
-  std::vector<KVPair> results;
-  uint32_t num_elements = 100;
-  auto map = gen_map<M>(num_elements);
+  size_t ops_count = 2048;
+  auto map = gen_map<M>(ops_count);
+  std::map<K, V> contents; // Ordered content as Champ is unordered
+  std::vector<uint8_t> serialised_snapshot;
+
+  M new_map;

-  INFO("Check initial content of map");
+  INFO("Record content of source map");
   {
-    map.foreach([&results](const auto& key, const auto& value) {
-      results.push_back({key, value});
+    map.foreach([&contents](const auto& key, const auto& value) {
+      contents[key] = value;
       return true;
     });
-    REQUIRE_EQ(num_elements, results.size());
-    REQUIRE_EQ(map.size(), num_elements);
+    REQUIRE_EQ(map.size(), contents.size());
   }

-  INFO("Populate second map and compare");
-  {
-    std::set<K> keys;
-    M new_map;
-    for (const auto& p : results)
-    {
-      REQUIRE_LT(p.k, num_elements);
-      keys.insert(p.k);
-      new_map = new_map.put(p.k, p.v);
-    }
-    REQUIRE_EQ(num_elements, new_map.size());
-    REQUIRE_EQ(num_elements, keys.size());
-  }
-
-  INFO("Serialize map to array");
+  INFO("Generate snapshot");
   {
     auto snapshot = map.make_snapshot();
-    std::vector<uint8_t> s(map.get_serialized_size());
-    snapshot->serialize(s.data());
-
-    auto new_map = map::deserialize_map<M>(s);
+    serialised_snapshot.resize(map.get_serialized_size());
+    snapshot->serialize(serialised_snapshot.data());

-    std::set<K> keys;
-    new_map.foreach([&keys](const auto& key, const auto& value) {
-      keys.insert(key);
-      REQUIRE_EQ(key, value);
-      return true;
-    });
-    REQUIRE_EQ(map.size(), new_map.size());
-    REQUIRE_EQ(map.size(), keys.size());
+    INFO("Ensure serialised state is byte identical");
+    {
+      auto snapshot_2 = map.make_snapshot();
+      std::vector<uint8_t> serialised_snapshot_2(map.get_serialized_size());
+      snapshot_2->serialize(serialised_snapshot_2.data());
+      REQUIRE_EQ(serialised_snapshot, serialised_snapshot_2);
+    }
+  }

-    // Check that new entries can be added to deserialised map
-    uint32_t offset = 1000;
-    for (uint32_t i = offset; i < offset + num_elements; ++i)
-    {
-      new_map = new_map.put(i, i);
-    }
-    REQUIRE_EQ(new_map.size(), map.size() + num_elements);
-    for (uint32_t i = offset; i < offset + num_elements; ++i)
-    {
-      auto p = new_map.get(i);
-      REQUIRE(p.has_value());
-      REQUIRE(p.value() == i);
-    }
-  }
+  INFO("Apply snapshot to target map");
+  {
+    new_map = map::deserialize_map<M>(serialised_snapshot);
+    REQUIRE_EQ(map.size(), new_map.size());

-  INFO("Ensure serialized state is byte identical");
-  {
-    auto snapshot_1 = map.make_snapshot();
-    std::vector<uint8_t> s_1(map.get_serialized_size());
-    snapshot_1->serialize(s_1.data());
-
-    auto snapshot_2 = map.make_snapshot();
-    std::vector<uint8_t> s_2(map.get_serialized_size());
-    snapshot_2->serialize(s_2.data());
-
-    REQUIRE_EQ(s_1, s_2);
-  }
-
-  INFO("Snapshot is immutable");
-  {
-    size_t current_size = map.size();
-    auto snapshot = map.make_snapshot();
-    std::vector<uint8_t> s_1(map.get_serialized_size());
-    snapshot->serialize(s_1.data());
-
-    // Add entry in map
-    auto key = current_size + 1;
-    REQUIRE(map.get(key) == std::nullopt);
-    map = map.put(key, key);
-
-    // Even though map has been updated, snapshot is not modified
-    std::vector<uint8_t> s_2(s_1.size());
-    snapshot->serialize(s_2.data());
-    REQUIRE_EQ(s_1, s_2);
-  }
-}
-
-using SerialisedKey = ccf::ByteVector;
-using SerialisedValue = ccf::ByteVector;
-
-TEST_CASE_TEMPLATE(
-  "Serialize map with different key sizes",
-  M,
-  UntypedChampMap<SerialisedKey, SerialisedValue>,
-  UntypedRBMap<SerialisedKey, SerialisedValue>)
-{
-  M map;
-  SerialisedKey key(16);
-  SerialisedValue long_key(128);
-  SerialisedValue value(8);
-  SerialisedValue long_value(256);
-
-  map = map.put(key, value);
-  map = map.put(long_key, long_value);
-
-  auto snapshot = map.make_snapshot();
-  std::vector<uint8_t> s(map.get_serialized_size());
-  snapshot->serialize(s.data());
+    std::map<K, V> new_contents;
+    new_map.foreach([&new_contents](const auto& key, const auto& value) {
+      new_contents[key] = value;
+      return true;
+    });
+    REQUIRE_EQ(contents.size(), new_contents.size());
+    REQUIRE_EQ(contents, new_contents);
+  }
+
+  INFO("Check that new entries can be added to target map");
+  {
+    Model new_model;
+    auto new_ops = gen_ops<M>(ops_count);
+    for (auto& op : new_ops)
+    {
+      auto r = op->apply(new_model, new_map);
+      new_map = r.second;
+    }
+  }
 }

 template <typename M>
@@ -390,6 +343,55 @@ std::map<K, V> get_all_entries(const M& map)
   return entries;
 }

+TEST_CASE_TEMPLATE("Snapshot is immutable", M, ChampMap, RBMap)
+{
+  size_t ops_count = 2048;
+  auto map = gen_map<M>(ops_count);
+
+  // Take snapshot at original state
+  auto snapshot = map.make_snapshot();
+  size_t serialised_snapshot_before_size = snapshot->get_serialized_size();
+  std::vector<uint8_t> serialised_snapshot_before(
+    serialised_snapshot_before_size);
+  snapshot->serialize(serialised_snapshot_before.data());
+
+  INFO("Meanwhile, modify map");
+  {
+    // Remove operation is not yet implemented for RBMap
+    if constexpr (std::is_same_v<M, ChampMap>)
+    {
+      auto all_entries = get_all_entries(map);
+      auto& key_to_remove = all_entries.begin()->first;
+      map = map.remove(key_to_remove);
+    }
+
+    // Modify existing key with value that must be different from what `gen_map`
+    // populated the map with
+    auto all_entries = get_all_entries(map);
+    auto& key_to_add = all_entries.begin()->first;
+    map = map.put(key_to_add, V(max_key_value_size * 2, 'x'));
+  }
+
+  INFO("Even though map has been updated, original snapshot is not modified");
+  {
+    REQUIRE_EQ(
+      snapshot->get_serialized_size(), serialised_snapshot_before_size);
+    std::vector<uint8_t> serialised_snapshot_after(
+      serialised_snapshot_before_size);
+    snapshot->serialize(serialised_snapshot_after.data());
+    REQUIRE_EQ(serialised_snapshot_before, serialised_snapshot_after);
+  }
+
+  INFO("But new snapshot is different");
+  {
+    auto new_snapshot = map.make_snapshot();
+    size_t serialised_snapshot_new_size = new_snapshot->get_serialized_size();
+    std::vector<uint8_t> serialised_snapshot_new(serialised_snapshot_new_size);
+    new_snapshot->serialize(serialised_snapshot_new.data());
+    REQUIRE_NE(serialised_snapshot_before, serialised_snapshot_new);
+  }
+}
+
 template <class S, class T>
 void verify_snapshot_compatibility(const S& source_map, T& target_map)
 {
@@ -432,11 +434,11 @@ void forall_threshold(const M& map, size_t threshold)
 {
   size_t iterations_count = 0;
   map.foreach([&iterations_count, threshold](const K& k, const V& v) {
-    iterations_count++;
     if (iterations_count >= threshold)
     {
       return false;
     }
+    iterations_count++;
     return true;
   });
   REQUIRE(iterations_count == threshold);
@@ -445,8 +447,8 @@ void forall_threshold(const M& map, size_t threshold)
 TEST_CASE_TEMPLATE("Foreach", M, RBMap, ChampMap)
 {
   size_t size = 100;
-  size_t threshold = size / 2;
-
   auto map = gen_map<M>(size);
+
+  size_t threshold = map.size() / 2;
   forall_threshold(map, threshold);
 }
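Taken together, the test changes above switch the maps under test to variable-size `kv::serialisers::SerialisedEntry` keys and values and drive them through randomly generated put/remove operations before snapshotting. A condensed sketch of the round-trip those tests rely on is shown below; it reuses the aliases and includes of this test file (`ChampMap`, `K`, `V`, `map::deserialize_map`) and is an illustration rather than an additional test from the commit.

```cpp
// Illustrative only: a condensed snapshot round-trip using the calls exercised
// by the tests above. Assumes this test file's aliases and includes (ChampMap,
// K, V, map::deserialize_map) plus <cassert>.
void example_snapshot_round_trip()
{
  ChampMap source;
  source = source.put(K(3, 'k'), V(11, 'v')); // small entries
  source = source.put(K(8, 'a'), V(16, 'b')); // larger entries

  // Size the buffer from the map's running total, then serialise into it
  auto snapshot = source.make_snapshot();
  std::vector<uint8_t> bytes(source.get_serialized_size());
  snapshot->serialize(bytes.data());

  // Rebuild a map from the bytes and check the contents survived
  auto target = map::deserialize_map<ChampMap>(bytes);
  assert(source.size() == target.size());
  target.foreach([&](const K& k, const V& v) {
    auto original = source.get(k);
    assert(original.has_value() && original.value() == v);
    return true;
  });
}
```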
@@ -170,7 +170,7 @@ TEST_CASE("Old snapshots" * doctest::test_suite("snapshot"))
   // Test that this code can still parse snapshots produced by old versions of
   // the code
   // NB: These raw strings are base64 encodings from
-  // `sencond_serialised_snapshot` in the "Simple snapshot" test
+  // `second_serialised_snapshot` in the "Simple snapshot" test
   std::string raw_snapshot_b64;
   SUBCASE("Tombstone deletions")
   {
@@ -313,6 +313,7 @@ namespace kv::untyped

     void serialise(KvStoreSerialiser& s) override
     {
+      LOG_TRACE_FMT("Serialising snapshot for map: {}", name);
       s.start_map(name, security_domain);
       s.serialise_entry_version(version);

@@ -339,9 +339,7 @@ def test_expired_certs(network, args):
         stack.enter_context(network.partitioner.partition([primary]))

         # Restore connectivity between backups and wait for election
-        network.wait_for_primary_unanimity(
-            nodes=[backup_a, backup_b], min_view=r.view
-        )
+        network.wait_for_primary_unanimity(nodes=[backup_a, backup_b], min_view=r.view)

         # Should now be able to make progress
         check_can_progress(backup_a)