Bug 1866629 - Remove the sample_groups object in serialized counters, r=canaltinova,profiler-reviewers.

Differential Revision: https://phabricator.services.mozilla.com/D194695
Florian Quèze 2023-12-12 11:28:07 +00:00
Parent 9503eb0aca
Commit 03bd9de29c
7 changed files: 245 additions and 328 deletions

View file

@@ -948,15 +948,13 @@ void ProfileBuffer::StreamProfilerOverheadToJSON(
});
}
struct CounterKeyedSample {
struct CounterSample {
double mTime;
uint64_t mNumber;
int64_t mCount;
};
using CounterKeyedSamples = Vector<CounterKeyedSample>;
using CounterMap = HashMap<uint64_t, CounterKeyedSamples>;
using CounterSamples = Vector<CounterSample>;
// HashMap lookup; if the key is not found, a default value is inserted.
// Returns a reference to the (existing or new) value inside the HashMap.
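For readers unfamiliar with the helper this comment describes, the lookup-or-default-insert idiom is sketched below with std::unordered_map, purely for illustration; the real helper is a template over mozilla::HashMap, whose lookupForAdd/add API differs.

#include <cstdint>
#include <string>
#include <unordered_map>

// Illustration only: operator[] default-constructs the mapped value when the
// key is absent and returns a reference to the (existing or new) slot, which
// is the same contract LookupOrAdd provides over mozilla::HashMap.
template <typename Map, typename Key>
typename Map::mapped_type& LookupOrAddSketch(Map& aMap, const Key& aKey) {
  return aMap[aKey];
}

int main() {
  std::unordered_map<uint64_t, std::string> m;
  LookupOrAddSketch(m, uint64_t(42)) = "first";  // inserts, then assigns
  LookupOrAddSketch(m, uint64_t(42)) += "!";     // finds the existing entry
  return 0;
}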
@@ -993,41 +991,38 @@ void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
// Valid sequence in the buffer:
// CounterID
// Time
// ( CounterKey Count Number? )*
// ( Count Number? )*
//
// And the JSON (example):
// "counters": {
// "name": "malloc",
// "category": "Memory",
// "description": "Amount of allocated memory",
// "sample_groups": {
// "id": 0,
// "samples": {
// "schema": {"time": 0, "number": 1, "count": 2},
// "data": [
// [
// 16117.033968000002,
// 2446216,
// 6801320
// ],
// [
// 16118.037638,
// 2446216,
// 6801320
// ],
// "samples": {
// "schema": {"time": 0, "count": 1, "number": 2},
// "data": [
// [
// 16117.033968000002,
// 2446216,
// 6801320
// ],
// }
// }
// },
// [
// 16118.037638,
// 2446216,
// 6801320
// ],
// ],
// },
// }
// Build the map of counters and populate it
HashMap<void*, CounterMap> counters;
HashMap<void*, CounterSamples> counters;
while (e.Has()) {
// skip all non-Counters, including if we start in the middle of a counter
if (e.Get().IsCounterId()) {
void* id = e.Get().GetPtr();
CounterMap& counter = LookupOrAdd(counters, id);
CounterSamples& data = LookupOrAdd(counters, id);
e.Next();
if (!e.Has() || !e.Get().IsTime()) {
ERROR_AND_CONTINUE("expected a Time entry");
@@ -1035,25 +1030,20 @@ void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
double time = e.Get().GetDouble();
e.Next();
if (time >= aSinceTime) {
while (e.Has() && e.Get().IsCounterKey()) {
uint64_t key = e.Get().GetUint64();
CounterKeyedSamples& data = LookupOrAdd(counter, key);
e.Next();
if (!e.Has() || !e.Get().IsCount()) {
ERROR_AND_CONTINUE("expected a Count entry");
}
int64_t count = e.Get().GetUint64();
e.Next();
uint64_t number;
if (!e.Has() || !e.Get().IsNumber()) {
number = 0;
} else {
number = e.Get().GetInt64();
e.Next();
}
CounterKeyedSample sample = {time, number, count};
MOZ_RELEASE_ASSERT(data.append(sample));
if (!e.Has() || !e.Get().IsCount()) {
ERROR_AND_CONTINUE("expected a Count entry");
}
int64_t count = e.Get().GetUint64();
e.Next();
uint64_t number;
if (!e.Has() || !e.Get().IsNumber()) {
number = 0;
} else {
number = e.Get().GetInt64();
e.Next();
}
CounterSample sample = {time, number, count};
MOZ_RELEASE_ASSERT(data.append(sample));
} else {
// skip counter sample - only need to skip the initial counter
// id, then let the loop at the top skip the rest
@@ -1062,14 +1052,18 @@ void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
e.Next();
}
}
// we have a map of a map of counter entries; dump them to JSON
// we have a map of counter entries; dump them to JSON
if (counters.count() == 0) {
return;
}
aWriter.StartArrayProperty("counters");
for (auto iter = counters.iter(); !iter.done(); iter.next()) {
CounterMap& counter = iter.get().value();
CounterSamples& samples = iter.get().value();
size_t size = samples.length();
if (size == 0) {
continue;
}
const BaseProfilerCount* base_counter =
static_cast<const BaseProfilerCount*>(iter.get().key());
@@ -1080,70 +1074,51 @@ void ProfileBuffer::StreamCountersToJSON(SpliceableJSONWriter& aWriter,
aWriter.StringProperty("description",
MakeStringSpan(base_counter->mDescription));
aWriter.StartArrayProperty("sample_groups");
for (auto counter_iter = counter.iter(); !counter_iter.done();
counter_iter.next()) {
CounterKeyedSamples& samples = counter_iter.get().value();
uint64_t key = counter_iter.get().key();
size_t size = samples.length();
if (size == 0) {
continue;
bool hasNumber = false;
for (size_t i = 0; i < size; i++) {
if (samples[i].mNumber != 0) {
hasNumber = true;
break;
}
bool hasNumber = false;
for (size_t i = 0; i < size; i++) {
if (samples[i].mNumber != 0) {
hasNumber = true;
break;
}
}
aWriter.StartObjectElement();
{
aWriter.IntProperty("id", static_cast<int64_t>(key));
aWriter.StartObjectProperty("samples");
{
JSONSchemaWriter schema(aWriter);
schema.WriteField("time");
schema.WriteField("count");
if (hasNumber) {
schema.WriteField("number");
}
}
aWriter.StartArrayProperty("data");
uint64_t previousNumber = 0;
int64_t previousCount = 0;
for (size_t i = 0; i < size; i++) {
// Encode as deltas, and only encode if different from the last
// sample
if (i == 0 || samples[i].mNumber != previousNumber ||
samples[i].mCount != previousCount) {
MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime);
MOZ_ASSERT(samples[i].mNumber >= previousNumber);
MOZ_ASSERT(samples[i].mNumber - previousNumber <=
uint64_t(std::numeric_limits<int64_t>::max()));
AutoArraySchemaWriter writer(aWriter);
writer.TimeMsElement(TIME, samples[i].mTime);
writer.IntElement(COUNT, samples[i].mCount - previousCount);
if (hasNumber) {
writer.IntElement(
NUMBER,
static_cast<int64_t>(samples[i].mNumber - previousNumber));
}
previousNumber = samples[i].mNumber;
previousCount = samples[i].mCount;
}
}
aWriter.EndArray(); // data
aWriter.EndObject(); // samples
}
aWriter.EndObject(); // sample_groups item
}
aWriter.EndArray(); // sample groups
aWriter.End(); // for each counter
aWriter.StartObjectProperty("samples");
{
JSONSchemaWriter schema(aWriter);
schema.WriteField("time");
schema.WriteField("count");
if (hasNumber) {
schema.WriteField("number");
}
}
aWriter.StartArrayProperty("data");
uint64_t previousNumber = 0;
int64_t previousCount = 0;
for (size_t i = 0; i < size; i++) {
// Encode as deltas, and only encode if different from the last
// sample
if (i == 0 || samples[i].mNumber != previousNumber ||
samples[i].mCount != previousCount) {
MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime);
MOZ_ASSERT(samples[i].mNumber >= previousNumber);
MOZ_ASSERT(samples[i].mNumber - previousNumber <=
uint64_t(std::numeric_limits<int64_t>::max()));
AutoArraySchemaWriter writer(aWriter);
writer.TimeMsElement(TIME, samples[i].mTime);
writer.IntElement(COUNT, samples[i].mCount - previousCount);
if (hasNumber) {
writer.IntElement(NUMBER, static_cast<int64_t>(samples[i].mNumber -
previousNumber));
}
previousNumber = samples[i].mNumber;
previousCount = samples[i].mCount;
}
}
aWriter.EndArray(); // data
aWriter.EndObject(); // samples
aWriter.End(); // for each counter
}
aWriter.EndArray(); // counters
});
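Taken out of diff form, the delta scheme the new code implements can be sketched standalone as follows; EncodeDeltas and the flat row layout are illustrative assumptions for the example, while CounterSample mirrors the struct defined above.

#include <array>
#include <cstdint>
#include <vector>

struct CounterSample {
  double mTime;
  uint64_t mNumber;
  int64_t mCount;
};

// Each emitted row is {time, count delta, number delta}, matching the
// {"time": 0, "count": 1, "number": 2} schema written by the JSON writer.
std::vector<std::array<double, 3>> EncodeDeltas(
    const std::vector<CounterSample>& aSamples) {
  std::vector<std::array<double, 3>> rows;
  uint64_t previousNumber = 0;
  int64_t previousCount = 0;
  for (size_t i = 0; i < aSamples.size(); i++) {
    // Write a row only when the cumulative values changed; the first sample
    // is always written so consumers have a starting point.
    if (i == 0 || aSamples[i].mNumber != previousNumber ||
        aSamples[i].mCount != previousCount) {
      rows.push_back({aSamples[i].mTime,
                      double(aSamples[i].mCount - previousCount),
                      double(aSamples[i].mNumber - previousNumber)});
      previousNumber = aSamples[i].mNumber;
      previousCount = aSamples[i].mCount;
    }
  }
  return rows;
}

Rows between two written rows implicitly repeat the previous cumulative values, which is why unchanged samples can be dropped without losing information.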
@@ -1261,7 +1236,6 @@ bool ProfileBuffer::DuplicateLastSample(BaseProfilerThreadId aThreadId,
ProfileBufferEntry::Time(
(TimeStamp::Now() - aProcessStartTime).ToMilliseconds()));
break;
case ProfileBufferEntry::Kind::CounterKey:
case ProfileBufferEntry::Kind::Number:
case ProfileBufferEntry::Kind::Count:
case ProfileBufferEntry::Kind::Responsiveness:

View file

@@ -2348,14 +2348,9 @@ void SamplerThread::Run() {
// create Buffer entries for each counter
buffer.AddEntry(ProfileBufferEntry::CounterId(counter));
buffer.AddEntry(ProfileBufferEntry::Time(delta.ToMilliseconds()));
// XXX support keyed maps of counts
// In the future, we'll support keyed counters - for example, counters
// with a key which is a thread ID. For "simple" counters we'll just
// use a key of 0.
int64_t count;
uint64_t number;
counter->Sample(count, number);
buffer.AddEntry(ProfileBufferEntry::CounterKey(0));
buffer.AddEntry(ProfileBufferEntry::Count(count));
if (number) {
buffer.AddEntry(ProfileBufferEntry::Number(number));

View file

@@ -24,7 +24,7 @@ namespace mozilla {
class ProfileBufferChunkManagerWithLocalLimit;
// Centrally defines the version of the gecko profiler JSON format.
const int GECKO_PROFILER_FORMAT_VERSION = 28;
const int GECKO_PROFILER_FORMAT_VERSION = 29;
namespace baseprofiler::detail {
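The version bump is needed because the counter layout change is not backwards compatible: samples now hang directly off each counter instead of under a single zero-keyed sample_groups entry, and the schema column order changed (count now precedes number). A consumer using jsoncpp (the library the gtests below use) might branch on the version as in this sketch; CounterSamplesFor and the meta.version lookup are illustrative assumptions, not profiler API.

#include <json/json.h>

// Select a counter's samples object in either layout. Assumes the profile's
// meta.version field carries GECKO_PROFILER_FORMAT_VERSION.
const Json::Value& CounterSamplesFor(const Json::Value& aRoot, int aCounter) {
  const Json::Value& counter = aRoot["counters"][aCounter];
  if (aRoot["meta"]["version"].asInt() >= 29) {
    // New layout: samples hang directly off the counter object.
    return counter["samples"];
  }
  // Old layout: an array of sample groups (in practice one, with id 0)
  // wrapped the samples.
  return counter["sample_groups"][0]["samples"];
}

Since the column order also changed, robust consumers should keep reading column indices out of the "schema" object rather than hard-coding them, as the updated gtests below do.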

View file

@@ -43,7 +43,6 @@ static constexpr size_t ProfileBufferEntryNumChars = 8;
MACRO(TimeBeforeCompactStack, double, sizeof(double)) \
MACRO(TimeBeforeSameSample, double, sizeof(double)) \
MACRO(CounterId, void*, sizeof(void*)) \
MACRO(CounterKey, uint64_t, sizeof(uint64_t)) \
MACRO(Number, uint64_t, sizeof(uint64_t)) \
MACRO(Count, int64_t, sizeof(int64_t)) \
MACRO(ProfilerOverheadTime, double, sizeof(double)) \

View file

@@ -1774,18 +1774,16 @@ void ProfileBuffer::StreamProfilerOverheadToJSON(
});
}
struct CounterKeyedSample {
struct CounterSample {
double mTime;
uint64_t mNumber;
int64_t mCount;
};
using CounterKeyedSamples = Vector<CounterKeyedSample>;
using CounterSamples = Vector<CounterSample>;
static LazyLogModule sFuzzyfoxLog("Fuzzyfox");
using CounterMap = HashMap<uint64_t, CounterKeyedSamples>;
// HashMap lookup; if the key is not found, a default value is inserted.
// Returns a reference to the (existing or new) value inside the HashMap.
template <typename HashM, typename Key>
@@ -1822,41 +1820,38 @@ void ProfileBuffer::StreamCountersToJSON(
// Valid sequence in the buffer:
// CounterID
// Time
// ( CounterKey Count Number? )*
// ( Count Number? )*
//
// And the JSON (example):
// "counters": {
// "name": "malloc",
// "category": "Memory",
// "description": "Amount of allocated memory",
// "sample_groups": {
// "id": 0,
// "samples": {
// "schema": {"time": 0, "number": 1, "count": 2},
// "data": [
// [
// 16117.033968000002,
// 2446216,
// 6801320
// ],
// [
// 16118.037638,
// 2446216,
// 6801320
// ],
// "samples": {
// "schema": {"time": 0, "count": 1, "number": 2},
// "data": [
// [
// 16117.033968000002,
// 2446216,
// 6801320
// ],
// }
// }
// },
// [
// 16118.037638,
// 2446216,
// 6801320
// ],
// ],
// },
// }
// Build the map of counters and populate it
HashMap<void*, CounterMap> counters;
HashMap<void*, CounterSamples> counters;
while (e.Has()) {
// skip all non-Counters, including if we start in the middle of a counter
if (e.Get().IsCounterId()) {
void* id = e.Get().GetPtr();
CounterMap& counter = LookupOrAdd(counters, id);
CounterSamples& data = LookupOrAdd(counters, id);
e.Next();
if (!e.Has() || !e.Get().IsTime()) {
ERROR_AND_CONTINUE("expected a Time entry");
@@ -1864,25 +1859,20 @@ void ProfileBuffer::StreamCountersToJSON(
double time = e.Get().GetDouble();
e.Next();
if (time >= aSinceTime) {
while (e.Has() && e.Get().IsCounterKey()) {
uint64_t key = e.Get().GetUint64();
CounterKeyedSamples& data = LookupOrAdd(counter, key);
e.Next();
if (!e.Has() || !e.Get().IsCount()) {
ERROR_AND_CONTINUE("expected a Count entry");
}
int64_t count = e.Get().GetUint64();
e.Next();
uint64_t number;
if (!e.Has() || !e.Get().IsNumber()) {
number = 0;
} else {
number = e.Get().GetInt64();
e.Next();
}
CounterKeyedSample sample = {time, number, count};
MOZ_RELEASE_ASSERT(data.append(sample));
if (!e.Has() || !e.Get().IsCount()) {
ERROR_AND_CONTINUE("expected a Count entry");
}
int64_t count = e.Get().GetUint64();
e.Next();
uint64_t number;
if (!e.Has() || !e.Get().IsNumber()) {
number = 0;
} else {
number = e.Get().GetInt64();
e.Next();
}
CounterSample sample = {time, number, count};
MOZ_RELEASE_ASSERT(data.append(sample));
} else {
// skip counter sample - only need to skip the initial counter
// id, then let the loop at the top skip the rest
@@ -1891,14 +1881,18 @@ void ProfileBuffer::StreamCountersToJSON(
e.Next();
}
}
// we have a map of a map of counter entries; dump them to JSON
// we have a map of counter entries; dump them to JSON
if (counters.count() == 0) {
return;
}
aWriter.StartArrayProperty("counters");
for (auto iter = counters.iter(); !iter.done(); iter.next()) {
CounterMap& counter = iter.get().value();
CounterSamples& samples = iter.get().value();
size_t size = samples.length();
if (size == 0) {
continue;
}
const BaseProfilerCount* base_counter =
static_cast<const BaseProfilerCount*>(iter.get().key());
@@ -1909,103 +1903,83 @@ void ProfileBuffer::StreamCountersToJSON(
aWriter.StringProperty("description",
MakeStringSpan(base_counter->mDescription));
aWriter.StartArrayProperty("sample_groups");
for (auto counter_iter = counter.iter(); !counter_iter.done();
counter_iter.next()) {
CounterKeyedSamples& samples = counter_iter.get().value();
uint64_t key = counter_iter.get().key();
size_t size = samples.length();
if (size == 0) {
continue;
bool hasNumber = false;
for (size_t i = 0; i < size; i++) {
if (samples[i].mNumber != 0) {
hasNumber = true;
break;
}
bool hasNumber = false;
for (size_t i = 0; i < size; i++) {
if (samples[i].mNumber != 0) {
hasNumber = true;
break;
}
}
aWriter.StartObjectElement();
{
aWriter.IntProperty("id", static_cast<int64_t>(key));
aWriter.StartObjectProperty("samples");
{
JSONSchemaWriter schema(aWriter);
schema.WriteField("time");
schema.WriteField("count");
if (hasNumber) {
schema.WriteField("number");
}
}
aWriter.StartArrayProperty("data");
double previousSkippedTime = 0.0;
uint64_t previousNumber = 0;
int64_t previousCount = 0;
for (size_t i = 0; i < size; i++) {
// Encode as deltas, and only encode if different from the previous
// or next sample; always write the first and last samples.
if (i == 0 || i == size - 1 ||
samples[i].mNumber != previousNumber ||
samples[i].mCount != previousCount ||
// Ensure we output the first 0 before skipping samples.
(i >= 2 && (samples[i - 2].mNumber != previousNumber ||
samples[i - 2].mCount != previousCount))) {
if (i != 0 && samples[i].mTime >= samples[i - 1].mTime) {
MOZ_LOG(sFuzzyfoxLog, mozilla::LogLevel::Error,
("Fuzzyfox Profiler Assertion: %f >= %f",
samples[i].mTime, samples[i - 1].mTime));
}
MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime);
MOZ_ASSERT(samples[i].mNumber >= previousNumber);
MOZ_ASSERT(samples[i].mNumber - previousNumber <=
uint64_t(std::numeric_limits<int64_t>::max()));
int64_t numberDelta =
static_cast<int64_t>(samples[i].mNumber - previousNumber);
int64_t countDelta = samples[i].mCount - previousCount;
if (previousSkippedTime != 0.0 &&
(numberDelta != 0 || countDelta != 0)) {
// Write the last skipped sample, unless the new one is all
// zeroes (that'd be redundant). This is useful to know when a
// certain value was last sampled, so that the front-end graph
// will be more correct.
AutoArraySchemaWriter writer(aWriter);
writer.TimeMsElement(TIME, previousSkippedTime);
// The deltas are effectively zeroes, since no change happened
// between the last actually-written sample and the last skipped
// one.
writer.IntElement(COUNT, 0);
if (hasNumber) {
writer.IntElement(NUMBER, 0);
}
}
AutoArraySchemaWriter writer(aWriter);
writer.TimeMsElement(TIME, samples[i].mTime);
writer.IntElement(COUNT, countDelta);
if (hasNumber) {
writer.IntElement(NUMBER, numberDelta);
}
previousSkippedTime = 0.0;
previousNumber = samples[i].mNumber;
previousCount = samples[i].mCount;
} else {
previousSkippedTime = samples[i].mTime;
}
}
aWriter.EndArray(); // data
aWriter.EndObject(); // samples
}
aWriter.EndObject(); // sample_groups item
}
aWriter.EndArray(); // sample groups
aWriter.End(); // for each counter
aWriter.StartObjectProperty("samples");
{
JSONSchemaWriter schema(aWriter);
schema.WriteField("time");
schema.WriteField("count");
if (hasNumber) {
schema.WriteField("number");
}
}
aWriter.StartArrayProperty("data");
double previousSkippedTime = 0.0;
uint64_t previousNumber = 0;
int64_t previousCount = 0;
for (size_t i = 0; i < size; i++) {
// Encode as deltas, and only encode if different from the previous
// or next sample; always write the first and last samples.
if (i == 0 || i == size - 1 || samples[i].mNumber != previousNumber ||
samples[i].mCount != previousCount ||
// Ensure we output the first 0 before skipping samples.
(i >= 2 && (samples[i - 2].mNumber != previousNumber ||
samples[i - 2].mCount != previousCount))) {
if (i != 0 && samples[i].mTime >= samples[i - 1].mTime) {
MOZ_LOG(sFuzzyfoxLog, mozilla::LogLevel::Error,
("Fuzzyfox Profiler Assertion: %f >= %f", samples[i].mTime,
samples[i - 1].mTime));
}
MOZ_ASSERT(i == 0 || samples[i].mTime >= samples[i - 1].mTime);
MOZ_ASSERT(samples[i].mNumber >= previousNumber);
MOZ_ASSERT(samples[i].mNumber - previousNumber <=
uint64_t(std::numeric_limits<int64_t>::max()));
int64_t numberDelta =
static_cast<int64_t>(samples[i].mNumber - previousNumber);
int64_t countDelta = samples[i].mCount - previousCount;
if (previousSkippedTime != 0.0 &&
(numberDelta != 0 || countDelta != 0)) {
// Write the last skipped sample, unless the new one is all
// zeroes (that'd be redundant). This is useful to know when a
// certain value was last sampled, so that the front-end graph
// will be more correct.
AutoArraySchemaWriter writer(aWriter);
writer.TimeMsElement(TIME, previousSkippedTime);
// The deltas are effectively zeroes, since no change happened
// between the last actually-written sample and the last skipped
// one.
writer.IntElement(COUNT, 0);
if (hasNumber) {
writer.IntElement(NUMBER, 0);
}
}
AutoArraySchemaWriter writer(aWriter);
writer.TimeMsElement(TIME, samples[i].mTime);
writer.IntElement(COUNT, countDelta);
if (hasNumber) {
writer.IntElement(NUMBER, numberDelta);
}
previousSkippedTime = 0.0;
previousNumber = samples[i].mNumber;
previousCount = samples[i].mCount;
} else {
previousSkippedTime = samples[i].mTime;
}
}
aWriter.EndArray(); // data
aWriter.EndObject(); // samples
aWriter.End(); // for each counter
}
aWriter.EndArray(); // counters
});
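This Gecko variant of the encoder additionally back-fills the most recently skipped timestamp before writing a change, so the front-end graph holds the old value up to that point instead of ramping across the gap. Simplified and standalone (it drops the i >= 2 look-behind and the Fuzzyfox logging; EncodeWithBackfill and Row are illustrative names, not profiler API):

#include <cstdint>
#include <vector>

struct CounterSample {
  double mTime;
  uint64_t mNumber;
  int64_t mCount;
};

struct Row {
  double mTime;
  int64_t mCountDelta;
  int64_t mNumberDelta;
};

std::vector<Row> EncodeWithBackfill(
    const std::vector<CounterSample>& aSamples) {
  std::vector<Row> rows;
  double previousSkippedTime = 0.0;
  uint64_t previousNumber = 0;
  int64_t previousCount = 0;
  for (size_t i = 0; i < aSamples.size(); i++) {
    bool changed = aSamples[i].mNumber != previousNumber ||
                   aSamples[i].mCount != previousCount;
    // Always keep the first and last samples; otherwise only keep changes.
    if (i == 0 || i == aSamples.size() - 1 || changed) {
      int64_t countDelta = aSamples[i].mCount - previousCount;
      int64_t numberDelta =
          static_cast<int64_t>(aSamples[i].mNumber - previousNumber);
      if (previousSkippedTime != 0.0 &&
          (countDelta != 0 || numberDelta != 0)) {
        // Re-emit the last skipped time with zero deltas: the value was
        // still unchanged then, and the change happens only at this sample.
        rows.push_back({previousSkippedTime, 0, 0});
      }
      rows.push_back({aSamples[i].mTime, countDelta, numberDelta});
      previousSkippedTime = 0.0;
      previousNumber = aSamples[i].mNumber;
      previousCount = aSamples[i].mCount;
    } else {
      previousSkippedTime = aSamples[i].mTime;
    }
  }
  return rows;
}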
@@ -2228,7 +2202,6 @@ bool ProfileBuffer::DuplicateLastSample(ProfilerThreadId aThreadId,
// We're done.
return true;
}
case ProfileBufferEntry::Kind::CounterKey:
case ProfileBufferEntry::Kind::Number:
case ProfileBufferEntry::Kind::Count:
// Don't copy anything not part of a thread's stack sample

View file

@@ -4236,10 +4236,6 @@ void SamplerThread::Run() {
ActivePS::ControlledChunkManager(lock).TotalSize());
}
#endif
// In the future, we may support keyed counters - for example,
// counters with a key which is a thread ID. For "simple" counters
// we'll just use a key of 0.
buffer.AddEntry(ProfileBufferEntry::CounterKey(0));
buffer.AddEntry(ProfileBufferEntry::Count(sample.count));
if (sample.number) {
buffer.AddEntry(ProfileBufferEntry::Number(sample.number));

View file

@@ -1393,30 +1393,24 @@ static void JSONRootCheck(const Json::Value& aRoot,
EXPECT_HAS_JSON(counter["name"], String);
EXPECT_HAS_JSON(counter["category"], String);
EXPECT_HAS_JSON(counter["description"], String);
GET_JSON(sampleGroups, counter["sample_groups"], Array);
for (const Json::Value& sampleGroup : sampleGroups) {
ASSERT_TRUE(sampleGroup.isObject());
EXPECT_HAS_JSON(sampleGroup["id"], UInt);
GET_JSON(samples, sampleGroup["samples"], Object);
GET_JSON(samplesSchema, samples["schema"], Object);
EXPECT_GE(samplesSchema.size(), 3u);
GET_JSON_VALUE(samplesTime, samplesSchema["time"], UInt);
GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt);
GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt);
GET_JSON(samplesData, samples["data"], Array);
double previousTime = 0.0;
for (const Json::Value& sample : samplesData) {
ASSERT_TRUE(sample.isArray());
GET_JSON_VALUE(time, sample[samplesTime], Double);
EXPECT_GE(time, previousTime);
previousTime = time;
if (sample.isValidIndex(samplesNumber)) {
EXPECT_HAS_JSON(sample[samplesNumber], UInt64);
}
if (sample.isValidIndex(samplesCount)) {
EXPECT_HAS_JSON(sample[samplesCount], Int64);
}
GET_JSON(samples, counter["samples"], Object);
GET_JSON(samplesSchema, samples["schema"], Object);
EXPECT_GE(samplesSchema.size(), 3u);
GET_JSON_VALUE(samplesTime, samplesSchema["time"], UInt);
GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt);
GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt);
GET_JSON(samplesData, samples["data"], Array);
double previousTime = 0.0;
for (const Json::Value& sample : samplesData) {
ASSERT_TRUE(sample.isArray());
GET_JSON_VALUE(time, sample[samplesTime], Double);
EXPECT_GE(time, previousTime);
previousTime = time;
if (sample.isValidIndex(samplesNumber)) {
EXPECT_HAS_JSON(sample[samplesNumber], UInt64);
}
if (sample.isValidIndex(samplesCount)) {
EXPECT_HAS_JSON(sample[samplesCount], Int64);
}
}
}
@@ -3572,56 +3566,42 @@ TEST(GeckoProfiler, Counters)
if (name == "TestCounter") {
EXPECT_EQ_JSON(counter["category"], String, COUNTER_NAME);
EXPECT_EQ_JSON(counter["description"], String, COUNTER_DESCRIPTION);
GET_JSON(sampleGroups, counter["sample_groups"], Array);
for (const Json::Value& sampleGroup : sampleGroups) {
ASSERT_TRUE(sampleGroup.isObject());
EXPECT_EQ_JSON(sampleGroup["id"], UInt, 0u);
GET_JSON(samples, sampleGroup["samples"], Object);
GET_JSON(samplesSchema, samples["schema"], Object);
EXPECT_GE(samplesSchema.size(), 3u);
GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt);
GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt);
GET_JSON(samplesData, samples["data"], Array);
for (const Json::Value& sample : samplesData) {
ASSERT_TRUE(sample.isArray());
ASSERT_LT(nextExpectedTestCounter, expectedTestCountersCount);
EXPECT_EQ_JSON(
sample[samplesNumber], UInt64,
expectedTestCounters[nextExpectedTestCounter].mNumber);
EXPECT_EQ_JSON(
sample[samplesCount], Int64,
expectedTestCounters[nextExpectedTestCounter].mCount);
++nextExpectedTestCounter;
}
GET_JSON(samples, counter["samples"], Object);
GET_JSON(samplesSchema, samples["schema"], Object);
EXPECT_GE(samplesSchema.size(), 3u);
GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt);
GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt);
GET_JSON(samplesData, samples["data"], Array);
for (const Json::Value& sample : samplesData) {
ASSERT_TRUE(sample.isArray());
ASSERT_LT(nextExpectedTestCounter, expectedTestCountersCount);
EXPECT_EQ_JSON(sample[samplesNumber], UInt64,
expectedTestCounters[nextExpectedTestCounter].mNumber);
EXPECT_EQ_JSON(sample[samplesCount], Int64,
expectedTestCounters[nextExpectedTestCounter].mCount);
++nextExpectedTestCounter;
}
} else if (name == "TestCounter2") {
EXPECT_TRUE(expectCounter2);
EXPECT_EQ_JSON(counter["category"], String, COUNTER_NAME2);
EXPECT_EQ_JSON(counter["description"], String, COUNTER_DESCRIPTION2);
GET_JSON(sampleGroups, counter["sample_groups"], Array);
for (const Json::Value& sampleGroup : sampleGroups) {
ASSERT_TRUE(sampleGroup.isObject());
EXPECT_EQ_JSON(sampleGroup["id"], UInt, 0u);
GET_JSON(samples, sampleGroup["samples"], Object);
GET_JSON(samplesSchema, samples["schema"], Object);
EXPECT_GE(samplesSchema.size(), 3u);
GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt);
GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt);
GET_JSON(samplesData, samples["data"], Array);
for (const Json::Value& sample : samplesData) {
ASSERT_TRUE(sample.isArray());
ASSERT_LT(nextExpectedTestCounter2, expectedTestCounters2Count);
EXPECT_EQ_JSON(
sample[samplesNumber], UInt64,
expectedTestCounters2[nextExpectedTestCounter2].mNumber);
EXPECT_EQ_JSON(
sample[samplesCount], Int64,
expectedTestCounters2[nextExpectedTestCounter2].mCount);
++nextExpectedTestCounter2;
}
GET_JSON(samples, counter["samples"], Object);
GET_JSON(samplesSchema, samples["schema"], Object);
EXPECT_GE(samplesSchema.size(), 3u);
GET_JSON_VALUE(samplesNumber, samplesSchema["number"], UInt);
GET_JSON_VALUE(samplesCount, samplesSchema["count"], UInt);
GET_JSON(samplesData, samples["data"], Array);
for (const Json::Value& sample : samplesData) {
ASSERT_TRUE(sample.isArray());
ASSERT_LT(nextExpectedTestCounter2, expectedTestCounters2Count);
EXPECT_EQ_JSON(
sample[samplesNumber], UInt64,
expectedTestCounters2[nextExpectedTestCounter2].mNumber);
EXPECT_EQ_JSON(
sample[samplesCount], Int64,
expectedTestCounters2[nextExpectedTestCounter2].mCount);
++nextExpectedTestCounter2;
}
}
}