Mirror of https://github.com/mozilla/glean.git
[UniFFI] Migrate memory distribution metric type implementation
This commit is contained in:
Parent: 75ce66a603
Commit: ebcc9ca694
@@ -18,7 +18,7 @@ interface HistogramBase {
* method was called on is using [TimeUnit.SECOND], then `samples` are assumed
* to be in that unit).
*
* @param samples the [LongArray] holding the samples to be recorded by the metric.
* @param samples the [List<Long>] holding the samples to be recorded by the metric.
*/
fun accumulateSamples(samples: LongArray)
fun accumulateSamples(samples: List<Long>)
}
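Note: on the Kotlin side this changes call sites from array arguments to list arguments. A minimal sketch (the `metric` value here is hypothetical and stands for any metric type implementing HistogramBase):

    // Before: samples were passed as a LongArray.
    metric.accumulateSamples(longArrayOf(1L, 2L, 3L))
    // After: samples are passed as a List<Long>, matching the UniFFI-generated signature.
    metric.accumulateSamples(listOf(1L, 2L, 3L))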
@@ -5,12 +5,7 @@
package mozilla.telemetry.glean.private

import androidx.annotation.VisibleForTesting
import com.sun.jna.StringArray
import mozilla.telemetry.glean.Dispatchers
import mozilla.telemetry.glean.rust.LibGleanFFI
import mozilla.telemetry.glean.rust.getAndConsumeRustString
import mozilla.telemetry.glean.rust.toBoolean
import mozilla.telemetry.glean.rust.toByte
import mozilla.telemetry.glean.internal.MemoryDistributionMetric
import mozilla.telemetry.glean.testing.ErrorType

/**

@@ -19,138 +14,32 @@ import mozilla.telemetry.glean.testing.ErrorType
* Instances of this class type are automatically generated by the parsers at build time,
* allowing developers to record values that were previously registered in the metrics.yaml file.
*/
class MemoryDistributionMetricType internal constructor(
private var handle: Long,
private val disabled: Boolean,
private val sendInPings: List<String>
) : HistogramBase {
/**
* The public constructor used by automatically generated metrics.
*/
constructor(
disabled: Boolean,
category: String,
lifetime: Lifetime,
name: String,
sendInPings: List<String>,
memoryUnit: MemoryUnit
) : this(handle = 0, disabled = disabled, sendInPings = sendInPings) {
val ffiPingsList = StringArray(sendInPings.toTypedArray(), "utf-8")
this.handle = LibGleanFFI.INSTANCE.glean_new_memory_distribution_metric(
category = category,
name = name,
send_in_pings = ffiPingsList,
send_in_pings_len = sendInPings.size,
lifetime = lifetime.ordinal,
disabled = disabled.toByte(),
memory_unit = memoryUnit.ordinal
)
}
class MemoryDistributionMetricType(meta: CommonMetricData, memoryUnit: MemoryUnit) : HistogramBase {
val inner = MemoryDistributionMetric(meta, memoryUnit)

/**
* Record a single value, in the unit specified by `memoryUnit`, to the distribution.
*
* @param sample the value
* Delegate common methods to the underlying type directly.
*/
fun accumulate(sample: Long) {
if (disabled) {
return
}

@Suppress("EXPERIMENTAL_API_USAGE")
Dispatchers.API.launch {
LibGleanFFI.INSTANCE.glean_memory_distribution_accumulate(
this@MemoryDistributionMetricType.handle,
sample
)
}
}
fun accumulate(sample: Long ) = inner.accumulate(sample)

override fun accumulateSamples(samples: LongArray) {
if (disabled) {
return
}

// The reason we're using [Long](s) instead of [UInt](s) in Kotlin-land is
// the lack of [UInt] (in stable form). The positive part of [Int] would not
// be enough to represent the values coming in:
// - [UInt.MAX_VALUE] is 4294967295
// - [Int.MAX_VALUE] is 2147483647
// - [Long.MAX_VALUE] is 9223372036854775807
//
// On the rust side, Long(s) are handled as i64 and then casted to u64.
@Suppress("EXPERIMENTAL_API_USAGE")
Dispatchers.API.launch {
LibGleanFFI.INSTANCE.glean_memory_distribution_accumulate_samples(
this@MemoryDistributionMetricType.handle,
samples,
samples.size
)
}
}
override fun accumulateSamples(samples: List<Long>) = inner.accumulateSamples(samples)

/**
* Tests whether a value is stored for the metric for testing purposes only. This function will
* attempt to await the last task (if any) writing to the the metric's storage engine before
* returning a value.
*
* @param pingName represents the name of the ping to retrieve the metric for.
* Defaults to the first value in `sendInPings`.
* @return true if metric value exists, otherwise false
* Testing-only methods get an annotation
*/

@VisibleForTesting(otherwise = VisibleForTesting.NONE)
@JvmOverloads
fun testHasValue(pingName: String = sendInPings.first()): Boolean {
@Suppress("EXPERIMENTAL_API_USAGE")
Dispatchers.API.assertInTestingMode()
fun testGetValue(pingName: String? = null) = inner.testGetValue(pingName)

return LibGleanFFI
.INSTANCE.glean_memory_distribution_test_has_value(this.handle, pingName)
.toBoolean()
}

/**
* Returns the stored value for testing purposes only. This function will attempt to await the
* last task (if any) writing to the the metric's storage engine before returning a value.
*
* @param pingName represents the name of the ping to retrieve the metric for.
* Defaults to the first value in `sendInPings`.
* @return value of the stored metric
* @throws [NullPointerException] if no value is stored
*/
@VisibleForTesting(otherwise = VisibleForTesting.NONE)
@JvmOverloads
fun testGetValue(pingName: String = sendInPings.first()): DistributionData {
@Suppress("EXPERIMENTAL_API_USAGE")
Dispatchers.API.assertInTestingMode()
fun testGetNumRecordedErrors(error: ErrorType, pingName: String? = null) = inner.testGetNumRecordedErrors(error, pingName)

if (!testHasValue(pingName)) {
throw NullPointerException("Metric has no value")
}

val ptr = LibGleanFFI.INSTANCE.glean_memory_distribution_test_get_value_as_json_string(
this.handle,
pingName)!!

return DistributionData.fromJsonString(ptr.getAndConsumeRustString())!!
}

/**
* Returns the number of errors recorded for the given metric.
*
* @param errorType The type of the error recorded.
* @param pingName represents the name of the ping to retrieve the metric for.
* Defaults to the first value in `sendInPings`.
* @return the number of errors recorded for the metric.
*/
@VisibleForTesting(otherwise = VisibleForTesting.NONE)
@JvmOverloads
fun testGetNumRecordedErrors(errorType: ErrorType, pingName: String = sendInPings.first()): Int {
@Suppress("EXPERIMENTAL_API_USAGE")
Dispatchers.API.assertInTestingMode()

return LibGleanFFI.INSTANCE.glean_memory_distribution_test_get_num_recorded_errors(
this.handle, errorType.ordinal, pingName
)
fun testHasValue(pingName: String? = null): Boolean {
return this.testGetValue(pingName) != null
}
}
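Note: the migrated wrapper above simply delegates to the UniFFI-generated MemoryDistributionMetric. A minimal usage sketch, mirroring the updated tests that follow (it assumes Glean has already been initialized, e.g. via GleanTestRule in tests):

    val metric = MemoryDistributionMetricType(
        CommonMetricData(
            disabled = false,
            category = "telemetry",
            lifetime = Lifetime.PING,
            name = "memory_distribution",
            sendInPings = listOf("store1"),
        ),
        memoryUnit = MemoryUnit.KILOBYTE,
    )

    // Recording is dispatched asynchronously; the sample is interpreted in kilobytes.
    metric.accumulate(2L)

    // testGetValue() awaits pending work and now returns null when nothing was recorded.
    val snapshot = metric.testGetValue()
    if (snapshot != null) {
        println(snapshot.sum)            // accumulated sum in bytes (2048 here)
        println(snapshot.values["2047"]) // bucket counts are now keyed by strings
    }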
@@ -5,7 +5,6 @@
package mozilla.telemetry.glean.private

import androidx.test.core.app.ApplicationProvider
import java.lang.NullPointerException
import kotlinx.coroutines.ExperimentalCoroutinesApi
import kotlinx.coroutines.ObsoleteCoroutinesApi
import mozilla.telemetry.glean.testing.ErrorType

@@ -13,6 +12,7 @@ import mozilla.telemetry.glean.testing.GleanTestRule
import org.junit.Assert.assertEquals
import org.junit.Assert.assertFalse
import org.junit.Assert.assertTrue
import org.junit.Assert.assertNull
import org.junit.Rule
import org.junit.Test
import org.junit.runner.RunWith

@@ -29,14 +29,13 @@ class MemoryDistributionMetricTypeTest {
@Test
fun `The API saves to its storage engine`() {
// Define a memory distribution metric which will be stored in "store1"
val metric = MemoryDistributionMetricType(
val metric = MemoryDistributionMetricType(CommonMetricData(
disabled = false,
category = "telemetry",
lifetime = Lifetime.Ping,
lifetime = Lifetime.PING,
name = "memory_distribution",
sendInPings = listOf("store1"),
memoryUnit = MemoryUnit.Kilobyte
)
), memoryUnit = MemoryUnit.KILOBYTE)

// Accumulate a few values
for (i in 1L..3L) {

@@ -47,54 +46,52 @@ class MemoryDistributionMetricTypeTest {

// Check that data was properly recorded.
assertTrue(metric.testHasValue())
val snapshot = metric.testGetValue()
val snapshot = metric.testGetValue()!!
// Check the sum
assertEquals(1L * kb + 2L * kb + 3L * kb, snapshot.sum)
// Check that the 1L fell into the first value bucket
assertEquals(1L, snapshot.values[1023])
assertEquals(1L, snapshot.values["1023"])
// Check that the 2L fell into the second value bucket
assertEquals(1L, snapshot.values[2047])
assertEquals(1L, snapshot.values["2047"])
// Check that the 3L fell into the third value bucket
assertEquals(1L, snapshot.values[3024])
assertEquals(1L, snapshot.values["3024"])
}

@Test
fun `values are truncated to 1TB`() {
// Define a memory distribution metric which will be stored in "store1"
val metric = MemoryDistributionMetricType(
val metric = MemoryDistributionMetricType(CommonMetricData(
disabled = false,
category = "telemetry",
lifetime = Lifetime.Ping,
lifetime = Lifetime.PING,
name = "memory_distribution",
sendInPings = listOf("store1"),
memoryUnit = MemoryUnit.Gigabyte
)
), memoryUnit = MemoryUnit.GIGABYTE)

metric.accumulate(2048L)

// Check that data was properly recorded.
assertTrue(metric.testHasValue())
val snapshot = metric.testGetValue()
val snapshot = metric.testGetValue()!!
// Check the sum
assertEquals(1L shl 40, snapshot.sum)
// Check that the 1L fell into 1TB bucket
assertEquals(1L, snapshot.values[(1L shl 40) - 1])
assertEquals(1L, snapshot.values[((1L shl 40) - 1).toString()])
// Check that an error was recorded
assertEquals(1, metric.testGetNumRecordedErrors(ErrorType.InvalidValue))
assertEquals(1, metric.testGetNumRecordedErrors(ErrorType.INVALID_VALUE))
}

@Test
fun `disabled memory distributions must not record data`() {
// Define a memory distribution metric which will be stored in "store1"
// It's lifetime is set to Lifetime.Ping so it should not record anything.
val metric = MemoryDistributionMetricType(
// It's lifetime is set to Lifetime.PING SO IT SHOULD NOT RECORD ANYTHING.
val metric = MemoryDistributionMetricType(CommonMetricData(
disabled = true,
category = "telemetry",
lifetime = Lifetime.Ping,
lifetime = Lifetime.PING,
name = "memory_distribution",
sendInPings = listOf("store1"),
memoryUnit = MemoryUnit.Kilobyte
)
), memoryUnit = MemoryUnit.KILOBYTE)

metric.accumulate(1L)

@@ -103,31 +100,29 @@ class MemoryDistributionMetricTypeTest {
metric.testHasValue())
}

@Test(expected = NullPointerException::class)
@Test
fun `testGetValue() throws NullPointerException if nothing is stored`() {
// Define a memory distribution metric which will be stored in "store1"
val metric = MemoryDistributionMetricType(
val metric = MemoryDistributionMetricType(CommonMetricData(
disabled = false,
category = "telemetry",
lifetime = Lifetime.Ping,
lifetime = Lifetime.PING,
name = "memory_distribution",
sendInPings = listOf("store1"),
memoryUnit = MemoryUnit.Kilobyte
)
metric.testGetValue()
), memoryUnit = MemoryUnit.KILOBYTE)
assertNull(metric.testGetValue())
}

@Test
fun `The API saves to secondary pings`() {
// Define a memory distribution metric which will be stored in multiple stores
val metric = MemoryDistributionMetricType(
val metric = MemoryDistributionMetricType(CommonMetricData(
disabled = false,
category = "telemetry",
lifetime = Lifetime.Ping,
lifetime = Lifetime.PING,
name = "memory_distribution",
sendInPings = listOf("store1", "store2", "store3"),
memoryUnit = MemoryUnit.Kilobyte
)
), memoryUnit = MemoryUnit.KILOBYTE)

// Accumulate a few values
for (i in 1L..3L) {

@@ -136,48 +131,47 @@ class MemoryDistributionMetricTypeTest {

// Check that data was properly recorded in the second ping.
assertTrue(metric.testHasValue("store2"))
val snapshot = metric.testGetValue("store2")
val snapshot = metric.testGetValue("store2")!!
// Check the sum
assertEquals(6144L, snapshot.sum)
// Check that the 1L fell into the first bucket
assertEquals(1L, snapshot.values[1023])
assertEquals(1L, snapshot.values["1023"])
// Check that the 2L fell into the second bucket
assertEquals(1L, snapshot.values[2047])
assertEquals(1L, snapshot.values["2047"])
// Check that the 3L fell into the third bucket
assertEquals(1L, snapshot.values[3024])
assertEquals(1L, snapshot.values["3024"])

// Check that data was properly recorded in the third ping.
assertTrue(metric.testHasValue("store3"))
val snapshot2 = metric.testGetValue("store3")
val snapshot2 = metric.testGetValue("store3")!!
// Check the sum
assertEquals(6144L, snapshot2.sum)
// Check that the 1L fell into the first bucket
assertEquals(1L, snapshot2.values[1023])
assertEquals(1L, snapshot2.values["1023"])
// Check that the 2L fell into the second bucket
assertEquals(1L, snapshot2.values[2047])
assertEquals(1L, snapshot2.values["2047"])
// Check that the 3L fell into the third bucket
assertEquals(1L, snapshot2.values[3024])
assertEquals(1L, snapshot2.values["3024"])
}

@Test
fun `The accumulateSamples API correctly stores memory values`() {
// Define a memory distribution metric which will be stored in multiple stores
val metric = MemoryDistributionMetricType(
val metric = MemoryDistributionMetricType(CommonMetricData(
disabled = false,
category = "telemetry",
lifetime = Lifetime.Ping,
lifetime = Lifetime.PING,
name = "memory_distribution_samples",
sendInPings = listOf("store1"),
memoryUnit = MemoryUnit.Kilobyte
)
), memoryUnit = MemoryUnit.KILOBYTE)

// Accumulate a few values
val testSamples = (1L..3L).toList().toLongArray()
val testSamples = (1L..3L).toList()
metric.accumulateSamples(testSamples)

// Check that data was properly recorded in the second ping.
assertTrue(metric.testHasValue("store1"))
val snapshot = metric.testGetValue("store1")
val snapshot = metric.testGetValue("store1")!!
// Check the sum
val kb = 1024L
assertEquals(6L * kb, snapshot.sum)

@@ -186,8 +180,8 @@ class MemoryDistributionMetricTypeTest {
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`,
// which lives in the Rust code.
assertEquals(1L, snapshot.values[1023])
assertEquals(1L, snapshot.values[2047])
assertEquals(1L, snapshot.values[3024])
assertEquals(1L, snapshot.values["1023"])
assertEquals(1L, snapshot.values["2047"])
assertEquals(1L, snapshot.values["3024"])
}
}
@@ -330,7 +330,7 @@ impl Glean {
.and_then(|database| database.file_size())
{
log::trace!("Database file size: {}", size.get());
self.database_metrics.size.accumulate(self, size.get())
self.database_metrics.size.accumulate_sync(self, size.get() as i64)
}
}
@@ -283,3 +283,39 @@ interface QuantityMetric {

i32 test_get_num_recorded_errors(ErrorType error, optional string? ping_name = null);
};

// A snapshot of all buckets and the accumulated sum of a distribution.
dictionary DistributionData {
// A map containig the bucket index mapped to the accumulated count.
//
// This can contain buckets with a count of `0`.
//
// Note: UniFFI does not support integer keys right now.
record<DOMString, i64> values;

// The accumulated sum of all the samples in the distribution.
i64 sum;
};

enum MemoryUnit {
// 1 byte
"Byte",
// 2^10 bytes
"Kilobyte",
// 2^20 bytes
"Megabyte",
// 2^30 bytes
"Gigabyte",
};

interface MemoryDistributionMetric {
constructor(CommonMetricData meta, MemoryUnit memory_unit);

void accumulate(i64 sample);

void accumulate_samples(sequence<i64> samples);

DistributionData? test_get_value(optional string? ping_name = null);

i32 test_get_num_recorded_errors(ErrorType error, optional string? ping_name = null);
};
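Note: rough shape of the Kotlin types UniFFI generates from the UDL definitions above (a sketch; it assumes the usual UniFFI mappings of record<DOMString, i64> to Map<String, Long> and i64 to Long, which is why the Kotlin tests now look buckets up by string key):

    data class DistributionData(
        // Bucket minimum (as a stringified integer) mapped to the accumulated count;
        // may contain buckets with a count of 0.
        val values: Map<String, Long>,
        // The accumulated sum of all samples, in bytes.
        val sum: Long,
    )

    enum class MemoryUnit {
        BYTE,      // 1 byte
        KILOBYTE,  // 2^10 bytes
        MEGABYTE,  // 2^20 bytes
        GIGABYTE,  // 2^30 bytes
    }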
@@ -52,6 +52,9 @@ pub use crate::metrics::labeled::{LabeledBoolean, LabeledCounter, LabeledString}
pub use crate::metrics::{
BooleanMetric, CounterMetric, PingType, QuantityMetric, RecordedExperiment, StringListMetric,
StringMetric, TimeUnit, TimespanMetric, UrlMetric, UuidMetric,
MemoryDistributionMetric,
DistributionData,
MemoryUnit
};
pub use crate::upload::{PingRequest, PingUploadTask, UploadResult};
@@ -26,8 +26,8 @@ pub struct CustomDistributionMetric {
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot<B: Bucketing>(hist: &Histogram<B>) -> DistributionData {
DistributionData {
values: hist.snapshot_values(),
sum: hist.sum(),
values: hist.snapshot_values().into_iter().map(|(k, v)| (k.to_string(), v as i64)).collect(),
sum: hist.sum() as i64,
}
}
@@ -2,7 +2,9 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

use crate::error_recording::{record_error, ErrorType};
use std::sync::Arc;

use crate::error_recording::{record_error, test_get_num_recorded_errors, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::memory_unit::MemoryUnit;
use crate::metrics::{DistributionData, Metric, MetricType};

@@ -23,9 +25,9 @@ const MAX_BYTES: u64 = 1 << 40;
/// A memory distribution metric.
///
/// Memory distributions are used to accumulate and store memory sizes.
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct MemoryDistributionMetric {
meta: CommonMetricData,
meta: Arc<CommonMetricData>,
memory_unit: MemoryUnit,
}

@@ -36,8 +38,12 @@ pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
DistributionData {
// **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
// specialized snapshot function.
values: hist.snapshot(),
sum: hist.sum(),
values: hist
.snapshot()
.into_iter()
.map(|(k, v)| (k.to_string(), v as i64))
.collect(),
sum: hist.sum() as i64,
}
}

@@ -54,7 +60,10 @@ impl MetricType for MemoryDistributionMetric {
impl MemoryDistributionMetric {
/// Creates a new memory distribution metric.
pub fn new(meta: CommonMetricData, memory_unit: MemoryUnit) -> Self {
Self { meta, memory_unit }
Self {
meta: Arc::new(meta),
memory_unit,
}
}

/// Accumulates the provided sample in the metric.

@@ -68,12 +77,32 @@ impl MemoryDistributionMetric {
///
/// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
/// and an [`ErrorType::InvalidValue`] error is recorded.
pub fn accumulate(&self, glean: &Glean, sample: u64) {
pub fn accumulate(&self, sample: i64) {
let metric = self.clone();
crate::launch_with_glean(move |glean| metric.accumulate_sync(glean, sample))
}

/// Accumulates the provided sample in the metric synchronously.
///
/// See [`accumulate`](Self::accumulate) for details.
#[doc(hidden)]
pub fn accumulate_sync(&self, glean: &Glean, sample: i64) {
if !self.should_record(glean) {
return;
}

let mut sample = self.memory_unit.as_bytes(sample);
if sample < 0 {
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
"Accumulated a negative sample",
None,
);
return;
}

let mut sample = self.memory_unit.as_bytes(sample as u64);

if sample > MAX_BYTES {
let msg = "Sample is bigger than 1 terabyte";

@@ -119,7 +148,16 @@ impl MemoryDistributionMetric {
///
/// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
/// and an [`ErrorType::InvalidValue`] error is recorded.
pub fn accumulate_samples_signed(&self, glean: &Glean, samples: Vec<i64>) {
pub fn accumulate_samples(&self, samples: Vec<i64>) {
let metric = self.clone();
crate::launch_with_glean(move |glean| metric.accumulate_samples_sync(glean, samples))
}

/// Accumulates the provided signed samples in the metric synchronously.
///
/// See [`accumulate_samples`](Self::accumulate_samples) for details.
#[doc(hidden)]
pub fn accumulate_samples_sync(&self, glean: &Glean, samples: Vec<i64>) {
if !self.should_record(glean) {
return;
}

@@ -176,15 +214,20 @@ impl MemoryDistributionMetric {
}
}

/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
/// Gets the currently stored value synchronously.
#[doc(hidden)]
pub fn get_value<'a, S: Into<Option<&'a str>>>(
&self,
glean: &Glean,
ping_name: S,
) -> Option<DistributionData> {
let queried_ping_name = ping_name
.into()
.unwrap_or_else(|| &self.meta().send_in_pings[0]);

match StorageManager.snapshot_metric_for_test(
glean.storage(),
storage_name,
queried_ping_name,
&self.meta.identifier(glean),
self.meta.lifetime,
) {

@@ -195,15 +238,33 @@ impl MemoryDistributionMetric {

/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently-stored histogram as a JSON String of the serialized value.
/// Gets the currently stored value.
///
/// This doesn't clear the stored value.
pub fn test_get_value_as_json_string(
&self,
glean: &Glean,
storage_name: &str,
) -> Option<String> {
self.test_get_value(glean, storage_name)
.map(|snapshot| serde_json::to_string(&snapshot).unwrap())
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<DistributionData> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
}

/// **Exported for test purposes.**
///
/// Gets the number of recorded errors for the given metric and error type.
///
/// # Arguments
///
/// * `error` - The type of error
/// * `ping_name` - represents the optional name of the ping to retrieve the
/// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
/// The number of errors reported.
pub fn test_get_num_recorded_errors(&self, error: ErrorType, ping_name: Option<String>) -> i32 {
crate::block_on_dispatcher();

crate::core::with_glean(|glean| {
test_get_num_recorded_errors(glean, self.meta(), error, ping_name.as_deref())
.unwrap_or(0)
})
}
}
@@ -69,10 +69,10 @@ pub struct DistributionData {
/// A map containig the bucket index mapped to the accumulated count.
///
/// This can contain buckets with a count of `0`.
pub values: HashMap<u64, u64>,
pub values: HashMap<String, i64>,

/// The accumulated sum of all the samples in the distribution.
pub sum: u64,
pub sum: i64,
}

/// The available metrics.
@@ -108,8 +108,8 @@ pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
DistributionData {
// **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
// specialized snapshot function.
values: hist.snapshot(),
sum: hist.sum(),
values: hist.snapshot().into_iter().map(|(k, v)| (k.to_string(), v as i64)).collect(),
sum: hist.sum() as i64,
}
}
@@ -327,7 +327,7 @@ impl PingUploadManager {
if let ErrorKind::PingBodyOverflow(s) = e.kind() {
self.upload_metrics
.discarded_exceeding_pings_size
.accumulate(glean, *s as u64 / 1024);
.accumulate_sync(glean, *s as i64 / 1024);
}

None

@@ -448,7 +448,7 @@ impl PingUploadManager {
cached_pings.pending_pings.reverse();
self.upload_metrics
.pending_pings_directory_size
.accumulate(glean, pending_pings_directory_size as u64 / 1024);
.accumulate_sync(glean, pending_pings_directory_size as i64 / 1024);

// Enqueue the remaining pending pings and
// enqueue all deletion-request pings.
@@ -37,10 +37,10 @@ fn serializer_should_correctly_serialize_memory_distribution() {
memory_unit,
);

metric.accumulate(&glean, 100_000);
metric.accumulate_sync(&glean, 100_000);

let snapshot = metric
.test_get_value(&glean, "store1")
.get_value(&glean, "store1")
.expect("Value should be stored");

assert_eq!(snapshot.sum, 100_000 * kb);

@@ -78,7 +78,7 @@ fn set_value_properly_sets_the_value_in_all_stores() {
MemoryUnit::Byte,
);

metric.accumulate(&glean, 100_000);
metric.accumulate_sync(&glean, 100_000);

for store_name in store_names {
let snapshot = StorageManager

@@ -117,10 +117,10 @@ fn the_accumulate_samples_api_correctly_stores_memory_values() {

// Accumulate the samples. We intentionally do not report
// negative values to not trigger error reporting.
metric.accumulate_samples_signed(&glean, [1, 2, 3].to_vec());
metric.accumulate_samples_sync(&glean, [1, 2, 3].to_vec());

let snapshot = metric
.test_get_value(&glean, "store1")
.get_value(&glean, "store1")
.expect("Value should be stored");

let kb = 1024;

@@ -131,9 +131,9 @@ fn the_accumulate_samples_api_correctly_stores_memory_values() {
// We should get a sample in 3 buckets.
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
assert_eq!(1, snapshot.values[&1023]);
assert_eq!(1, snapshot.values[&2047]);
assert_eq!(1, snapshot.values[&3024]);
assert_eq!(1, snapshot.values["1023"]);
assert_eq!(1, snapshot.values["2047"]);
assert_eq!(1, snapshot.values["3024"]);

// No errors should be reported.
assert!(test_get_num_recorded_errors(

@@ -162,10 +162,10 @@ fn the_accumulate_samples_api_correctly_handles_negative_values() {
);

// Accumulate the samples.
metric.accumulate_samples_signed(&glean, [-1, 1, 2, 3].to_vec());
metric.accumulate_samples_sync(&glean, [-1, 1, 2, 3].to_vec());

let snapshot = metric
.test_get_value(&glean, "store1")
.get_value(&glean, "store1")
.expect("Value should be stored");

let kb = 1024;

@@ -176,9 +176,9 @@ fn the_accumulate_samples_api_correctly_handles_negative_values() {
// We should get a sample in 3 buckets.
// These numbers are a bit magic, but they correspond to
// `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
assert_eq!(1, snapshot.values[&1023]);
assert_eq!(1, snapshot.values[&2047]);
assert_eq!(1, snapshot.values[&3024]);
assert_eq!(1, snapshot.values["1023"]);
assert_eq!(1, snapshot.values["2047"]);
assert_eq!(1, snapshot.values["3024"]);

// 1 error should be reported.
assert_eq!(