Backed out 15 changesets (bug 1662868) for causing xpcshell thread leaks.

CLOSED TREE

Backed out changeset fe1462617a8d (bug 1662868)
Backed out changeset 99df04f55be1 (bug 1662868)
Backed out changeset 21f7b675b3b9 (bug 1662868)
Backed out changeset 33a5ec2378cd (bug 1662868)
Backed out changeset be7a168ee182 (bug 1662868)
Backed out changeset 1803b5acd0f8 (bug 1662868)
Backed out changeset 98415dd8ee7e (bug 1662868)
Backed out changeset 6e38bfcb8587 (bug 1662868)
Backed out changeset 43245bce408a (bug 1662868)
Backed out changeset b727f5c658f5 (bug 1662868)
Backed out changeset f631a1d04d0a (bug 1662868)
Backed out changeset 6a5e0257086c (bug 1662868)
Backed out changeset 82c6c1b7a24a (bug 1662868)
Backed out changeset 6f4740140fe1 (bug 1662868)
Backed out changeset 2f342eaea13e (bug 1662868)
This commit is contained in:
Mihai Alexandru Michis 2020-12-02 18:24:59 +02:00
Родитель ec37b8925b
Коммит d2849190f6
124 изменённых файлов: 19551 добавлений и 20035 удалений

10
Cargo.lock сгенерированный
Просмотреть файл

@ -1545,7 +1545,6 @@ dependencies = [
"ffi-support",
"glean",
"glean-core",
"inherent",
"log",
"nsstring",
"once_cell",
@ -2079,9 +2078,9 @@ dependencies = [
[[package]]
name = "glean"
version = "33.5.0"
version = "33.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1d5556ec294a763121f57384cf09be9b7f8eebbfc075040f9120b84f6a1160b"
checksum = "8f52254ae2baf857eec45b424a0d2dfe6ac63f353b594cfa4bee033f8386b25c"
dependencies = [
"crossbeam-channel",
"glean-core",
@ -2091,14 +2090,13 @@ dependencies = [
"serde",
"serde_json",
"thiserror",
"uuid",
]
[[package]]
name = "glean-core"
version = "33.5.0"
version = "33.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "657786648aaf8df52b059cb5cedff3be250df149bd8f6364919166e9d02a398d"
checksum = "82e3c82289c1ce270c1accc086f9cac083c15ddc01d167e9c6e0b8ed32a4b47c"
dependencies = [
"bincode",
"chrono",

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

22
third_party/rust/glean-core/Cargo.lock сгенерированный поставляемый
Просмотреть файл

@ -53,9 +53,9 @@ checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
[[package]]
name = "cc"
version = "1.0.65"
version = "1.0.62"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95752358c8f7552394baf48cd82695b345628ad3f170d607de3ca03b8dacca15"
checksum = "f1770ced377336a88a67c473594ccc14eca6f4559217c34f64aac8f83d641b40"
[[package]]
name = "cfg-if"
@ -180,7 +180,7 @@ dependencies = [
[[package]]
name = "glean-core"
version = "33.5.0"
version = "33.4.0"
dependencies = [
"bincode",
"chrono",
@ -543,9 +543,9 @@ dependencies = [
[[package]]
name = "syn"
version = "1.0.53"
version = "1.0.48"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8833e20724c24de12bbaba5ad230ea61c3eafb05b881c7c9d3cfe8638b187e68"
checksum = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac"
dependencies = [
"proc-macro2",
"quote",
@ -580,9 +580,9 @@ dependencies = [
[[package]]
name = "termcolor"
version = "1.1.2"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f"
dependencies = [
"winapi-util",
]
@ -600,9 +600,9 @@ dependencies = [
[[package]]
name = "tinyvec"
version = "1.1.0"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f"
checksum = "b78a366903f506d2ad52ca8dc552102ffdd3e937ba8a227f024dc1d1eae28575"
dependencies = [
"tinyvec_macros",
]
@ -624,9 +624,9 @@ dependencies = [
[[package]]
name = "unicode-normalization"
version = "0.1.16"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606"
checksum = "b7f98e67a4d84f730d343392f9bfff7d21e3fca562b9cb7a43b768350beeddc6"
dependencies = [
"tinyvec",
]

2
third_party/rust/glean-core/Cargo.toml поставляемый
Просмотреть файл

@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "glean-core"
version = "33.5.0"
version = "33.4.0"
authors = ["Jan-Erik Rediger <jrediger@mozilla.com>", "The Glean Team <glean-team@mozilla.com>"]
include = ["/README.md", "/LICENSE", "/src", "/examples", "/tests", "/Cargo.toml"]
description = "A modern Telemetry library"

154
third_party/rust/glean-core/examples/sample.rs поставляемый
Просмотреть файл

@ -1,77 +1,77 @@
use std::env;
use glean_core::metrics::*;
use glean_core::ping::PingMaker;
use glean_core::{CommonMetricData, Glean};
use tempfile::Builder;
fn main() {
env_logger::init();
let mut args = env::args().skip(1);
let data_path = if let Some(path) = args.next() {
path
} else {
let root = Builder::new().prefix("simple-db").tempdir().unwrap();
root.path().display().to_string()
};
let cfg = glean_core::Configuration {
data_path,
application_id: "org.mozilla.glean_core.example".into(),
language_binding_name: "Rust".into(),
upload_enabled: true,
max_events: None,
delay_ping_lifetime_io: false,
};
let mut glean = Glean::new(cfg).unwrap();
glean.register_ping_type(&PingType::new("baseline", true, false, vec![]));
glean.register_ping_type(&PingType::new("metrics", true, false, vec![]));
let local_metric: StringMetric = StringMetric::new(CommonMetricData {
name: "local_metric".into(),
category: "local".into(),
send_in_pings: vec!["baseline".into()],
..Default::default()
});
let call_counter: CounterMetric = CounterMetric::new(CommonMetricData {
name: "calls".into(),
category: "local".into(),
send_in_pings: vec!["baseline".into(), "metrics".into()],
..Default::default()
});
local_metric.set(&glean, "I can set this");
call_counter.add(&glean, 1);
println!("Baseline Data:\n{}", glean.snapshot("baseline", true));
call_counter.add(&glean, 2);
println!("Metrics Data:\n{}", glean.snapshot("metrics", true));
call_counter.add(&glean, 3);
println!();
println!("Baseline Data 2:\n{}", glean.snapshot("baseline", false));
println!("Metrics Data 2:\n{}", glean.snapshot("metrics", true));
let list: StringListMetric = StringListMetric::new(CommonMetricData {
name: "list".into(),
category: "local".into(),
send_in_pings: vec!["baseline".into()],
..Default::default()
});
list.add(&glean, "once");
list.add(&glean, "upon");
let ping_maker = PingMaker::new();
let ping = ping_maker
.collect_string(&glean, glean.get_ping_by_name("baseline").unwrap(), None)
.unwrap();
println!("Baseline Ping:\n{}", ping);
let ping = ping_maker.collect_string(&glean, glean.get_ping_by_name("metrics").unwrap(), None);
println!("Metrics Ping: {:?}", ping);
}
use std::env;
use glean_core::metrics::*;
use glean_core::ping::PingMaker;
use glean_core::{CommonMetricData, Glean};
use tempfile::Builder;
fn main() {
env_logger::init();
let mut args = env::args().skip(1);
let data_path = if let Some(path) = args.next() {
path
} else {
let root = Builder::new().prefix("simple-db").tempdir().unwrap();
root.path().display().to_string()
};
let cfg = glean_core::Configuration {
data_path,
application_id: "org.mozilla.glean_core.example".into(),
language_binding_name: "Rust".into(),
upload_enabled: true,
max_events: None,
delay_ping_lifetime_io: false,
};
let mut glean = Glean::new(cfg).unwrap();
glean.register_ping_type(&PingType::new("baseline", true, false, vec![]));
glean.register_ping_type(&PingType::new("metrics", true, false, vec![]));
let local_metric: StringMetric = StringMetric::new(CommonMetricData {
name: "local_metric".into(),
category: "local".into(),
send_in_pings: vec!["baseline".into()],
..Default::default()
});
let call_counter: CounterMetric = CounterMetric::new(CommonMetricData {
name: "calls".into(),
category: "local".into(),
send_in_pings: vec!["baseline".into(), "metrics".into()],
..Default::default()
});
local_metric.set(&glean, "I can set this");
call_counter.add(&glean, 1);
println!("Baseline Data:\n{}", glean.snapshot("baseline", true));
call_counter.add(&glean, 2);
println!("Metrics Data:\n{}", glean.snapshot("metrics", true));
call_counter.add(&glean, 3);
println!();
println!("Baseline Data 2:\n{}", glean.snapshot("baseline", false));
println!("Metrics Data 2:\n{}", glean.snapshot("metrics", true));
let list: StringListMetric = StringListMetric::new(CommonMetricData {
name: "list".into(),
category: "local".into(),
send_in_pings: vec!["baseline".into()],
..Default::default()
});
list.add(&glean, "once");
list.add(&glean, "upon");
let ping_maker = PingMaker::new();
let ping = ping_maker
.collect_string(&glean, glean.get_ping_by_name("baseline").unwrap(), None)
.unwrap();
println!("Baseline Ping:\n{}", ping);
let ping = ping_maker.collect_string(&glean, glean.get_ping_by_name("metrics").unwrap(), None);
println!("Metrics Ping: {:?}", ping);
}

Просмотреть файл

@ -1,127 +1,127 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use crate::error::{Error, ErrorKind};
use crate::metrics::dynamic_label;
use crate::Glean;
/// The supported metrics' lifetimes.
///
/// A metric's lifetime determines when its stored data gets reset.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(i32)] // Use i32 to be compatible with our JNA definition
pub enum Lifetime {
/// The metric is reset with each sent ping
Ping,
/// The metric is reset on application restart
Application,
/// The metric is reset with each user profile
User,
}
impl Default for Lifetime {
fn default() -> Self {
Lifetime::Ping
}
}
impl Lifetime {
/// String representation of the lifetime.
pub fn as_str(self) -> &'static str {
match self {
Lifetime::Ping => "ping",
Lifetime::Application => "app",
Lifetime::User => "user",
}
}
}
impl TryFrom<i32> for Lifetime {
type Error = Error;
fn try_from(value: i32) -> Result<Lifetime, Self::Error> {
match value {
0 => Ok(Lifetime::Ping),
1 => Ok(Lifetime::Application),
2 => Ok(Lifetime::User),
e => Err(ErrorKind::Lifetime(e).into()),
}
}
}
/// The common set of data shared across all different metric types.
#[derive(Default, Debug, Clone)]
pub struct CommonMetricData {
/// The metric's name.
pub name: String,
/// The metric's category.
pub category: String,
/// List of ping names to include this metric in.
pub send_in_pings: Vec<String>,
/// The metric's lifetime.
pub lifetime: Lifetime,
/// Whether or not the metric is disabled.
///
/// Disabled metrics are never recorded.
pub disabled: bool,
/// Dynamic label.
/// When a LabeledMetric<T> factory creates the specific metric to be
/// recorded to, dynamic labels are stored in the specific label so that we
/// can validate them when the Glean singleton is available.
pub dynamic_label: Option<String>,
}
impl CommonMetricData {
/// Creates a new metadata object.
pub fn new<A: Into<String>, B: Into<String>, C: Into<String>>(
category: A,
name: B,
ping_name: C,
) -> CommonMetricData {
CommonMetricData {
name: name.into(),
category: category.into(),
send_in_pings: vec![ping_name.into()],
..Default::default()
}
}
/// The metric's base identifier, including the category and name, but not the label.
///
/// If `category` is empty, it's ommitted.
/// Otherwise, it's the combination of the metric's `category` and `name`.
pub(crate) fn base_identifier(&self) -> String {
if self.category.is_empty() {
self.name.clone()
} else {
format!("{}.{}", self.category, self.name)
}
}
/// The metric's unique identifier, including the category, name and label.
///
/// If `category` is empty, it's ommitted.
/// Otherwise, it's the combination of the metric's `category`, `name` and `label`.
pub(crate) fn identifier(&self, glean: &Glean) -> String {
let base_identifier = self.base_identifier();
if let Some(label) = &self.dynamic_label {
dynamic_label(glean, self, &base_identifier, label)
} else {
base_identifier
}
}
/// Whether this metric should be recorded.
pub fn should_record(&self) -> bool {
!self.disabled
}
/// The list of storages this metric should be recorded into.
pub fn storage_names(&self) -> &[String] {
&self.send_in_pings
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use crate::error::{Error, ErrorKind};
use crate::metrics::dynamic_label;
use crate::Glean;
/// The supported metrics' lifetimes.
///
/// A metric's lifetime determines when its stored data gets reset.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(i32)] // Use i32 to be compatible with our JNA definition
pub enum Lifetime {
/// The metric is reset with each sent ping
Ping,
/// The metric is reset on application restart
Application,
/// The metric is reset with each user profile
User,
}
impl Default for Lifetime {
fn default() -> Self {
Lifetime::Ping
}
}
impl Lifetime {
/// String representation of the lifetime.
pub fn as_str(self) -> &'static str {
match self {
Lifetime::Ping => "ping",
Lifetime::Application => "app",
Lifetime::User => "user",
}
}
}
impl TryFrom<i32> for Lifetime {
type Error = Error;
fn try_from(value: i32) -> Result<Lifetime, Self::Error> {
match value {
0 => Ok(Lifetime::Ping),
1 => Ok(Lifetime::Application),
2 => Ok(Lifetime::User),
e => Err(ErrorKind::Lifetime(e).into()),
}
}
}
/// The common set of data shared across all different metric types.
#[derive(Default, Debug, Clone)]
pub struct CommonMetricData {
/// The metric's name.
pub name: String,
/// The metric's category.
pub category: String,
/// List of ping names to include this metric in.
pub send_in_pings: Vec<String>,
/// The metric's lifetime.
pub lifetime: Lifetime,
/// Whether or not the metric is disabled.
///
/// Disabled metrics are never recorded.
pub disabled: bool,
/// Dynamic label.
/// When a LabeledMetric<T> factory creates the specific metric to be
/// recorded to, dynamic labels are stored in the specific label so that we
/// can validate them when the Glean singleton is available.
pub dynamic_label: Option<String>,
}
impl CommonMetricData {
/// Creates a new metadata object.
pub fn new<A: Into<String>, B: Into<String>, C: Into<String>>(
category: A,
name: B,
ping_name: C,
) -> CommonMetricData {
CommonMetricData {
name: name.into(),
category: category.into(),
send_in_pings: vec![ping_name.into()],
..Default::default()
}
}
/// The metric's base identifier, including the category and name, but not the label.
///
/// If `category` is empty, it's ommitted.
/// Otherwise, it's the combination of the metric's `category` and `name`.
pub(crate) fn base_identifier(&self) -> String {
if self.category.is_empty() {
self.name.clone()
} else {
format!("{}.{}", self.category, self.name)
}
}
/// The metric's unique identifier, including the category, name and label.
///
/// If `category` is empty, it's ommitted.
/// Otherwise, it's the combination of the metric's `category`, `name` and `label`.
pub(crate) fn identifier(&self, glean: &Glean) -> String {
let base_identifier = self.base_identifier();
if let Some(label) = &self.dynamic_label {
dynamic_label(glean, self, &base_identifier, label)
} else {
base_identifier
}
}
/// Whether this metric should be recorded.
pub fn should_record(&self) -> bool {
!self.disabled
}
/// The list of storages this metric should be recorded into.
pub fn storage_names(&self) -> &[String] {
&self.send_in_pings
}
}

3144
third_party/rust/glean-core/src/database/mod.rs поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

642
third_party/rust/glean-core/src/debug.rs поставляемый
Просмотреть файл

@ -1,321 +1,321 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! # Debug options
//!
//! The debug options for Glean may be set by calling one of the `set_*` functions
//! or by setting specific environment variables.
//!
//! The environment variables will be read only once when the options are initialized.
//!
//! The possible debugging features available out of the box are:
//!
//! * **Ping logging** - logging the contents of ping requests that are correctly assembled;
//! This may be set by calling glean.set_log_pings(value: bool)
//! or by setting the environment variable GLEAN_LOG_PINGS="true";
//! * **Debug tagging** - Adding the X-Debug-ID header to every ping request,
//! allowing these tagged pings to be sent to the ["Ping Debug Viewer"](https://mozilla.github.io/glean/book/dev/core/internal/debug-pings.html).
//! This may be set by calling glean.set_debug_view_tag(value: &str)
//! or by setting the environment variable GLEAN_DEBUG_VIEW_TAG=<some tag>;
//! * **Source tagging** - Adding the X-Source-Tags header to every ping request,
//! allowing pings to be tagged with custom labels.
//! This may be set by calling glean.set_source_tags(value: Vec<String>)
//! or by setting the environment variable GLEAN_SOURCE_TAGS=<some, tags>;
//!
//! Bindings may implement other debugging features, e.g. sending pings on demand.
use std::env;
const GLEAN_LOG_PINGS: &str = "GLEAN_LOG_PINGS";
const GLEAN_DEBUG_VIEW_TAG: &str = "GLEAN_DEBUG_VIEW_TAG";
const GLEAN_SOURCE_TAGS: &str = "GLEAN_SOURCE_TAGS";
const GLEAN_MAX_SOURCE_TAGS: usize = 5;
/// A representation of all of Glean's debug options.
pub struct DebugOptions {
/// Option to log the payload of pings that are successfully assembled into a ping request.
pub log_pings: DebugOption<bool>,
/// Option to add the X-Debug-ID header to every ping request.
pub debug_view_tag: DebugOption<String>,
/// Option to add the X-Source-Tags header to ping requests. This will allow the data
/// consumers to classify data depending on the applied tags.
pub source_tags: DebugOption<Vec<String>>,
}
impl std::fmt::Debug for DebugOptions {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("DebugOptions")
.field("log_pings", &self.log_pings.get())
.field("debug_view_tag", &self.debug_view_tag.get())
.field("source_tags", &self.source_tags.get())
.finish()
}
}
impl DebugOptions {
pub fn new() -> Self {
Self {
log_pings: DebugOption::new(GLEAN_LOG_PINGS, get_bool_from_str, None),
debug_view_tag: DebugOption::new(GLEAN_DEBUG_VIEW_TAG, Some, Some(validate_tag)),
source_tags: DebugOption::new(
GLEAN_SOURCE_TAGS,
tokenize_string,
Some(validate_source_tags),
),
}
}
}
/// A representation of a debug option,
/// where the value can be set programmatically or come from an environment variable.
#[derive(Debug)]
pub struct DebugOption<T, E = fn(String) -> Option<T>, V = fn(&T) -> bool> {
/// The name of the environment variable related to this debug option.
env: String,
/// The actual value of this option.
value: Option<T>,
/// Function to extract the data of type `T` from a `String`, used when
/// extracting data from the environment.
extraction: E,
/// Optional function to validate the value parsed from the environment
/// or passed to the `set` function.
validation: Option<V>,
}
impl<T, E, V> DebugOption<T, E, V>
where
T: Clone,
E: Fn(String) -> Option<T>,
V: Fn(&T) -> bool,
{
/// Creates a new debug option.
///
/// Tries to get the initial value of the option from the environment.
pub fn new(env: &str, extraction: E, validation: Option<V>) -> Self {
let mut option = Self {
env: env.into(),
value: None,
extraction,
validation,
};
option.set_from_env();
option
}
fn validate(&self, value: &T) -> bool {
if let Some(f) = self.validation.as_ref() {
f(value)
} else {
true
}
}
fn set_from_env(&mut self) {
let extract = &self.extraction;
match env::var(&self.env) {
Ok(env_value) => match extract(env_value.clone()) {
Some(v) => {
self.set(v);
}
None => {
log::error!(
"Unable to parse debug option {}={} into {}. Ignoring.",
self.env,
env_value,
std::any::type_name::<T>()
);
}
},
Err(env::VarError::NotUnicode(_)) => {
log::error!("The value of {} is not valid unicode. Ignoring.", self.env)
}
// The other possible error is that the env var is not set,
// which is not an error for us and can safely be ignored.
Err(_) => {}
}
}
/// Tries to set a value for this debug option.
///
/// Validates the value in case a validation function is available.
///
/// # Returns
///
/// Whether the option passed validation and was succesfully set.
pub fn set(&mut self, value: T) -> bool {
let validated = self.validate(&value);
if validated {
log::info!("Setting the debug option {}.", self.env);
self.value = Some(value);
return true;
}
log::info!("Invalid value for debug option {}.", self.env);
false
}
/// Gets the value of this debug option.
pub fn get(&self) -> Option<&T> {
self.value.as_ref()
}
}
fn get_bool_from_str(value: String) -> Option<bool> {
std::str::FromStr::from_str(&value).ok()
}
fn tokenize_string(value: String) -> Option<Vec<String>> {
let trimmed = value.trim();
if trimmed.is_empty() {
return None;
}
Some(trimmed.split(',').map(|s| s.trim().to_string()).collect())
}
/// A tag is the value used in both the `X-Debug-ID` and `X-Source-Tags` headers
/// of tagged ping requests, thus is it must be a valid header value.
///
/// In other words, it must match the regex: "[a-zA-Z0-9-]{1,20}"
///
/// The regex crate isn't used here because it adds to the binary size,
/// and the Glean SDK doesn't use regular expressions anywhere else.
#[allow(clippy::ptr_arg)]
fn validate_tag(value: &String) -> bool {
if value.is_empty() {
log::error!("A tag must have at least one character.");
return false;
}
let mut iter = value.chars();
let mut count = 0;
loop {
match iter.next() {
// We are done, so the whole expression is valid.
None => return true,
// Valid characters.
Some('-') | Some('a'..='z') | Some('A'..='Z') | Some('0'..='9') => (),
// An invalid character
Some(c) => {
log::error!("Invalid character '{}' in the tag.", c);
return false;
}
}
count += 1;
if count == 20 {
log::error!("A tag cannot exceed 20 characters.");
return false;
}
}
}
/// Validate the list of source tags.
///
/// This builds upon the existing `validate_tag` function, since all the
/// tags should respect the same rules to make the pipeline happy.
#[allow(clippy::ptr_arg)]
fn validate_source_tags(tags: &Vec<String>) -> bool {
if tags.is_empty() {
return false;
}
if tags.len() > GLEAN_MAX_SOURCE_TAGS {
log::error!(
"A list of tags cannot contain more than {} elements.",
GLEAN_MAX_SOURCE_TAGS
);
return false;
}
// Filter out tags starting with "glean". They are reserved.
if tags.iter().any(|s| s.starts_with("glean")) {
log::error!("Tags starting with `glean` are reserved and must not be used.");
return false;
}
tags.iter().all(|x| validate_tag(&x))
}
#[cfg(test)]
mod test {
use super::*;
use std::env;
#[test]
fn debug_option_is_correctly_loaded_from_env() {
env::set_var("GLEAN_TEST_1", "test");
let option: DebugOption<String> = DebugOption::new("GLEAN_TEST_1", Some, None);
assert_eq!(option.get().unwrap(), "test");
}
#[test]
fn debug_option_is_correctly_validated_when_necessary() {
#[allow(clippy::ptr_arg)]
fn validate(value: &String) -> bool {
value == "test"
}
// Invalid values from the env are not set
env::set_var("GLEAN_TEST_2", "invalid");
let mut option: DebugOption<String> =
DebugOption::new("GLEAN_TEST_2", Some, Some(validate));
assert!(option.get().is_none());
// Valid values are set using the `set` function
assert!(option.set("test".into()));
assert_eq!(option.get().unwrap(), "test");
// Invalid values are not set using the `set` function
assert!(!option.set("invalid".into()));
assert_eq!(option.get().unwrap(), "test");
}
#[test]
fn tokenize_string_splits_correctly() {
// Valid list is properly tokenized and spaces are trimmed.
assert_eq!(
Some(vec!["test1".to_string(), "test2".to_string()]),
tokenize_string(" test1, test2 ".to_string())
);
// Empty strings return no item.
assert_eq!(None, tokenize_string("".to_string()));
}
#[test]
fn validates_tag_correctly() {
assert!(validate_tag(&"valid-value".to_string()));
assert!(validate_tag(&"-also-valid-value".to_string()));
assert!(!validate_tag(&"invalid_value".to_string()));
assert!(!validate_tag(&"invalid value".to_string()));
assert!(!validate_tag(&"!nv@lid-val*e".to_string()));
assert!(!validate_tag(
&"invalid-value-because-way-too-long".to_string()
));
assert!(!validate_tag(&"".to_string()));
}
#[test]
fn validates_source_tags_correctly() {
// Empty tags.
assert!(!validate_source_tags(&vec!["".to_string()]));
// Too many tags.
assert!(!validate_source_tags(&vec![
"1".to_string(),
"2".to_string(),
"3".to_string(),
"4".to_string(),
"5".to_string(),
"6".to_string()
]));
// Invalid tags.
assert!(!validate_source_tags(&vec!["!nv@lid-val*e".to_string()]));
// Entries starting with 'glean' are filtered out.
assert!(!validate_source_tags(&vec![
"glean-test1".to_string(),
"test2".to_string()
]));
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! # Debug options
//!
//! The debug options for Glean may be set by calling one of the `set_*` functions
//! or by setting specific environment variables.
//!
//! The environment variables will be read only once when the options are initialized.
//!
//! The possible debugging features available out of the box are:
//!
//! * **Ping logging** - logging the contents of ping requests that are correctly assembled;
//! This may be set by calling glean.set_log_pings(value: bool)
//! or by setting the environment variable GLEAN_LOG_PINGS="true";
//! * **Debug tagging** - Adding the X-Debug-ID header to every ping request,
//! allowing these tagged pings to be sent to the ["Ping Debug Viewer"](https://mozilla.github.io/glean/book/dev/core/internal/debug-pings.html).
//! This may be set by calling glean.set_debug_view_tag(value: &str)
//! or by setting the environment variable GLEAN_DEBUG_VIEW_TAG=<some tag>;
//! * **Source tagging** - Adding the X-Source-Tags header to every ping request,
//! allowing pings to be tagged with custom labels.
//! This may be set by calling glean.set_source_tags(value: Vec<String>)
//! or by setting the environment variable GLEAN_SOURCE_TAGS=<some, tags>;
//!
//! Bindings may implement other debugging features, e.g. sending pings on demand.
use std::env;
const GLEAN_LOG_PINGS: &str = "GLEAN_LOG_PINGS";
const GLEAN_DEBUG_VIEW_TAG: &str = "GLEAN_DEBUG_VIEW_TAG";
const GLEAN_SOURCE_TAGS: &str = "GLEAN_SOURCE_TAGS";
const GLEAN_MAX_SOURCE_TAGS: usize = 5;
/// A representation of all of Glean's debug options.
pub struct DebugOptions {
/// Option to log the payload of pings that are successfully assembled into a ping request.
pub log_pings: DebugOption<bool>,
/// Option to add the X-Debug-ID header to every ping request.
pub debug_view_tag: DebugOption<String>,
/// Option to add the X-Source-Tags header to ping requests. This will allow the data
/// consumers to classify data depending on the applied tags.
pub source_tags: DebugOption<Vec<String>>,
}
impl std::fmt::Debug for DebugOptions {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("DebugOptions")
.field("log_pings", &self.log_pings.get())
.field("debug_view_tag", &self.debug_view_tag.get())
.field("source_tags", &self.source_tags.get())
.finish()
}
}
impl DebugOptions {
pub fn new() -> Self {
Self {
log_pings: DebugOption::new(GLEAN_LOG_PINGS, get_bool_from_str, None),
debug_view_tag: DebugOption::new(GLEAN_DEBUG_VIEW_TAG, Some, Some(validate_tag)),
source_tags: DebugOption::new(
GLEAN_SOURCE_TAGS,
tokenize_string,
Some(validate_source_tags),
),
}
}
}
/// A representation of a debug option,
/// where the value can be set programmatically or come from an environment variable.
#[derive(Debug)]
pub struct DebugOption<T, E = fn(String) -> Option<T>, V = fn(&T) -> bool> {
/// The name of the environment variable related to this debug option.
env: String,
/// The actual value of this option.
value: Option<T>,
/// Function to extract the data of type `T` from a `String`, used when
/// extracting data from the environment.
extraction: E,
/// Optional function to validate the value parsed from the environment
/// or passed to the `set` function.
validation: Option<V>,
}
impl<T, E, V> DebugOption<T, E, V>
where
T: Clone,
E: Fn(String) -> Option<T>,
V: Fn(&T) -> bool,
{
/// Creates a new debug option.
///
/// Tries to get the initial value of the option from the environment.
pub fn new(env: &str, extraction: E, validation: Option<V>) -> Self {
let mut option = Self {
env: env.into(),
value: None,
extraction,
validation,
};
option.set_from_env();
option
}
fn validate(&self, value: &T) -> bool {
if let Some(f) = self.validation.as_ref() {
f(value)
} else {
true
}
}
fn set_from_env(&mut self) {
let extract = &self.extraction;
match env::var(&self.env) {
Ok(env_value) => match extract(env_value.clone()) {
Some(v) => {
self.set(v);
}
None => {
log::error!(
"Unable to parse debug option {}={} into {}. Ignoring.",
self.env,
env_value,
std::any::type_name::<T>()
);
}
},
Err(env::VarError::NotUnicode(_)) => {
log::error!("The value of {} is not valid unicode. Ignoring.", self.env)
}
// The other possible error is that the env var is not set,
// which is not an error for us and can safely be ignored.
Err(_) => {}
}
}
/// Tries to set a value for this debug option.
///
/// Validates the value in case a validation function is available.
///
/// # Returns
///
/// Whether the option passed validation and was succesfully set.
pub fn set(&mut self, value: T) -> bool {
let validated = self.validate(&value);
if validated {
log::info!("Setting the debug option {}.", self.env);
self.value = Some(value);
return true;
}
log::info!("Invalid value for debug option {}.", self.env);
false
}
/// Gets the value of this debug option.
pub fn get(&self) -> Option<&T> {
self.value.as_ref()
}
}
fn get_bool_from_str(value: String) -> Option<bool> {
std::str::FromStr::from_str(&value).ok()
}
fn tokenize_string(value: String) -> Option<Vec<String>> {
let trimmed = value.trim();
if trimmed.is_empty() {
return None;
}
Some(trimmed.split(',').map(|s| s.trim().to_string()).collect())
}
/// A tag is the value used in both the `X-Debug-ID` and `X-Source-Tags` headers
/// of tagged ping requests, thus is it must be a valid header value.
///
/// In other words, it must match the regex: "[a-zA-Z0-9-]{1,20}"
///
/// The regex crate isn't used here because it adds to the binary size,
/// and the Glean SDK doesn't use regular expressions anywhere else.
#[allow(clippy::ptr_arg)]
fn validate_tag(value: &String) -> bool {
if value.is_empty() {
log::error!("A tag must have at least one character.");
return false;
}
let mut iter = value.chars();
let mut count = 0;
loop {
match iter.next() {
// We are done, so the whole expression is valid.
None => return true,
// Valid characters.
Some('-') | Some('a'..='z') | Some('A'..='Z') | Some('0'..='9') => (),
// An invalid character
Some(c) => {
log::error!("Invalid character '{}' in the tag.", c);
return false;
}
}
count += 1;
if count == 20 {
log::error!("A tag cannot exceed 20 characters.");
return false;
}
}
}
/// Validate the list of source tags.
///
/// This builds upon the existing `validate_tag` function, since all the
/// tags should respect the same rules to make the pipeline happy.
#[allow(clippy::ptr_arg)]
fn validate_source_tags(tags: &Vec<String>) -> bool {
if tags.is_empty() {
return false;
}
if tags.len() > GLEAN_MAX_SOURCE_TAGS {
log::error!(
"A list of tags cannot contain more than {} elements.",
GLEAN_MAX_SOURCE_TAGS
);
return false;
}
// Filter out tags starting with "glean". They are reserved.
if tags.iter().any(|s| s.starts_with("glean")) {
log::error!("Tags starting with `glean` are reserved and must not be used.");
return false;
}
tags.iter().all(|x| validate_tag(&x))
}
#[cfg(test)]
mod test {
    use super::*;
    use std::env;
    // NOTE: these tests mutate process-wide environment variables. Each test
    // uses a distinct variable name so parallel test execution cannot race.
    #[test]
    fn debug_option_is_correctly_loaded_from_env() {
        env::set_var("GLEAN_TEST_1", "test");
        // `Some` as the extraction function: the raw env value is used unchanged.
        let option: DebugOption<String> = DebugOption::new("GLEAN_TEST_1", Some, None);
        assert_eq!(option.get().unwrap(), "test");
    }
    #[test]
    fn debug_option_is_correctly_validated_when_necessary() {
        // Validator that only accepts the exact string "test".
        #[allow(clippy::ptr_arg)]
        fn validate(value: &String) -> bool {
            value == "test"
        }
        // Invalid values from the env are not set
        env::set_var("GLEAN_TEST_2", "invalid");
        let mut option: DebugOption<String> =
            DebugOption::new("GLEAN_TEST_2", Some, Some(validate));
        assert!(option.get().is_none());
        // Valid values are set using the `set` function
        assert!(option.set("test".into()));
        assert_eq!(option.get().unwrap(), "test");
        // Invalid values are not set using the `set` function
        assert!(!option.set("invalid".into()));
        assert_eq!(option.get().unwrap(), "test");
    }
    #[test]
    fn tokenize_string_splits_correctly() {
        // Valid list is properly tokenized and spaces are trimmed.
        assert_eq!(
            Some(vec!["test1".to_string(), "test2".to_string()]),
            tokenize_string(" test1, test2 ".to_string())
        );
        // Empty strings return no item.
        assert_eq!(None, tokenize_string("".to_string()));
    }
    #[test]
    fn validates_tag_correctly() {
        assert!(validate_tag(&"valid-value".to_string()));
        assert!(validate_tag(&"-also-valid-value".to_string()));
        // Underscores, spaces and symbols are outside [a-zA-Z0-9-].
        assert!(!validate_tag(&"invalid_value".to_string()));
        assert!(!validate_tag(&"invalid value".to_string()));
        assert!(!validate_tag(&"!nv@lid-val*e".to_string()));
        // Longer than the 20-character limit.
        assert!(!validate_tag(
            &"invalid-value-because-way-too-long".to_string()
        ));
        assert!(!validate_tag(&"".to_string()));
    }
    #[test]
    fn validates_source_tags_correctly() {
        // Empty tags.
        assert!(!validate_source_tags(&vec!["".to_string()]));
        // Too many tags.
        assert!(!validate_source_tags(&vec![
            "1".to_string(),
            "2".to_string(),
            "3".to_string(),
            "4".to_string(),
            "5".to_string(),
            "6".to_string()
        ]));
        // Invalid tags.
        assert!(!validate_source_tags(&vec!["!nv@lid-val*e".to_string()]));
        // Entries starting with 'glean' are filtered out.
        assert!(!validate_source_tags(&vec![
            "glean-test1".to_string(),
            "test2".to_string()
        ]));
    }
}

378
third_party/rust/glean-core/src/error.rs поставляемый
Просмотреть файл

@ -1,189 +1,189 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::ffi::OsString;
use std::fmt::{self, Display};
use std::io;
use std::result;
use ffi_support::{handle_map::HandleError, ExternError};
use rkv::StoreError;
/// A specialized [`Result`] type for this crate's operations.
///
/// This is generally used to avoid writing out [Error] directly and
/// is otherwise a direct mapping to [`Result`].
///
/// [`Result`]: https://doc.rust-lang.org/stable/std/result/enum.Result.html
/// [`Error`]: std.struct.Error.html
pub type Result<T> = result::Result<T, Error>;
/// A list enumerating the categories of errors in this crate.
///
/// [`Error`]: https://doc.rust-lang.org/stable/std/error/trait.Error.html
///
/// This list is intended to grow over time and it is not recommended to
/// exhaustively match against it.
#[derive(Debug)]
#[non_exhaustive]
pub enum ErrorKind {
    /// Lifetime conversion failed
    Lifetime(i32),
    /// FFI-Support error
    Handle(HandleError),
    /// IO error
    IoError(io::Error),
    /// Rkv store error (the previous "IO error" comment was a copy-paste;
    /// `StoreError` comes from the rkv key-value database crate)
    Rkv(StoreError),
    /// JSON error
    Json(serde_json::error::Error),
    /// TimeUnit conversion failed
    TimeUnit(i32),
    /// MemoryUnit conversion failed
    MemoryUnit(i32),
    /// HistogramType conversion failed
    HistogramType(i32),
    /// OsString conversion failed
    OsString(OsString),
    /// Invalid UTF-8 byte sequence in a string
    Utf8Error,
    /// Glean initialization was attempted with an invalid configuration
    InvalidConfig,
    /// Glean not initialized
    NotInitialized,
    /// Ping request body size overflowed
    PingBodyOverflow(usize),
}
/// A specialized [`Error`] type for this crate's operations.
///
/// [`Error`]: https://doc.rust-lang.org/stable/std/error/trait.Error.html
#[derive(Debug)]
pub struct Error {
    // Kept private: the kind is only inspected through `kind()` and only
    // constructed through the helpers and `From` impls below.
    kind: ErrorKind,
}
impl Error {
    /// Returns a new UTF-8 error
    ///
    /// This is exposed in order to expose conversion errors on the FFI layer.
    pub fn utf8_error() -> Error {
        Error {
            kind: ErrorKind::Utf8Error,
        }
    }
    /// Indicates an error that no requested global object is initialized
    pub fn not_initialized() -> Error {
        Error {
            kind: ErrorKind::NotInitialized,
        }
    }
    /// Returns the kind of the current error instance.
    pub fn kind(&self) -> &ErrorKind {
        &self.kind
    }
}
// `Error` carries no source/cause, so the default trait methods suffice.
impl std::error::Error for Error {}
impl Display for Error {
    // Human-readable message per category; this text is also what gets
    // forwarded over FFI via the `From<Error> for ExternError` impl below.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use ErrorKind::*;
        match self.kind() {
            Lifetime(l) => write!(f, "Lifetime conversion from {} failed", l),
            Handle(e) => write!(f, "Invalid handle: {}", e),
            IoError(e) => write!(f, "An I/O error occurred: {}", e),
            Rkv(e) => write!(f, "An Rkv error occurred: {}", e),
            Json(e) => write!(f, "A JSON error occurred: {}", e),
            TimeUnit(t) => write!(f, "TimeUnit conversion from {} failed", t),
            MemoryUnit(m) => write!(f, "MemoryUnit conversion from {} failed", m),
            HistogramType(h) => write!(f, "HistogramType conversion from {} failed", h),
            OsString(s) => write!(f, "OsString conversion from {:?} failed", s),
            Utf8Error => write!(f, "Invalid UTF-8 byte sequence in string"),
            InvalidConfig => write!(f, "Invalid Glean configuration provided"),
            NotInitialized => write!(f, "Global Glean object missing"),
            PingBodyOverflow(s) => write!(
                f,
                "Ping request body size exceeded maximum size allowed: {}kB.",
                s / 1024
            ),
        }
    }
}
// Allow using `?` on a bare `ErrorKind`.
impl From<ErrorKind> for Error {
    fn from(kind: ErrorKind) -> Error {
        Error { kind }
    }
}
// Wrap ffi-support handle-map failures.
impl From<HandleError> for Error {
    fn from(error: HandleError) -> Error {
        Error {
            kind: ErrorKind::Handle(error),
        }
    }
}
// Wrap standard I/O failures.
impl From<io::Error> for Error {
    fn from(error: io::Error) -> Error {
        Error {
            kind: ErrorKind::IoError(error),
        }
    }
}
// Wrap rkv storage failures.
impl From<StoreError> for Error {
    fn from(error: StoreError) -> Error {
        Error {
            kind: ErrorKind::Rkv(error),
        }
    }
}
// Convert to the FFI error representation.
// NOTE(review): every error maps to the same numeric code (42); FFI callers
// can only distinguish errors by their message text.
impl From<Error> for ExternError {
    fn from(error: Error) -> ExternError {
        ffi_support::ExternError::new_error(ffi_support::ErrorCode::new(42), format!("{}", error))
    }
}
// Wrap serde_json (de)serialization failures.
impl From<serde_json::error::Error> for Error {
    fn from(error: serde_json::error::Error) -> Error {
        Error {
            kind: ErrorKind::Json(error),
        }
    }
}
// Wrap `OsString`-to-`String` conversion failures (non-UTF-8 names).
impl From<OsString> for Error {
    fn from(error: OsString) -> Error {
        Error {
            kind: ErrorKind::OsString(error),
        }
    }
}
/// To satisfy integer conversion done by the macros on the FFI side, we need to be able to turn
/// something infallible into an error.
/// This will never actually be reached, as an integer-to-integer conversion is infallible.
impl From<std::convert::Infallible> for Error {
    fn from(_: std::convert::Infallible) -> Error {
        unreachable!()
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::ffi::OsString;
use std::fmt::{self, Display};
use std::io;
use std::result;
use ffi_support::{handle_map::HandleError, ExternError};
use rkv::StoreError;
/// A specialized [`Result`] type for this crate's operations.
///
/// This is generally used to avoid writing out [Error] directly and
/// is otherwise a direct mapping to [`Result`].
///
/// [`Result`]: https://doc.rust-lang.org/stable/std/result/enum.Result.html
/// [`Error`]: std.struct.Error.html
pub type Result<T> = result::Result<T, Error>;
/// A list enumerating the categories of errors in this crate.
///
/// [`Error`]: https://doc.rust-lang.org/stable/std/error/trait.Error.html
///
/// This list is intended to grow over time and it is not recommended to
/// exhaustively match against it.
#[derive(Debug)]
#[non_exhaustive]
pub enum ErrorKind {
    /// Lifetime conversion failed
    Lifetime(i32),
    /// FFI-Support error
    Handle(HandleError),
    /// IO error
    IoError(io::Error),
    /// Rkv store error (the previous "IO error" comment was a copy-paste;
    /// `StoreError` comes from the rkv key-value database crate)
    Rkv(StoreError),
    /// JSON error
    Json(serde_json::error::Error),
    /// TimeUnit conversion failed
    TimeUnit(i32),
    /// MemoryUnit conversion failed
    MemoryUnit(i32),
    /// HistogramType conversion failed
    HistogramType(i32),
    /// OsString conversion failed
    OsString(OsString),
    /// Invalid UTF-8 byte sequence in a string
    Utf8Error,
    /// Glean initialization was attempted with an invalid configuration
    InvalidConfig,
    /// Glean not initialized
    NotInitialized,
    /// Ping request body size overflowed
    PingBodyOverflow(usize),
}
/// A specialized [`Error`] type for this crate's operations.
///
/// [`Error`]: https://doc.rust-lang.org/stable/std/error/trait.Error.html
#[derive(Debug)]
pub struct Error {
    // Kept private: the kind is only inspected through `kind()` and only
    // constructed through the helpers and `From` impls below.
    kind: ErrorKind,
}
impl Error {
    /// Returns a new UTF-8 error
    ///
    /// This is exposed in order to expose conversion errors on the FFI layer.
    pub fn utf8_error() -> Error {
        Error {
            kind: ErrorKind::Utf8Error,
        }
    }
    /// Indicates an error that no requested global object is initialized
    pub fn not_initialized() -> Error {
        Error {
            kind: ErrorKind::NotInitialized,
        }
    }
    /// Returns the kind of the current error instance.
    pub fn kind(&self) -> &ErrorKind {
        &self.kind
    }
}
// `Error` carries no source/cause, so the default trait methods suffice.
impl std::error::Error for Error {}
impl Display for Error {
    // Human-readable message per category; this text is also what gets
    // forwarded over FFI via the `From<Error> for ExternError` impl below.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use ErrorKind::*;
        match self.kind() {
            Lifetime(l) => write!(f, "Lifetime conversion from {} failed", l),
            Handle(e) => write!(f, "Invalid handle: {}", e),
            IoError(e) => write!(f, "An I/O error occurred: {}", e),
            Rkv(e) => write!(f, "An Rkv error occurred: {}", e),
            Json(e) => write!(f, "A JSON error occurred: {}", e),
            TimeUnit(t) => write!(f, "TimeUnit conversion from {} failed", t),
            MemoryUnit(m) => write!(f, "MemoryUnit conversion from {} failed", m),
            HistogramType(h) => write!(f, "HistogramType conversion from {} failed", h),
            OsString(s) => write!(f, "OsString conversion from {:?} failed", s),
            Utf8Error => write!(f, "Invalid UTF-8 byte sequence in string"),
            InvalidConfig => write!(f, "Invalid Glean configuration provided"),
            NotInitialized => write!(f, "Global Glean object missing"),
            PingBodyOverflow(s) => write!(
                f,
                "Ping request body size exceeded maximum size allowed: {}kB.",
                s / 1024
            ),
        }
    }
}
// Allow using `?` on a bare `ErrorKind`.
impl From<ErrorKind> for Error {
    fn from(kind: ErrorKind) -> Error {
        Error { kind }
    }
}
// Wrap ffi-support handle-map failures.
impl From<HandleError> for Error {
    fn from(error: HandleError) -> Error {
        Error {
            kind: ErrorKind::Handle(error),
        }
    }
}
// Wrap standard I/O failures.
impl From<io::Error> for Error {
    fn from(error: io::Error) -> Error {
        Error {
            kind: ErrorKind::IoError(error),
        }
    }
}
// Wrap rkv storage failures.
impl From<StoreError> for Error {
    fn from(error: StoreError) -> Error {
        Error {
            kind: ErrorKind::Rkv(error),
        }
    }
}
// Convert to the FFI error representation.
// NOTE(review): every error maps to the same numeric code (42); FFI callers
// can only distinguish errors by their message text.
impl From<Error> for ExternError {
    fn from(error: Error) -> ExternError {
        ffi_support::ExternError::new_error(ffi_support::ErrorCode::new(42), format!("{}", error))
    }
}
// Wrap serde_json (de)serialization failures.
impl From<serde_json::error::Error> for Error {
    fn from(error: serde_json::error::Error) -> Error {
        Error {
            kind: ErrorKind::Json(error),
        }
    }
}
// Wrap `OsString`-to-`String` conversion failures (non-UTF-8 names).
impl From<OsString> for Error {
    fn from(error: OsString) -> Error {
        Error {
            kind: ErrorKind::OsString(error),
        }
    }
}
/// To satisfy integer conversion done by the macros on the FFI side, we need to be able to turn
/// something infallible into an error.
/// This will never actually be reached, as an integer-to-integer conversion is infallible.
impl From<std::convert::Infallible> for Error {
    fn from(_: std::convert::Infallible) -> Error {
        unreachable!()
    }
}

Просмотреть файл

@ -1,223 +1,223 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! # Error Recording
//!
//! Glean keeps track of errors that occured due to invalid labels or invalid values when recording
//! other metrics.
//!
//! Error counts are stored in labeled counters in the `glean.error` category.
//! The labeled counter metrics that store the errors are defined in the `metrics.yaml` for documentation purposes,
//! but are not actually used directly, since the `send_in_pings` value needs to match the pings of the metric that is erroring (plus the "metrics" ping),
//! not some constant value that we could define in `metrics.yaml`.
use std::convert::TryFrom;
use std::fmt::Display;
use crate::error::{Error, ErrorKind};
use crate::metrics::CounterMetric;
use crate::metrics::{combine_base_identifier_and_label, strip_label};
use crate::CommonMetricData;
use crate::Glean;
use crate::Lifetime;
/// The possible error types for metric recording.
/// Note: the cases in this enum must be kept in sync with the ones
/// in the platform-specific code (e.g. ErrorType.kt) and with the
/// metrics in the registry files.
#[derive(Debug, PartialEq)]
pub enum ErrorType {
    /// For when the value to be recorded does not match the metric-specific restrictions
    InvalidValue,
    /// For when the label of a labeled metric does not match the restrictions
    InvalidLabel,
    /// For when the metric caught an invalid state while recording
    InvalidState,
    /// For when the value to be recorded overflows the metric-specific upper range
    InvalidOverflow,
}
impl ErrorType {
    /// The error type's metric id
    ///
    /// This is the identifier under which error counts for this type are
    /// stored in the `glean.error` category.
    pub fn as_str(&self) -> &'static str {
        use ErrorType::*;
        match self {
            InvalidValue => "invalid_value",
            InvalidLabel => "invalid_label",
            InvalidState => "invalid_state",
            InvalidOverflow => "invalid_overflow",
        }
    }
}
impl TryFrom<i32> for ErrorType {
    type Error = Error;
    /// Maps the integer representation used by the platform bindings/FFI
    /// back onto the enum; must stay in sync with the `ErrorType` variants.
    fn try_from(value: i32) -> Result<ErrorType, Self::Error> {
        match value {
            0 => Ok(ErrorType::InvalidValue),
            1 => Ok(ErrorType::InvalidLabel),
            2 => Ok(ErrorType::InvalidState),
            3 => Ok(ErrorType::InvalidOverflow),
            // NOTE(review): reuses `ErrorKind::Lifetime` to signal the failed
            // conversion; there is no dedicated error kind for this case.
            e => Err(ErrorKind::Lifetime(e).into()),
        }
    }
}
/// For a given metric, get the metric in which to record errors
///
/// The returned counter lives in the `glean.error` category, its name
/// combines the error id with the metric's (label-stripped) base
/// identifier, and it is sent in the metric's own pings plus "metrics".
fn get_error_metric_for_metric(meta: &CommonMetricData, error: ErrorType) -> CounterMetric {
    // Can't use meta.identifier here, since that might cause infinite recursion
    // if the label on this metric needs to report an error.
    let identifier = meta.base_identifier();
    let name = strip_label(&identifier);
    // Record errors in the pings the metric is in, as well as the metrics ping.
    let mut send_in_pings = meta.send_in_pings.clone();
    let ping_name = "metrics".to_string();
    if !send_in_pings.contains(&ping_name) {
        send_in_pings.push(ping_name);
    }
    CounterMetric::new(CommonMetricData {
        name: combine_base_identifier_and_label(error.as_str(), name),
        category: "glean.error".into(),
        lifetime: Lifetime::Ping,
        send_in_pings,
        ..Default::default()
    })
}
/// Records an error into Glean.
///
/// Errors are recorded as labeled counters in the `glean.error` category.
///
/// *Note*: We do make assumptions here how labeled metrics are encoded, namely by having the name
/// `<name>/<label>`.
/// Errors do not adhere to the usual "maximum label" restriction.
///
/// # Arguments
///
/// * `glean` - The Glean instance containing the database
/// * `meta` - The metric's meta data
/// * `error` - The error type to record
/// * `message` - The message to log. This message is not sent with the ping.
/// It does not need to include the metric id, as that is automatically prepended to the message.
/// * `num_errors` - The number of errors of the same type to report.
pub fn record_error<O: Into<Option<i32>>>(
    glean: &Glean,
    meta: &CommonMetricData,
    error: ErrorType,
    message: impl Display,
    num_errors: O,
) {
    let metric = get_error_metric_for_metric(meta, error);
    log::warn!("{}: {}", meta.base_identifier(), message);
    // A `None` count defaults to reporting a single error.
    let to_report = num_errors.into().unwrap_or(1);
    // Only checked in debug builds; in release a non-positive count is
    // passed through to `add` unchecked.
    debug_assert!(to_report > 0);
    metric.add(glean, to_report);
}
/// Gets the number of recorded errors for the given metric and error type.
///
/// *Notes: This is a **test-only** API, but we need to expose it to be used in integration tests.
///
/// # Arguments
///
/// * `glean` - The Glean object holding the database
/// * `meta` - The metadata of the metric instance
/// * `error` - The type of error
///
/// # Returns
///
/// The number of errors reported.
pub fn test_get_num_recorded_errors(
    glean: &Glean,
    meta: &CommonMetricData,
    error: ErrorType,
    ping_name: Option<&str>,
) -> Result<i32, String> {
    // When no ping is given, default to the metric's first configured ping.
    let use_ping_name = ping_name.unwrap_or(&meta.send_in_pings[0]);
    let metric = get_error_metric_for_metric(meta, error);
    metric.test_get_value(glean, use_ping_name).ok_or_else(|| {
        format!(
            "No error recorded for {} in '{}' store",
            meta.base_identifier(),
            use_ping_name
        )
    })
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::metrics::*;
    use crate::tests::new_glean;
    #[test]
    fn error_type_i32_mapping() {
        // Round-trip every supported integer through `TryFrom<i32>`.
        let error: ErrorType = std::convert::TryFrom::try_from(0).unwrap();
        assert_eq!(error, ErrorType::InvalidValue);
        let error: ErrorType = std::convert::TryFrom::try_from(1).unwrap();
        assert_eq!(error, ErrorType::InvalidLabel);
        let error: ErrorType = std::convert::TryFrom::try_from(2).unwrap();
        assert_eq!(error, ErrorType::InvalidState);
        let error: ErrorType = std::convert::TryFrom::try_from(3).unwrap();
        assert_eq!(error, ErrorType::InvalidOverflow);
    }
    #[test]
    fn recording_of_all_error_types() {
        let (glean, _t) = new_glean(None);
        let string_metric = StringMetric::new(CommonMetricData {
            name: "string_metric".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into(), "store2".into()],
            disabled: false,
            lifetime: Lifetime::User,
            ..Default::default()
        });
        let expected_invalid_values_errors: i32 = 1;
        let expected_invalid_labels_errors: i32 = 2;
        // Passing `None` for the count defaults to one error.
        record_error(
            &glean,
            string_metric.meta(),
            ErrorType::InvalidValue,
            "Invalid value",
            None,
        );
        record_error(
            &glean,
            string_metric.meta(),
            ErrorType::InvalidLabel,
            "Invalid label",
            expected_invalid_labels_errors,
        );
        // Errors land in the metric's own pings plus the "metrics" ping.
        for store in &["store1", "store2", "metrics"] {
            assert_eq!(
                Ok(expected_invalid_values_errors),
                test_get_num_recorded_errors(
                    &glean,
                    string_metric.meta(),
                    ErrorType::InvalidValue,
                    Some(store)
                )
            );
            assert_eq!(
                Ok(expected_invalid_labels_errors),
                test_get_num_recorded_errors(
                    &glean,
                    string_metric.meta(),
                    ErrorType::InvalidLabel,
                    Some(store)
                )
            );
        }
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! # Error Recording
//!
//! Glean keeps track of errors that occured due to invalid labels or invalid values when recording
//! other metrics.
//!
//! Error counts are stored in labeled counters in the `glean.error` category.
//! The labeled counter metrics that store the errors are defined in the `metrics.yaml` for documentation purposes,
//! but are not actually used directly, since the `send_in_pings` value needs to match the pings of the metric that is erroring (plus the "metrics" ping),
//! not some constant value that we could define in `metrics.yaml`.
use std::convert::TryFrom;
use std::fmt::Display;
use crate::error::{Error, ErrorKind};
use crate::metrics::CounterMetric;
use crate::metrics::{combine_base_identifier_and_label, strip_label};
use crate::CommonMetricData;
use crate::Glean;
use crate::Lifetime;
/// The possible error types for metric recording.
/// Note: the cases in this enum must be kept in sync with the ones
/// in the platform-specific code (e.g. ErrorType.kt) and with the
/// metrics in the registry files.
#[derive(Debug, PartialEq)]
pub enum ErrorType {
    /// For when the value to be recorded does not match the metric-specific restrictions
    InvalidValue,
    /// For when the label of a labeled metric does not match the restrictions
    InvalidLabel,
    /// For when the metric caught an invalid state while recording
    InvalidState,
    /// For when the value to be recorded overflows the metric-specific upper range
    InvalidOverflow,
}
impl ErrorType {
    /// The error type's metric id
    ///
    /// This is the identifier under which error counts for this type are
    /// stored in the `glean.error` category.
    pub fn as_str(&self) -> &'static str {
        use ErrorType::*;
        match self {
            InvalidValue => "invalid_value",
            InvalidLabel => "invalid_label",
            InvalidState => "invalid_state",
            InvalidOverflow => "invalid_overflow",
        }
    }
}
impl TryFrom<i32> for ErrorType {
    type Error = Error;
    /// Maps the integer representation used by the platform bindings/FFI
    /// back onto the enum; must stay in sync with the `ErrorType` variants.
    fn try_from(value: i32) -> Result<ErrorType, Self::Error> {
        match value {
            0 => Ok(ErrorType::InvalidValue),
            1 => Ok(ErrorType::InvalidLabel),
            2 => Ok(ErrorType::InvalidState),
            3 => Ok(ErrorType::InvalidOverflow),
            // NOTE(review): reuses `ErrorKind::Lifetime` to signal the failed
            // conversion; there is no dedicated error kind for this case.
            e => Err(ErrorKind::Lifetime(e).into()),
        }
    }
}
/// For a given metric, get the metric in which to record errors
///
/// The returned counter lives in the `glean.error` category, its name
/// combines the error id with the metric's (label-stripped) base
/// identifier, and it is sent in the metric's own pings plus "metrics".
fn get_error_metric_for_metric(meta: &CommonMetricData, error: ErrorType) -> CounterMetric {
    // Can't use meta.identifier here, since that might cause infinite recursion
    // if the label on this metric needs to report an error.
    let identifier = meta.base_identifier();
    let name = strip_label(&identifier);
    // Record errors in the pings the metric is in, as well as the metrics ping.
    let mut send_in_pings = meta.send_in_pings.clone();
    let ping_name = "metrics".to_string();
    if !send_in_pings.contains(&ping_name) {
        send_in_pings.push(ping_name);
    }
    CounterMetric::new(CommonMetricData {
        name: combine_base_identifier_and_label(error.as_str(), name),
        category: "glean.error".into(),
        lifetime: Lifetime::Ping,
        send_in_pings,
        ..Default::default()
    })
}
/// Records an error into Glean.
///
/// Errors are recorded as labeled counters in the `glean.error` category.
///
/// *Note*: We do make assumptions here how labeled metrics are encoded, namely by having the name
/// `<name>/<label>`.
/// Errors do not adhere to the usual "maximum label" restriction.
///
/// # Arguments
///
/// * `glean` - The Glean instance containing the database
/// * `meta` - The metric's meta data
/// * `error` - The error type to record
/// * `message` - The message to log. This message is not sent with the ping.
/// It does not need to include the metric id, as that is automatically prepended to the message.
/// * `num_errors` - The number of errors of the same type to report.
pub fn record_error<O: Into<Option<i32>>>(
    glean: &Glean,
    meta: &CommonMetricData,
    error: ErrorType,
    message: impl Display,
    num_errors: O,
) {
    let metric = get_error_metric_for_metric(meta, error);
    log::warn!("{}: {}", meta.base_identifier(), message);
    // A `None` count defaults to reporting a single error.
    let to_report = num_errors.into().unwrap_or(1);
    // Only checked in debug builds; in release a non-positive count is
    // passed through to `add` unchecked.
    debug_assert!(to_report > 0);
    metric.add(glean, to_report);
}
/// Gets the number of recorded errors for the given metric and error type.
///
/// *Notes: This is a **test-only** API, but we need to expose it to be used in integration tests.
///
/// # Arguments
///
/// * `glean` - The Glean object holding the database
/// * `meta` - The metadata of the metric instance
/// * `error` - The type of error
///
/// # Returns
///
/// The number of errors reported.
pub fn test_get_num_recorded_errors(
    glean: &Glean,
    meta: &CommonMetricData,
    error: ErrorType,
    ping_name: Option<&str>,
) -> Result<i32, String> {
    // When no ping is given, default to the metric's first configured ping.
    let use_ping_name = ping_name.unwrap_or(&meta.send_in_pings[0]);
    let metric = get_error_metric_for_metric(meta, error);
    metric.test_get_value(glean, use_ping_name).ok_or_else(|| {
        format!(
            "No error recorded for {} in '{}' store",
            meta.base_identifier(),
            use_ping_name
        )
    })
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::metrics::*;
    use crate::tests::new_glean;
    #[test]
    fn error_type_i32_mapping() {
        // Round-trip every supported integer through `TryFrom<i32>`.
        let error: ErrorType = std::convert::TryFrom::try_from(0).unwrap();
        assert_eq!(error, ErrorType::InvalidValue);
        let error: ErrorType = std::convert::TryFrom::try_from(1).unwrap();
        assert_eq!(error, ErrorType::InvalidLabel);
        let error: ErrorType = std::convert::TryFrom::try_from(2).unwrap();
        assert_eq!(error, ErrorType::InvalidState);
        let error: ErrorType = std::convert::TryFrom::try_from(3).unwrap();
        assert_eq!(error, ErrorType::InvalidOverflow);
    }
    #[test]
    fn recording_of_all_error_types() {
        let (glean, _t) = new_glean(None);
        let string_metric = StringMetric::new(CommonMetricData {
            name: "string_metric".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into(), "store2".into()],
            disabled: false,
            lifetime: Lifetime::User,
            ..Default::default()
        });
        let expected_invalid_values_errors: i32 = 1;
        let expected_invalid_labels_errors: i32 = 2;
        // Passing `None` for the count defaults to one error.
        record_error(
            &glean,
            string_metric.meta(),
            ErrorType::InvalidValue,
            "Invalid value",
            None,
        );
        record_error(
            &glean,
            string_metric.meta(),
            ErrorType::InvalidLabel,
            "Invalid label",
            expected_invalid_labels_errors,
        );
        // Errors land in the metric's own pings plus the "metrics" ping.
        for store in &["store1", "store2", "metrics"] {
            assert_eq!(
                Ok(expected_invalid_values_errors),
                test_get_num_recorded_errors(
                    &glean,
                    string_metric.meta(),
                    ErrorType::InvalidValue,
                    Some(store)
                )
            );
            assert_eq!(
                Ok(expected_invalid_labels_errors),
                test_get_num_recorded_errors(
                    &glean,
                    string_metric.meta(),
                    ErrorType::InvalidLabel,
                    Some(store)
                )
            );
        }
    }
}

Просмотреть файл

@ -1,497 +1,497 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use std::fs;
use std::fs::{create_dir_all, File, OpenOptions};
use std::io::BufRead;
use std::io::BufReader;
use std::io::Write;
use std::iter::FromIterator;
use std::path::{Path, PathBuf};
use std::sync::RwLock;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value as JsonValue};
use crate::CommonMetricData;
use crate::Glean;
use crate::Result;
/// Represents the recorded data for a single event.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct RecordedEvent {
    /// The timestamp of when the event was recorded.
    ///
    /// This allows to order events from a single process run.
    pub timestamp: u64,
    /// The event's category.
    ///
    /// This is defined by users in the metrics file.
    pub category: String,
    /// The event's name.
    ///
    /// This is defined by users in the metrics file.
    pub name: String,
    /// A map of all extra data values.
    ///
    /// The set of allowed extra keys is defined by users in the metrics file.
    // Omitted entirely from the serialized JSON when there is no extra data.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub extra: Option<HashMap<String, String>>,
}
impl RecordedEvent {
    /// Serialize an event to JSON, adjusting its timestamp relative to a base timestamp
    ///
    /// Used when assembling a ping: timestamps are reported relative to the
    /// earliest event in the store (see `snapshot_as_json`).
    fn serialize_relative(&self, timestamp_offset: u64) -> JsonValue {
        // NOTE(review): assumes `self.timestamp >= timestamp_offset`; a larger
        // offset would panic on underflow in debug builds (wrap in release).
        json!(&RecordedEvent {
            timestamp: self.timestamp - timestamp_offset,
            category: self.category.clone(),
            name: self.name.clone(),
            extra: self.extra.clone(),
        })
    }
}
/// This struct handles the in-memory and on-disk storage logic for events.
///
/// So that the data survives shutting down of the application, events are stored
/// in an append-only file on disk, in addition to the store in memory. Each line
/// of this file records a single event in JSON, exactly as it will be sent in the
/// ping. There is one file per store.
///
/// When restarting the application, these on-disk files are checked, and if any are
/// found, they are loaded, queued for sending and flushed immediately before any
/// further events are collected. This is because the timestamps for these events
/// may have come from a previous boot of the device, and therefore will not be
/// compatible with any newly-collected events.
#[derive(Debug)]
pub struct EventDatabase {
    /// Path to directory of on-disk event files
    pub path: PathBuf,
    /// The in-memory list of events
    ///
    /// Maps a store (ping) name to its list of recorded events.
    event_stores: RwLock<HashMap<String, Vec<RecordedEvent>>>,
    /// A lock to be held when doing operations on the filesystem
    file_lock: RwLock<()>,
}
impl EventDatabase {
/// Creates a new event database.
///
/// # Arguments
///
/// * `data_path` - The directory to store events in. A new directory
/// * `events` - will be created inside of this directory.
pub fn new(data_path: &str) -> Result<Self> {
let path = Path::new(data_path).join("events");
create_dir_all(&path)?;
Ok(Self {
path,
event_stores: RwLock::new(HashMap::new()),
file_lock: RwLock::new(()),
})
}
/// Initializes events storage after Glean is fully initialized and ready to send pings.
///
/// This must be called once on application startup, e.g. from
/// [Glean.initialize], but after we are ready to send pings, since this
/// could potentially collect and send pings.
///
/// If there are any events queued on disk, it loads them into memory so
/// that the memory and disk representations are in sync.
///
/// Secondly, if this is the first time the application has been run since
/// rebooting, any pings containing events are assembled into pings and cleared
/// immediately, since their timestamps won't be compatible with the timestamps
/// we would create during this boot of the device.
///
/// # Arguments
///
/// * `glean` - The Glean instance.
///
/// # Returns
///
/// Whether at least one ping was generated.
pub fn flush_pending_events_on_startup(&self, glean: &Glean) -> bool {
match self.load_events_from_disk() {
Ok(_) => self.send_all_events(glean),
Err(err) => {
log::error!("Error loading events from disk: {}", err);
false
}
}
}
fn load_events_from_disk(&self) -> Result<()> {
let _lock = self.file_lock.read().unwrap(); // safe unwrap, only error case is poisoning
let mut db = self.event_stores.write().unwrap(); // safe unwrap, only error case is poisoning
for entry in fs::read_dir(&self.path)? {
let entry = entry?;
if entry.file_type()?.is_file() {
let store_name = entry.file_name().into_string()?;
let file = BufReader::new(File::open(entry.path())?);
db.insert(
store_name,
file.lines()
.filter_map(|line| line.ok())
.filter_map(|line| serde_json::from_str::<RecordedEvent>(&line).ok())
.collect(),
);
}
}
Ok(())
}
fn send_all_events(&self, glean: &Glean) -> bool {
let store_names = {
let db = self.event_stores.read().unwrap(); // safe unwrap, only error case is poisoning
db.keys().cloned().collect::<Vec<String>>()
};
let mut ping_sent = false;
for store_name in store_names {
if let Err(err) = glean.submit_ping_by_name(&store_name, Some("startup")) {
log::error!(
"Error flushing existing events to the '{}' ping: {}",
store_name,
err
);
} else {
ping_sent = true;
}
}
ping_sent
}
/// Records an event in the desired stores.
///
/// # Arguments
///
/// * `glean` - The Glean instance.
/// * `meta` - The metadata about the event metric. Used to get the category,
/// name and stores for the metric.
/// * `timestamp` - The timestamp of the event, in milliseconds. Must use a
/// monotonically increasing timer (this value is obtained on the
/// platform-specific side).
/// * `extra` - Extra data values, mapping strings to strings.
pub fn record(
&self,
glean: &Glean,
meta: &CommonMetricData,
timestamp: u64,
extra: Option<HashMap<String, String>>,
) {
// If upload is disabled we don't want to record.
if !glean.is_upload_enabled() {
return;
}
// Create RecordedEvent object, and its JSON form for serialization
// on disk.
let event = RecordedEvent {
timestamp,
category: meta.category.to_string(),
name: meta.name.to_string(),
extra,
};
let event_json = serde_json::to_string(&event).unwrap(); // safe unwrap, event can always be serialized
// Store the event in memory and on disk to each of the stores.
let mut stores_to_submit: Vec<&str> = Vec::new();
{
let mut db = self.event_stores.write().unwrap(); // safe unwrap, only error case is poisoning
for store_name in meta.send_in_pings.iter() {
let store = db.entry(store_name.to_string()).or_insert_with(Vec::new);
store.push(event.clone());
self.write_event_to_disk(store_name, &event_json);
if store.len() == glean.get_max_events() {
stores_to_submit.push(&store_name);
}
}
}
// If any of the event stores reached maximum size, submit the pings
// containing those events immediately.
for store_name in stores_to_submit {
if let Err(err) = glean.submit_ping_by_name(store_name, Some("max_capacity")) {
log::error!(
"Got more than {} events, but could not send {} ping: {}",
glean.get_max_events(),
store_name,
err
);
}
}
}
/// Writes an event to a single store on disk.
///
/// # Arguments
///
/// * `store_name` - The name of the store.
/// * `event_json` - The event content, as a single-line JSON-encoded string.
fn write_event_to_disk(&self, store_name: &str, event_json: &str) {
let _lock = self.file_lock.write().unwrap(); // safe unwrap, only error case is poisoning
if let Err(err) = OpenOptions::new()
.create(true)
.append(true)
.open(self.path.join(store_name))
.and_then(|mut file| writeln!(file, "{}", event_json))
{
log::error!("IO error writing event to store '{}': {}", store_name, err);
}
}
/// Gets a snapshot of the stored event data as a JsonValue.
///
/// # Arguments
///
/// * `store_name` - The name of the desired store.
/// * `clear_store` - Whether to clear the store after snapshotting.
///
/// # Returns
///
/// A array of events, JSON encoded, if any. Otherwise `None`.
pub fn snapshot_as_json(&self, store_name: &str, clear_store: bool) -> Option<JsonValue> {
let result = {
let mut db = self.event_stores.write().unwrap(); // safe unwrap, only error case is poisoning
db.get_mut(&store_name.to_string()).and_then(|store| {
if !store.is_empty() {
// Timestamps may have been recorded out-of-order, so sort the events
// by the timestamp.
// We can't insert events in order as-we-go, because we also append
// events to a file on disk, where this would be expensive. Best to
// handle this in every case (whether events came from disk or memory)
// in a single location.
store.sort_by(|a, b| a.timestamp.cmp(&b.timestamp));
let first_timestamp = store[0].timestamp;
Some(JsonValue::from_iter(
store.iter().map(|e| e.serialize_relative(first_timestamp)),
))
} else {
log::error!("Unexpectly got empty event store for '{}'", store_name);
None
}
})
};
if clear_store {
self.event_stores
.write()
.unwrap() // safe unwrap, only error case is poisoning
.remove(&store_name.to_string());
let _lock = self.file_lock.write().unwrap(); // safe unwrap, only error case is poisoning
if let Err(err) = fs::remove_file(self.path.join(store_name)) {
match err.kind() {
std::io::ErrorKind::NotFound => {
// silently drop this error, the file was already non-existing
}
_ => log::error!("Error removing events queue file '{}': {}", store_name, err),
}
}
}
result
}
/// Clears all stored events, both in memory and on-disk.
pub fn clear_all(&self) -> Result<()> {
// safe unwrap, only error case is poisoning
self.event_stores.write().unwrap().clear();
// safe unwrap, only error case is poisoning
let _lock = self.file_lock.write().unwrap();
// Remove and recreate the whole events directory; this also clears any
// on-disk store files that were never loaded into memory.
std::fs::remove_dir_all(&self.path)?;
create_dir_all(&self.path)?;
Ok(())
}
/// **Test-only API (exported for FFI purposes).**
///
/// Returns whether there are any events currently stored for the given event
/// metric.
///
/// This doesn't clear the stored value.
pub fn test_has_value<'a>(&'a self, meta: &'a CommonMetricData, store_name: &str) -> bool {
    self.event_stores
        .read()
        .unwrap() // safe unwrap, only error case is poisoning
        // `&str` keys look up `String`-keyed maps directly; no allocation.
        .get(store_name)
        .into_iter()
        .flatten()
        .any(|event| event.name == meta.name && event.category == meta.category)
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the vector of currently stored events for the given event metric in
/// the given store.
///
/// This doesn't clear the stored value.
pub fn test_get_value<'a>(
    &'a self,
    meta: &'a CommonMetricData,
    store_name: &str,
) -> Option<Vec<RecordedEvent>> {
    let value: Vec<RecordedEvent> = self
        .event_stores
        .read()
        .unwrap() // safe unwrap, only error case is poisoning
        // `&str` keys look up `String`-keyed maps directly; no allocation.
        .get(store_name)
        .into_iter()
        .flatten()
        .filter(|event| event.name == meta.name && event.category == meta.category)
        .cloned()
        .collect();
    if value.is_empty() {
        None
    } else {
        Some(value)
    }
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::tests::new_glean;
use crate::CommonMetricData;
// Lines truncated mid-write (e.g. by a crash) must be dropped on load;
// only the single complete record should survive.
#[test]
fn handle_truncated_events_on_disk() {
let t = tempfile::tempdir().unwrap();
{
let db = EventDatabase::new(&t.path().display().to_string()).unwrap();
db.write_event_to_disk("events", "{\"timestamp\": 500");
db.write_event_to_disk("events", "{\"timestamp\"");
db.write_event_to_disk(
"events",
"{\"timestamp\": 501, \"category\": \"ui\", \"name\": \"click\"}",
);
}
{
let db = EventDatabase::new(&t.path().display().to_string()).unwrap();
db.load_events_from_disk().unwrap();
let events = &db.event_stores.read().unwrap()["events"];
assert_eq!(1, events.len());
}
}
// Events with and without `extra` data must survive a JSON round-trip
// unchanged.
#[test]
fn stable_serialization() {
let event_empty = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: None,
};
let mut data = HashMap::new();
data.insert("a key".to_string(), "a value".to_string());
let event_data = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: Some(data),
};
let event_empty_json = ::serde_json::to_string_pretty(&event_empty).unwrap();
let event_data_json = ::serde_json::to_string_pretty(&event_data).unwrap();
assert_eq!(
event_empty,
serde_json::from_str(&event_empty_json).unwrap()
);
assert_eq!(event_data, serde_json::from_str(&event_data_json).unwrap());
}
// Fixed JSON fixtures guard against format drift: data already written to
// disk by earlier versions (where `extra` is optional) must keep parsing.
#[test]
fn deserialize_existing_data() {
let event_empty_json = r#"
{
"timestamp": 2,
"category": "cat",
"name": "name"
}
"#;
let event_data_json = r#"
{
"timestamp": 2,
"category": "cat",
"name": "name",
"extra": {
"a key": "a value"
}
}
"#;
let event_empty = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: None,
};
let mut data = HashMap::new();
data.insert("a key".to_string(), "a value".to_string());
let event_data = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: Some(data),
};
assert_eq!(
event_empty,
serde_json::from_str(&event_empty_json).unwrap()
);
assert_eq!(event_data, serde_json::from_str(&event_data_json).unwrap());
}
#[test]
fn doesnt_record_when_upload_is_disabled() {
let (mut glean, dir) = new_glean(None);
let db = EventDatabase::new(dir.path().to_str().unwrap()).unwrap();
let test_storage = "test-storage";
let test_category = "category";
let test_name = "name";
let test_timestamp = 2;
let test_meta = CommonMetricData::new(test_category, test_name, test_storage);
let event_data = RecordedEvent {
timestamp: test_timestamp,
category: test_category.to_string(),
name: test_name.to_string(),
extra: None,
};
// Upload is not yet disabled,
// so let's check that everything is getting recorded as expected.
db.record(&glean, &test_meta, 2, None);
{
let event_stores = db.event_stores.read().unwrap();
assert_eq!(&event_data, &event_stores.get(test_storage).unwrap()[0]);
assert_eq!(event_stores.get(test_storage).unwrap().len(), 1);
}
glean.set_upload_enabled(false);
// Now that upload is disabled, let's check nothing is recorded.
db.record(&glean, &test_meta, 2, None);
{
let event_stores = db.event_stores.read().unwrap();
assert_eq!(event_stores.get(test_storage).unwrap().len(), 1);
}
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use std::fs;
use std::fs::{create_dir_all, File, OpenOptions};
use std::io::BufRead;
use std::io::BufReader;
use std::io::Write;
use std::iter::FromIterator;
use std::path::{Path, PathBuf};
use std::sync::RwLock;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value as JsonValue};
use crate::CommonMetricData;
use crate::Glean;
use crate::Result;
/// Represents the recorded data for a single event.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct RecordedEvent {
/// The timestamp of when the event was recorded.
///
/// This allows to order events from a single process run.
pub timestamp: u64,
/// The event's category.
///
/// This is defined by users in the metrics file.
pub category: String,
/// The event's name.
///
/// This is defined by users in the metrics file.
pub name: String,
/// A map of all extra data values.
///
/// The set of allowed extra keys is defined by users in the metrics file.
// Omit `extra` entirely from the serialized form (on disk and in pings)
// when there is no extra data, rather than emitting `"extra": null`.
#[serde(skip_serializing_if = "Option::is_none")]
pub extra: Option<HashMap<String, String>>,
}
impl RecordedEvent {
    /// Serialize an event to JSON, adjusting its timestamp relative to a base timestamp
    fn serialize_relative(&self, timestamp_offset: u64) -> JsonValue {
        // Clone the event, rebase its timestamp, and let serde serialize the
        // adjusted copy (keeping the `extra`-skipping behavior of the derive).
        let mut rebased = self.clone();
        rebased.timestamp = self.timestamp - timestamp_offset;
        json!(rebased)
    }
}
/// This struct handles the in-memory and on-disk storage logic for events.
///
/// So that the data survives shutting down of the application, events are stored
/// in an append-only file on disk, in addition to the store in memory. Each line
/// of this file records a single event in JSON, exactly as it will be sent in the
/// ping. There is one file per store.
///
/// When restarting the application, these on-disk files are checked, and if any are
/// found, they are loaded, queued for sending and flushed immediately before any
/// further events are collected. This is because the timestamps for these events
/// may have come from a previous boot of the device, and therefore will not be
/// compatible with any newly-collected events.
#[derive(Debug)]
pub struct EventDatabase {
/// Path to directory of on-disk event files
pub path: PathBuf,
/// The in-memory list of events
// Maps store (ping) name -> recorded events, mirroring the on-disk files.
event_stores: RwLock<HashMap<String, Vec<RecordedEvent>>>,
/// A lock to be held when doing operations on the filesystem
file_lock: RwLock<()>,
}
impl EventDatabase {
/// Creates a new event database.
///
/// # Arguments
///
/// * `data_path` - The directory to store events in. A new directory
/// * `events` - will be created inside of this directory.
pub fn new(data_path: &str) -> Result<Self> {
let path = Path::new(data_path).join("events");
create_dir_all(&path)?;
Ok(Self {
path,
event_stores: RwLock::new(HashMap::new()),
file_lock: RwLock::new(()),
})
}
/// Initializes events storage after Glean is fully initialized and ready to send pings.
///
/// This must be called once on application startup, e.g. from
/// [Glean.initialize], but after we are ready to send pings, since this
/// could potentially collect and send pings.
///
/// If there are any events queued on disk, it loads them into memory so
/// that the memory and disk representations are in sync.
///
/// Secondly, if this is the first time the application has been run since
/// rebooting, any pings containing events are assembled into pings and cleared
/// immediately, since their timestamps won't be compatible with the timestamps
/// we would create during this boot of the device.
///
/// # Arguments
///
/// * `glean` - The Glean instance.
///
/// # Returns
///
/// Whether at least one ping was generated.
pub fn flush_pending_events_on_startup(&self, glean: &Glean) -> bool {
match self.load_events_from_disk() {
Ok(_) => self.send_all_events(glean),
Err(err) => {
log::error!("Error loading events from disk: {}", err);
false
}
}
}
fn load_events_from_disk(&self) -> Result<()> {
let _lock = self.file_lock.read().unwrap(); // safe unwrap, only error case is poisoning
let mut db = self.event_stores.write().unwrap(); // safe unwrap, only error case is poisoning
for entry in fs::read_dir(&self.path)? {
let entry = entry?;
if entry.file_type()?.is_file() {
let store_name = entry.file_name().into_string()?;
let file = BufReader::new(File::open(entry.path())?);
db.insert(
store_name,
file.lines()
.filter_map(|line| line.ok())
.filter_map(|line| serde_json::from_str::<RecordedEvent>(&line).ok())
.collect(),
);
}
}
Ok(())
}
fn send_all_events(&self, glean: &Glean) -> bool {
let store_names = {
let db = self.event_stores.read().unwrap(); // safe unwrap, only error case is poisoning
db.keys().cloned().collect::<Vec<String>>()
};
let mut ping_sent = false;
for store_name in store_names {
if let Err(err) = glean.submit_ping_by_name(&store_name, Some("startup")) {
log::error!(
"Error flushing existing events to the '{}' ping: {}",
store_name,
err
);
} else {
ping_sent = true;
}
}
ping_sent
}
/// Records an event in the desired stores.
///
/// # Arguments
///
/// * `glean` - The Glean instance.
/// * `meta` - The metadata about the event metric. Used to get the category,
/// name and stores for the metric.
/// * `timestamp` - The timestamp of the event, in milliseconds. Must use a
/// monotonically increasing timer (this value is obtained on the
/// platform-specific side).
/// * `extra` - Extra data values, mapping strings to strings.
pub fn record(
&self,
glean: &Glean,
meta: &CommonMetricData,
timestamp: u64,
extra: Option<HashMap<String, String>>,
) {
// If upload is disabled we don't want to record.
if !glean.is_upload_enabled() {
return;
}
// Create RecordedEvent object, and its JSON form for serialization
// on disk.
let event = RecordedEvent {
timestamp,
category: meta.category.to_string(),
name: meta.name.to_string(),
extra,
};
let event_json = serde_json::to_string(&event).unwrap(); // safe unwrap, event can always be serialized
// Store the event in memory and on disk to each of the stores.
let mut stores_to_submit: Vec<&str> = Vec::new();
{
let mut db = self.event_stores.write().unwrap(); // safe unwrap, only error case is poisoning
for store_name in meta.send_in_pings.iter() {
let store = db.entry(store_name.to_string()).or_insert_with(Vec::new);
store.push(event.clone());
self.write_event_to_disk(store_name, &event_json);
if store.len() == glean.get_max_events() {
stores_to_submit.push(&store_name);
}
}
}
// If any of the event stores reached maximum size, submit the pings
// containing those events immediately.
for store_name in stores_to_submit {
if let Err(err) = glean.submit_ping_by_name(store_name, Some("max_capacity")) {
log::error!(
"Got more than {} events, but could not send {} ping: {}",
glean.get_max_events(),
store_name,
err
);
}
}
}
/// Writes an event to a single store on disk.
///
/// # Arguments
///
/// * `store_name` - The name of the store.
/// * `event_json` - The event content, as a single-line JSON-encoded string.
fn write_event_to_disk(&self, store_name: &str, event_json: &str) {
let _lock = self.file_lock.write().unwrap(); // safe unwrap, only error case is poisoning
if let Err(err) = OpenOptions::new()
.create(true)
.append(true)
.open(self.path.join(store_name))
.and_then(|mut file| writeln!(file, "{}", event_json))
{
log::error!("IO error writing event to store '{}': {}", store_name, err);
}
}
/// Gets a snapshot of the stored event data as a JsonValue.
///
/// # Arguments
///
/// * `store_name` - The name of the desired store.
/// * `clear_store` - Whether to clear the store after snapshotting.
///
/// # Returns
///
/// A array of events, JSON encoded, if any. Otherwise `None`.
pub fn snapshot_as_json(&self, store_name: &str, clear_store: bool) -> Option<JsonValue> {
let result = {
let mut db = self.event_stores.write().unwrap(); // safe unwrap, only error case is poisoning
db.get_mut(&store_name.to_string()).and_then(|store| {
if !store.is_empty() {
// Timestamps may have been recorded out-of-order, so sort the events
// by the timestamp.
// We can't insert events in order as-we-go, because we also append
// events to a file on disk, where this would be expensive. Best to
// handle this in every case (whether events came from disk or memory)
// in a single location.
store.sort_by(|a, b| a.timestamp.cmp(&b.timestamp));
let first_timestamp = store[0].timestamp;
Some(JsonValue::from_iter(
store.iter().map(|e| e.serialize_relative(first_timestamp)),
))
} else {
log::error!("Unexpectly got empty event store for '{}'", store_name);
None
}
})
};
if clear_store {
self.event_stores
.write()
.unwrap() // safe unwrap, only error case is poisoning
.remove(&store_name.to_string());
let _lock = self.file_lock.write().unwrap(); // safe unwrap, only error case is poisoning
if let Err(err) = fs::remove_file(self.path.join(store_name)) {
match err.kind() {
std::io::ErrorKind::NotFound => {
// silently drop this error, the file was already non-existing
}
_ => log::error!("Error removing events queue file '{}': {}", store_name, err),
}
}
}
result
}
/// Clears all stored events, both in memory and on-disk.
pub fn clear_all(&self) -> Result<()> {
// safe unwrap, only error case is poisoning
self.event_stores.write().unwrap().clear();
// safe unwrap, only error case is poisoning
let _lock = self.file_lock.write().unwrap();
std::fs::remove_dir_all(&self.path)?;
create_dir_all(&self.path)?;
Ok(())
}
/// **Test-only API (exported for FFI purposes).**
///
/// Returns whether there are any events currently stored for the given even
/// metric.
///
/// This doesn't clear the stored value.
pub fn test_has_value<'a>(&'a self, meta: &'a CommonMetricData, store_name: &str) -> bool {
self.event_stores
.read()
.unwrap() // safe unwrap, only error case is poisoning
.get(&store_name.to_string())
.into_iter()
.flatten()
.any(|event| event.name == meta.name && event.category == meta.category)
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the vector of currently stored events for the given event metric in
/// the given store.
///
/// This doesn't clear the stored value.
pub fn test_get_value<'a>(
&'a self,
meta: &'a CommonMetricData,
store_name: &str,
) -> Option<Vec<RecordedEvent>> {
let value: Vec<RecordedEvent> = self
.event_stores
.read()
.unwrap() // safe unwrap, only error case is poisoning
.get(&store_name.to_string())
.into_iter()
.flatten()
.filter(|event| event.name == meta.name && event.category == meta.category)
.cloned()
.collect();
if !value.is_empty() {
Some(value)
} else {
None
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::tests::new_glean;
use crate::CommonMetricData;
// Lines truncated mid-write (e.g. by a crash) must be dropped on load;
// only the single complete record should survive.
#[test]
fn handle_truncated_events_on_disk() {
let t = tempfile::tempdir().unwrap();
{
let db = EventDatabase::new(&t.path().display().to_string()).unwrap();
db.write_event_to_disk("events", "{\"timestamp\": 500");
db.write_event_to_disk("events", "{\"timestamp\"");
db.write_event_to_disk(
"events",
"{\"timestamp\": 501, \"category\": \"ui\", \"name\": \"click\"}",
);
}
{
let db = EventDatabase::new(&t.path().display().to_string()).unwrap();
db.load_events_from_disk().unwrap();
let events = &db.event_stores.read().unwrap()["events"];
assert_eq!(1, events.len());
}
}
// Events with and without `extra` data must survive a JSON round-trip
// unchanged.
#[test]
fn stable_serialization() {
let event_empty = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: None,
};
let mut data = HashMap::new();
data.insert("a key".to_string(), "a value".to_string());
let event_data = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: Some(data),
};
let event_empty_json = ::serde_json::to_string_pretty(&event_empty).unwrap();
let event_data_json = ::serde_json::to_string_pretty(&event_data).unwrap();
assert_eq!(
event_empty,
serde_json::from_str(&event_empty_json).unwrap()
);
assert_eq!(event_data, serde_json::from_str(&event_data_json).unwrap());
}
// Fixed JSON fixtures guard against format drift: data already written to
// disk by earlier versions (where `extra` is optional) must keep parsing.
#[test]
fn deserialize_existing_data() {
let event_empty_json = r#"
{
"timestamp": 2,
"category": "cat",
"name": "name"
}
"#;
let event_data_json = r#"
{
"timestamp": 2,
"category": "cat",
"name": "name",
"extra": {
"a key": "a value"
}
}
"#;
let event_empty = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: None,
};
let mut data = HashMap::new();
data.insert("a key".to_string(), "a value".to_string());
let event_data = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: Some(data),
};
assert_eq!(
event_empty,
serde_json::from_str(&event_empty_json).unwrap()
);
assert_eq!(event_data, serde_json::from_str(&event_data_json).unwrap());
}
#[test]
fn doesnt_record_when_upload_is_disabled() {
let (mut glean, dir) = new_glean(None);
let db = EventDatabase::new(dir.path().to_str().unwrap()).unwrap();
let test_storage = "test-storage";
let test_category = "category";
let test_name = "name";
let test_timestamp = 2;
let test_meta = CommonMetricData::new(test_category, test_name, test_storage);
let event_data = RecordedEvent {
timestamp: test_timestamp,
category: test_category.to_string(),
name: test_name.to_string(),
extra: None,
};
// Upload is not yet disabled,
// so let's check that everything is getting recorded as expected.
db.record(&glean, &test_meta, 2, None);
{
let event_stores = db.event_stores.read().unwrap();
assert_eq!(&event_data, &event_stores.get(test_storage).unwrap()[0]);
assert_eq!(event_stores.get(test_storage).unwrap().len(), 1);
}
glean.set_upload_enabled(false);
// Now that upload is disabled, let's check nothing is recorded.
db.record(&glean, &test_meta, 2, None);
{
let event_stores = db.event_stores.read().unwrap();
assert_eq!(event_stores.get(test_storage).unwrap().len(), 1);
}
}
}

Просмотреть файл

@ -1,206 +1,206 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use once_cell::sync::OnceCell;
use serde::{Deserialize, Serialize};
use super::{Bucketing, Histogram};
use crate::util::floating_point_context::FloatingPointContext;
/// Create the possible ranges in an exponential distribution from `min` to `max` with
/// `bucket_count` buckets.
///
/// This algorithm calculates the bucket sizes using a natural log approach to get `bucket_count` number of buckets,
/// exponentially spaced between `min` and `max`
///
/// Bucket limits are the minimal bucket value.
/// That means values in a bucket `i` are `bucket[i] <= value < bucket[i+1]`.
/// It will always contain an underflow bucket (`< 1`).
fn exponential_range(min: u64, max: u64, bucket_count: usize) -> Vec<u64> {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
let log_max = (max as f64).ln();
let mut ranges = Vec::with_capacity(bucket_count);
let mut current = min;
// A minimum of 0 would make the log-spacing degenerate; bump it to 1.
if current == 0 {
current = 1;
}
// underflow bucket
ranges.push(0);
ranges.push(current);
// Space the remaining limits evenly in log space between `current` and
// `max`, recomputing the ratio each step; the `current + 1` fallback
// guarantees strictly increasing limits even when rounding collapses.
for i in 2..bucket_count {
let log_current = (current as f64).ln();
let log_ratio = (log_max - log_current) / (bucket_count - i) as f64;
let log_next = log_current + log_ratio;
let next_value = log_next.exp().round() as u64;
current = if next_value > current {
next_value
} else {
current + 1
};
ranges.push(current);
}
ranges
}
/// An exponential bucketing algorithm.
///
/// Buckets are pre-computed at instantiation with an exponential distribution from `min` to `max`
/// and `bucket_count` buckets.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct PrecomputedExponential {
// Don't serialize the (potentially large) array of ranges, instead compute them on first
// access.
#[serde(skip)]
bucket_ranges: OnceCell<Vec<u64>>,
// Lower bound of the distribution (0 is bumped to 1 when computing ranges).
min: u64,
// Upper bound; samples beyond it land in the last bucket.
max: u64,
// Total number of buckets, including the underflow bucket.
bucket_count: usize,
}
impl Bucketing for PrecomputedExponential {
/// Get the bucket for the sample.
///
/// This uses a binary search to locate the index `i` of the bucket such that:
/// bucket[i] <= sample < bucket[i+1]
fn sample_to_bucket_minimum(&self, sample: u64) -> u64 {
let limit = match self.ranges().binary_search(&sample) {
// Found an exact match to fit it in
Ok(i) => i,
// Sorted it fits after the bucket's limit, therefore it fits into the previous bucket
// (`i - 1` can't underflow: ranges[0] is 0, so `Err(0)` is impossible for a u64 sample)
Err(i) => i - 1,
};
self.ranges()[limit]
}
fn ranges(&self) -> &[u64] {
// Create the exponential range on first access.
// (`bucket_ranges` is not serialized, so this also recomputes it lazily
// after deserialization.)
self.bucket_ranges
.get_or_init(|| exponential_range(self.min, self.max, self.bucket_count))
}
}
impl Histogram<PrecomputedExponential> {
/// Creates a histogram with `count` exponential buckets in the range `min` to `max`.
///
/// # Arguments
///
/// * `min` - The minimum of the distribution (0 is allowed; the first real bucket starts at 1).
/// * `max` - The maximum of the distribution; larger samples fall into the last bucket.
/// * `bucket_count` - The number of buckets, including the underflow bucket.
pub fn exponential(
min: u64,
max: u64,
bucket_count: usize,
) -> Histogram<PrecomputedExponential> {
Histogram {
values: HashMap::new(),
count: 0,
sum: 0,
// Bucket limits are computed lazily on first use (see `ranges()`).
bucketing: PrecomputedExponential {
bucket_ranges: OnceCell::new(),
min,
max,
bucket_count,
},
}
}
}
#[cfg(test)]
mod test {
use super::*;
const DEFAULT_BUCKET_COUNT: usize = 100;
const DEFAULT_RANGE_MIN: u64 = 0;
const DEFAULT_RANGE_MAX: u64 = 60_000;
#[test]
fn can_count() {
let mut hist = Histogram::exponential(1, 500, 10);
assert!(hist.is_empty());
for i in 1..=10 {
hist.accumulate(i);
}
assert_eq!(10, hist.count());
assert_eq!(55, hist.sum());
}
// Samples beyond `max` must land in the final bucket.
#[test]
fn overflow_values_accumulate_in_the_last_bucket() {
let mut hist =
Histogram::exponential(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT);
hist.accumulate(DEFAULT_RANGE_MAX + 100);
assert_eq!(1, hist.values[&DEFAULT_RANGE_MAX]);
}
#[test]
fn short_exponential_buckets_are_correct() {
let test_buckets = vec![0, 1, 2, 3, 5, 9, 16, 29, 54, 100];
assert_eq!(test_buckets, exponential_range(1, 100, 10));
// There's always a zero bucket, so we increase the lower limit.
assert_eq!(test_buckets, exponential_range(0, 100, 10));
}
#[test]
fn default_exponential_buckets_are_correct() {
// Hand calculated values using current default range 0 - 60000 and bucket count of 100.
// NOTE: The final bucket, regardless of width, represents the overflow bucket to hold any
// values beyond the maximum (in this case the maximum is 60000)
let test_buckets = vec![
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 19, 21, 23, 25, 28, 31, 34,
38, 42, 46, 51, 56, 62, 68, 75, 83, 92, 101, 111, 122, 135, 149, 164, 181, 200, 221,
244, 269, 297, 328, 362, 399, 440, 485, 535, 590, 651, 718, 792, 874, 964, 1064, 1174,
1295, 1429, 1577, 1740, 1920, 2118, 2337, 2579, 2846, 3140, 3464, 3822, 4217, 4653,
5134, 5665, 6250, 6896, 7609, 8395, 9262, 10219, 11275, 12440, 13726, 15144, 16709,
18436, 20341, 22443, 24762, 27321, 30144, 33259, 36696, 40488, 44672, 49288, 54381,
60000,
];
assert_eq!(
test_buckets,
exponential_range(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT)
);
}
#[test]
fn default_buckets_correctly_accumulate() {
let mut hist =
Histogram::exponential(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT);
for i in &[1, 10, 100, 1000, 10000] {
hist.accumulate(*i);
}
assert_eq!(11111, hist.sum());
assert_eq!(5, hist.count());
assert_eq!(None, hist.values.get(&0)); // underflow is empty
assert_eq!(1, hist.values[&1]); // bucket_ranges[1] = 1
assert_eq!(1, hist.values[&10]); // bucket_ranges[10] = 10
assert_eq!(1, hist.values[&92]); // bucket_ranges[33] = 92
assert_eq!(1, hist.values[&964]); // bucket_ranges[57] = 964
assert_eq!(1, hist.values[&9262]); // bucket_ranges[80] = 9262
}
// The sum must saturate at u64::MAX rather than wrapping around.
#[test]
fn accumulate_large_numbers() {
let mut hist = Histogram::exponential(1, 500, 10);
hist.accumulate(u64::max_value());
hist.accumulate(u64::max_value());
assert_eq!(2, hist.count());
// Saturate before overflowing
assert_eq!(u64::max_value(), hist.sum());
assert_eq!(2, hist.values[&500]);
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use once_cell::sync::OnceCell;
use serde::{Deserialize, Serialize};
use super::{Bucketing, Histogram};
use crate::util::floating_point_context::FloatingPointContext;
/// Create the possible ranges in an exponential distribution from `min` to `max` with
/// `bucket_count` buckets.
///
/// This algorithm calculates the bucket sizes using a natural log approach to get `bucket_count` number of buckets,
/// exponentially spaced between `min` and `max`
///
/// Bucket limits are the minimal bucket value.
/// That means values in a bucket `i` are `bucket[i] <= value < bucket[i+1]`.
/// It will always contain an underflow bucket (`< 1`).
fn exponential_range(min: u64, max: u64, bucket_count: usize) -> Vec<u64> {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
let log_max = (max as f64).ln();
let mut ranges = Vec::with_capacity(bucket_count);
let mut current = min;
// A minimum of 0 would make the log-spacing degenerate; bump it to 1.
if current == 0 {
current = 1;
}
// underflow bucket
ranges.push(0);
ranges.push(current);
// Space the remaining limits evenly in log space between `current` and
// `max`, recomputing the ratio each step; the `current + 1` fallback
// guarantees strictly increasing limits even when rounding collapses.
for i in 2..bucket_count {
let log_current = (current as f64).ln();
let log_ratio = (log_max - log_current) / (bucket_count - i) as f64;
let log_next = log_current + log_ratio;
let next_value = log_next.exp().round() as u64;
current = if next_value > current {
next_value
} else {
current + 1
};
ranges.push(current);
}
ranges
}
/// An exponential bucketing algorithm.
///
/// Buckets are pre-computed at instantiation with an exponential distribution from `min` to `max`
/// and `bucket_count` buckets.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct PrecomputedExponential {
// Don't serialize the (potentially large) array of ranges, instead compute them on first
// access.
#[serde(skip)]
bucket_ranges: OnceCell<Vec<u64>>,
// Lower bound of the distribution (0 is bumped to 1 when computing ranges).
min: u64,
// Upper bound; samples beyond it land in the last bucket.
max: u64,
// Total number of buckets, including the underflow bucket.
bucket_count: usize,
}
impl Bucketing for PrecomputedExponential {
/// Get the bucket for the sample.
///
/// This uses a binary search to locate the index `i` of the bucket such that:
/// bucket[i] <= sample < bucket[i+1]
fn sample_to_bucket_minimum(&self, sample: u64) -> u64 {
let limit = match self.ranges().binary_search(&sample) {
// Found an exact match to fit it in
Ok(i) => i,
// Sorted it fits after the bucket's limit, therefore it fits into the previous bucket
// (`i - 1` can't underflow: ranges[0] is 0, so `Err(0)` is impossible for a u64 sample)
Err(i) => i - 1,
};
self.ranges()[limit]
}
fn ranges(&self) -> &[u64] {
// Create the exponential range on first access.
// (`bucket_ranges` is not serialized, so this also recomputes it lazily
// after deserialization.)
self.bucket_ranges
.get_or_init(|| exponential_range(self.min, self.max, self.bucket_count))
}
}
impl Histogram<PrecomputedExponential> {
/// Creates a histogram with `count` exponential buckets in the range `min` to `max`.
///
/// # Arguments
///
/// * `min` - The minimum of the distribution (0 is allowed; the first real bucket starts at 1).
/// * `max` - The maximum of the distribution; larger samples fall into the last bucket.
/// * `bucket_count` - The number of buckets, including the underflow bucket.
pub fn exponential(
min: u64,
max: u64,
bucket_count: usize,
) -> Histogram<PrecomputedExponential> {
Histogram {
values: HashMap::new(),
count: 0,
sum: 0,
// Bucket limits are computed lazily on first use (see `ranges()`).
bucketing: PrecomputedExponential {
bucket_ranges: OnceCell::new(),
min,
max,
bucket_count,
},
}
}
}
#[cfg(test)]
mod test {
use super::*;
const DEFAULT_BUCKET_COUNT: usize = 100;
const DEFAULT_RANGE_MIN: u64 = 0;
const DEFAULT_RANGE_MAX: u64 = 60_000;
#[test]
fn can_count() {
let mut hist = Histogram::exponential(1, 500, 10);
assert!(hist.is_empty());
for i in 1..=10 {
hist.accumulate(i);
}
assert_eq!(10, hist.count());
assert_eq!(55, hist.sum());
}
// Samples beyond `max` must land in the final bucket.
#[test]
fn overflow_values_accumulate_in_the_last_bucket() {
let mut hist =
Histogram::exponential(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT);
hist.accumulate(DEFAULT_RANGE_MAX + 100);
assert_eq!(1, hist.values[&DEFAULT_RANGE_MAX]);
}
#[test]
fn short_exponential_buckets_are_correct() {
let test_buckets = vec![0, 1, 2, 3, 5, 9, 16, 29, 54, 100];
assert_eq!(test_buckets, exponential_range(1, 100, 10));
// There's always a zero bucket, so we increase the lower limit.
assert_eq!(test_buckets, exponential_range(0, 100, 10));
}
#[test]
fn default_exponential_buckets_are_correct() {
// Hand calculated values using current default range 0 - 60000 and bucket count of 100.
// NOTE: The final bucket, regardless of width, represents the overflow bucket to hold any
// values beyond the maximum (in this case the maximum is 60000)
let test_buckets = vec![
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 19, 21, 23, 25, 28, 31, 34,
38, 42, 46, 51, 56, 62, 68, 75, 83, 92, 101, 111, 122, 135, 149, 164, 181, 200, 221,
244, 269, 297, 328, 362, 399, 440, 485, 535, 590, 651, 718, 792, 874, 964, 1064, 1174,
1295, 1429, 1577, 1740, 1920, 2118, 2337, 2579, 2846, 3140, 3464, 3822, 4217, 4653,
5134, 5665, 6250, 6896, 7609, 8395, 9262, 10219, 11275, 12440, 13726, 15144, 16709,
18436, 20341, 22443, 24762, 27321, 30144, 33259, 36696, 40488, 44672, 49288, 54381,
60000,
];
assert_eq!(
test_buckets,
exponential_range(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT)
);
}
#[test]
fn default_buckets_correctly_accumulate() {
let mut hist =
Histogram::exponential(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT);
for i in &[1, 10, 100, 1000, 10000] {
hist.accumulate(*i);
}
assert_eq!(11111, hist.sum());
assert_eq!(5, hist.count());
assert_eq!(None, hist.values.get(&0)); // underflow is empty
assert_eq!(1, hist.values[&1]); // bucket_ranges[1] = 1
assert_eq!(1, hist.values[&10]); // bucket_ranges[10] = 10
assert_eq!(1, hist.values[&92]); // bucket_ranges[33] = 92
assert_eq!(1, hist.values[&964]); // bucket_ranges[57] = 964
assert_eq!(1, hist.values[&9262]); // bucket_ranges[80] = 9262
}
// The sum must saturate at u64::MAX rather than wrapping around.
#[test]
fn accumulate_large_numbers() {
let mut hist = Histogram::exponential(1, 500, 10);
hist.accumulate(u64::max_value());
hist.accumulate(u64::max_value());
assert_eq!(2, hist.count());
// Saturate before overflowing
assert_eq!(u64::max_value(), hist.sum());
assert_eq!(2, hist.values[&500]);
}
}

Просмотреть файл

@ -1,174 +1,174 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use super::{Bucketing, Histogram};
use crate::util::floating_point_context::FloatingPointContext;
/// A functional bucketing algorithm.
///
/// Buckets are not precomputed; instead, the bucket index of a sample is
/// derived directly from a function:
///
///    i = ⌊n log<sub>base</sub>(𝑥)⌋
///
/// In other words, there are n buckets for each power of `base` magnitude.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Functional {
    exponent: f64,
}

impl Functional {
    /// Creates a new functional bucketing for the given base and bucket density.
    fn new(log_base: f64, buckets_per_magnitude: f64) -> Functional {
        // Keep the FPU control flags in the required state for the duration
        // of this function.
        let _fpc = FloatingPointContext::new();

        Functional {
            exponent: log_base.powf(buckets_per_magnitude.recip()),
        }
    }

    /// Maps a sample to the consecutive integer index of its bucket.
    ///
    /// The index is a useful mathematical concept even though the stored and
    /// transmitted representation is each bucket's minimum value.
    fn sample_to_bucket_index(&self, sample: u64) -> u64 {
        // Keep the FPU control flags in the required state for the duration
        // of this function.
        let _fpc = FloatingPointContext::new();

        let shifted = sample.saturating_add(1) as f64;
        shifted.log(self.exponent) as u64
    }

    /// Computes the minimum value of the bucket at `index`.
    fn bucket_index_to_bucket_minimum(&self, index: u64) -> u64 {
        // Keep the FPU control flags in the required state for the duration
        // of this function.
        let _fpc = FloatingPointContext::new();

        self.exponent.powf(index as f64) as u64
    }
}
impl Bucketing for Functional {
    /// Returns the minimum of the bucket `sample` falls into.
    ///
    /// Zero always maps to the zero bucket.
    fn sample_to_bucket_minimum(&self, sample: u64) -> u64 {
        match sample {
            0 => 0,
            s => self.bucket_index_to_bucket_minimum(self.sample_to_bucket_index(s)),
        }
    }

    fn ranges(&self) -> &[u64] {
        unimplemented!("Bucket ranges for functional bucketing are not precomputed")
    }
}
impl Histogram<Functional> {
    /// Creates a histogram whose buckets are computed by a function.
    pub fn functional(log_base: f64, buckets_per_magnitude: f64) -> Histogram<Functional> {
        Histogram {
            bucketing: Functional::new(log_base, buckets_per_magnitude),
            values: HashMap::new(),
            count: 0,
            sum: 0,
        }
    }

    /// Gets a snapshot of all contiguous values.
    ///
    /// **Caution** This is a more specific implementation of `snapshot_values` on functional
    /// histograms. `snapshot_values` cannot be used with those, due to buckets not being
    /// precomputed.
    pub fn snapshot(&self) -> HashMap<u64, u64> {
        let mut keys = self.values.keys().copied();

        // An empty histogram snapshots to an empty map.
        let first = match keys.next() {
            Some(k) => k,
            None => return HashMap::new(),
        };

        // Find the minimum and maximum recorded key in a single pass.
        let (min_key, max_key) = keys.fold((first, first), |(lo, hi), k| (lo.min(k), hi.max(k)));

        let min_bucket = self.bucketing.sample_to_bucket_index(min_key);
        // One past the highest filled bucket, matching `snapshot_values` semantics.
        let max_bucket = self.bucketing.sample_to_bucket_index(max_key) + 1;

        let mut filled = self.values.clone();
        for idx in min_bucket..=max_bucket {
            // Insert a zero count for every bucket without samples.
            filled
                .entry(self.bucketing.bucket_index_to_bucket_minimum(idx))
                .or_insert(0);
        }
        filled
    }
}
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn can_count() {
        let mut hist = Histogram::functional(2.0, 8.0);
        assert!(hist.is_empty());

        for sample in 1..=10 {
            hist.accumulate(sample);
        }

        // 10 samples summing to 1 + 2 + ... + 10 = 55.
        assert_eq!(hist.count(), 10);
        assert_eq!(hist.sum(), 55);
    }

    #[test]
    fn sample_to_bucket_minimum_correctly_rounds_down() {
        let hist = Histogram::functional(2.0, 8.0);

        // A bucket minimum must never exceed the sample, and mapping a bucket
        // minimum through the function again must be a fixed point.
        let check_round_trip = |value: u64| {
            let minimum = hist.bucketing.sample_to_bucket_minimum(value);
            assert!(minimum <= value);
            assert_eq!(minimum, hist.bucketing.sample_to_bucket_minimum(minimum));
        };

        // The first 100 integers are where numerical accuracy of the
        // round-tripping is most potentially problematic.
        for value in 0..100 {
            check_round_trip(value);
        }

        // Then an exponential sampling of higher numbers.
        for i in 11..500 {
            check_round_trip(1.5f64.powi(i) as u64);
        }
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use super::{Bucketing, Histogram};
use crate::util::floating_point_context::FloatingPointContext;
/// A functional bucketing algorithm.
///
/// Buckets are not precomputed; instead, the bucket index of a sample is
/// derived directly from a function:
///
///    i = ⌊n log<sub>base</sub>(𝑥)⌋
///
/// In other words, there are n buckets for each power of `base` magnitude.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Functional {
    exponent: f64,
}

impl Functional {
    /// Creates a new functional bucketing for the given base and bucket density.
    fn new(log_base: f64, buckets_per_magnitude: f64) -> Functional {
        // Keep the FPU control flags in the required state for the duration
        // of this function.
        let _fpc = FloatingPointContext::new();

        Functional {
            exponent: log_base.powf(buckets_per_magnitude.recip()),
        }
    }

    /// Maps a sample to the consecutive integer index of its bucket.
    ///
    /// The index is a useful mathematical concept even though the stored and
    /// transmitted representation is each bucket's minimum value.
    fn sample_to_bucket_index(&self, sample: u64) -> u64 {
        // Keep the FPU control flags in the required state for the duration
        // of this function.
        let _fpc = FloatingPointContext::new();

        let shifted = sample.saturating_add(1) as f64;
        shifted.log(self.exponent) as u64
    }

    /// Computes the minimum value of the bucket at `index`.
    fn bucket_index_to_bucket_minimum(&self, index: u64) -> u64 {
        // Keep the FPU control flags in the required state for the duration
        // of this function.
        let _fpc = FloatingPointContext::new();

        self.exponent.powf(index as f64) as u64
    }
}
impl Bucketing for Functional {
    /// Returns the minimum of the bucket `sample` falls into.
    ///
    /// Zero always maps to the zero bucket.
    fn sample_to_bucket_minimum(&self, sample: u64) -> u64 {
        match sample {
            0 => 0,
            s => self.bucket_index_to_bucket_minimum(self.sample_to_bucket_index(s)),
        }
    }

    fn ranges(&self) -> &[u64] {
        unimplemented!("Bucket ranges for functional bucketing are not precomputed")
    }
}
impl Histogram<Functional> {
    /// Creates a histogram whose buckets are computed by a function.
    pub fn functional(log_base: f64, buckets_per_magnitude: f64) -> Histogram<Functional> {
        Histogram {
            bucketing: Functional::new(log_base, buckets_per_magnitude),
            values: HashMap::new(),
            count: 0,
            sum: 0,
        }
    }

    /// Gets a snapshot of all contiguous values.
    ///
    /// **Caution** This is a more specific implementation of `snapshot_values` on functional
    /// histograms. `snapshot_values` cannot be used with those, due to buckets not being
    /// precomputed.
    pub fn snapshot(&self) -> HashMap<u64, u64> {
        let mut keys = self.values.keys().copied();

        // An empty histogram snapshots to an empty map.
        let first = match keys.next() {
            Some(k) => k,
            None => return HashMap::new(),
        };

        // Find the minimum and maximum recorded key in a single pass.
        let (min_key, max_key) = keys.fold((first, first), |(lo, hi), k| (lo.min(k), hi.max(k)));

        let min_bucket = self.bucketing.sample_to_bucket_index(min_key);
        // One past the highest filled bucket, matching `snapshot_values` semantics.
        let max_bucket = self.bucketing.sample_to_bucket_index(max_key) + 1;

        let mut filled = self.values.clone();
        for idx in min_bucket..=max_bucket {
            // Insert a zero count for every bucket without samples.
            filled
                .entry(self.bucketing.bucket_index_to_bucket_minimum(idx))
                .or_insert(0);
        }
        filled
    }
}
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn can_count() {
        let mut hist = Histogram::functional(2.0, 8.0);
        assert!(hist.is_empty());

        for sample in 1..=10 {
            hist.accumulate(sample);
        }

        // 10 samples summing to 1 + 2 + ... + 10 = 55.
        assert_eq!(hist.count(), 10);
        assert_eq!(hist.sum(), 55);
    }

    #[test]
    fn sample_to_bucket_minimum_correctly_rounds_down() {
        let hist = Histogram::functional(2.0, 8.0);

        // A bucket minimum must never exceed the sample, and mapping a bucket
        // minimum through the function again must be a fixed point.
        let check_round_trip = |value: u64| {
            let minimum = hist.bucketing.sample_to_bucket_minimum(value);
            assert!(minimum <= value);
            assert_eq!(minimum, hist.bucketing.sample_to_bucket_minimum(minimum));
        };

        // The first 100 integers are where numerical accuracy of the
        // round-tripping is most potentially problematic.
        for value in 0..100 {
            check_round_trip(value);
        }

        // Then an exponential sampling of higher numbers.
        for i in 11..500 {
            check_round_trip(1.5f64.powi(i) as u64);
        }
    }
}

Просмотреть файл

@ -1,178 +1,178 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::cmp;
use std::collections::HashMap;
use once_cell::sync::OnceCell;
use serde::{Deserialize, Serialize};
use super::{Bucketing, Histogram};
/// Create the possible ranges in a linear distribution from `min` to `max` with
/// `bucket_count` buckets.
///
/// Produces `count` buckets of equal width between `min` and `max`.
///
/// Bucket limits are the minimal bucket value:
/// values in bucket `i` satisfy `bucket[i] <= value < bucket[i+1]`.
/// The first entry is always the underflow bucket (`< 1`).
fn linear_range(min: u64, max: u64, count: usize) -> Vec<u64> {
    let count = count as u64;
    // The lower limit is at least 1, since bucket 0 is the underflow bucket.
    let low = min.max(1);

    // Underflow bucket first, then `count - 1` evenly spaced bucket minimums.
    std::iter::once(0)
        .chain((1..count).map(|i| (low * (count - 1 - i) + max * (i - 1)) / (count - 2)))
        .collect()
}
/// A linear bucketing algorithm.
///
/// Buckets are pre-computed at instantiation with a linear distribution from `min` to `max`
/// and `bucket_count` buckets.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct PrecomputedLinear {
    // Don't serialize the (potentially large) array of ranges, instead compute them on first
    // access.
    #[serde(skip)]
    bucket_ranges: OnceCell<Vec<u64>>,
    min: u64,
    max: u64,
    bucket_count: usize,
}

impl Bucketing for PrecomputedLinear {
    /// Get the bucket for the sample.
    ///
    /// Binary-searches for the index `i` satisfying
    /// `bucket[i] <= sample < bucket[i+1]`.
    fn sample_to_bucket_minimum(&self, sample: u64) -> u64 {
        let ranges = self.ranges();
        let index = ranges
            .binary_search(&sample)
            // An exact hit is the bucket itself; otherwise the sample belongs
            // to the bucket just below the insertion point.
            .unwrap_or_else(|insertion| insertion - 1);
        ranges[index]
    }

    fn ranges(&self) -> &[u64] {
        // Compute the linear ranges lazily, on first access.
        self.bucket_ranges
            .get_or_init(|| linear_range(self.min, self.max, self.bucket_count))
    }
}
impl Histogram<PrecomputedLinear> {
    /// Creates a histogram with `bucket_count` linear buckets in the range `min` to `max`.
    pub fn linear(min: u64, max: u64, bucket_count: usize) -> Histogram<PrecomputedLinear> {
        // Ranges themselves are computed lazily on first use.
        let bucketing = PrecomputedLinear {
            bucket_ranges: OnceCell::new(),
            min,
            max,
            bucket_count,
        };

        Histogram {
            values: HashMap::new(),
            count: 0,
            sum: 0,
            bucketing,
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // Test defaults: 100 buckets over the range 0 - 100.
    const DEFAULT_BUCKET_COUNT: usize = 100;
    const DEFAULT_RANGE_MIN: u64 = 0;
    const DEFAULT_RANGE_MAX: u64 = 100;

    #[test]
    fn can_count() {
        let mut hist = Histogram::linear(1, 500, 10);
        assert!(hist.is_empty());

        for i in 1..=10 {
            hist.accumulate(i);
        }

        // 10 samples summing to 1 + 2 + ... + 10 = 55.
        assert_eq!(10, hist.count());
        assert_eq!(55, hist.sum());
    }

    #[test]
    fn overflow_values_accumulate_in_the_last_bucket() {
        let mut hist =
            Histogram::linear(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT);

        // A sample past the maximum is counted in the final (overflow) bucket.
        hist.accumulate(DEFAULT_RANGE_MAX + 100);
        assert_eq!(1, hist.values[&DEFAULT_RANGE_MAX]);
    }

    #[test]
    fn short_linear_buckets_are_correct() {
        let test_buckets = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 10];

        assert_eq!(test_buckets, linear_range(1, 10, 10));
        // There's always a zero bucket, so we increase the lower limit.
        assert_eq!(test_buckets, linear_range(0, 10, 10));
    }

    #[test]
    fn long_linear_buckets_are_correct() {
        // Hand calculated values using the test default range 0 - 100 and bucket count of 100.
        // NOTE: The final bucket, regardless of width, represents the overflow bucket to hold any
        // values beyond the maximum (in this case the maximum is 100)
        let test_buckets = vec![
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
            46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
            68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
            90, 91, 92, 93, 94, 95, 96, 97, 98, 100,
        ];

        assert_eq!(
            test_buckets,
            linear_range(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT)
        );
    }

    #[test]
    fn default_buckets_correctly_accumulate() {
        let mut hist =
            Histogram::linear(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT);

        for i in &[1, 10, 100, 1000, 10000] {
            hist.accumulate(*i);
        }

        assert_eq!(11111, hist.sum());
        assert_eq!(5, hist.count());

        // The underflow bucket stays empty.
        assert_eq!(None, hist.values.get(&0));
        assert_eq!(1, hist.values[&1]);
        assert_eq!(1, hist.values[&10]);
        // Samples 100, 1000 and 10000 all land in the overflow bucket (minimum 100).
        assert_eq!(3, hist.values[&100]);
    }

    #[test]
    fn accumulate_large_numbers() {
        let mut hist = Histogram::linear(1, 500, 10);

        hist.accumulate(u64::max_value());
        hist.accumulate(u64::max_value());

        assert_eq!(2, hist.count());
        // Saturate before overflowing
        assert_eq!(u64::max_value(), hist.sum());
        // Both samples land in the overflow bucket.
        assert_eq!(2, hist.values[&500]);
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::cmp;
use std::collections::HashMap;
use once_cell::sync::OnceCell;
use serde::{Deserialize, Serialize};
use super::{Bucketing, Histogram};
/// Create the possible ranges in a linear distribution from `min` to `max` with
/// `bucket_count` buckets.
///
/// Produces `count` buckets of equal width between `min` and `max`.
///
/// Bucket limits are the minimal bucket value:
/// values in bucket `i` satisfy `bucket[i] <= value < bucket[i+1]`.
/// The first entry is always the underflow bucket (`< 1`).
fn linear_range(min: u64, max: u64, count: usize) -> Vec<u64> {
    let count = count as u64;
    // The lower limit is at least 1, since bucket 0 is the underflow bucket.
    let low = min.max(1);

    // Underflow bucket first, then `count - 1` evenly spaced bucket minimums.
    std::iter::once(0)
        .chain((1..count).map(|i| (low * (count - 1 - i) + max * (i - 1)) / (count - 2)))
        .collect()
}
/// A linear bucketing algorithm.
///
/// Buckets are pre-computed at instantiation with a linear distribution from `min` to `max`
/// and `bucket_count` buckets.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct PrecomputedLinear {
    // Don't serialize the (potentially large) array of ranges, instead compute them on first
    // access.
    #[serde(skip)]
    bucket_ranges: OnceCell<Vec<u64>>,
    min: u64,
    max: u64,
    bucket_count: usize,
}

impl Bucketing for PrecomputedLinear {
    /// Get the bucket for the sample.
    ///
    /// Binary-searches for the index `i` satisfying
    /// `bucket[i] <= sample < bucket[i+1]`.
    fn sample_to_bucket_minimum(&self, sample: u64) -> u64 {
        let ranges = self.ranges();
        let index = ranges
            .binary_search(&sample)
            // An exact hit is the bucket itself; otherwise the sample belongs
            // to the bucket just below the insertion point.
            .unwrap_or_else(|insertion| insertion - 1);
        ranges[index]
    }

    fn ranges(&self) -> &[u64] {
        // Compute the linear ranges lazily, on first access.
        self.bucket_ranges
            .get_or_init(|| linear_range(self.min, self.max, self.bucket_count))
    }
}
impl Histogram<PrecomputedLinear> {
    /// Creates a histogram with `bucket_count` linear buckets in the range `min` to `max`.
    pub fn linear(min: u64, max: u64, bucket_count: usize) -> Histogram<PrecomputedLinear> {
        // Ranges themselves are computed lazily on first use.
        let bucketing = PrecomputedLinear {
            bucket_ranges: OnceCell::new(),
            min,
            max,
            bucket_count,
        };

        Histogram {
            values: HashMap::new(),
            count: 0,
            sum: 0,
            bucketing,
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // Test defaults: 100 buckets over the range 0 - 100.
    const DEFAULT_BUCKET_COUNT: usize = 100;
    const DEFAULT_RANGE_MIN: u64 = 0;
    const DEFAULT_RANGE_MAX: u64 = 100;

    #[test]
    fn can_count() {
        let mut hist = Histogram::linear(1, 500, 10);
        assert!(hist.is_empty());

        for i in 1..=10 {
            hist.accumulate(i);
        }

        // 10 samples summing to 1 + 2 + ... + 10 = 55.
        assert_eq!(10, hist.count());
        assert_eq!(55, hist.sum());
    }

    #[test]
    fn overflow_values_accumulate_in_the_last_bucket() {
        let mut hist =
            Histogram::linear(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT);

        // A sample past the maximum is counted in the final (overflow) bucket.
        hist.accumulate(DEFAULT_RANGE_MAX + 100);
        assert_eq!(1, hist.values[&DEFAULT_RANGE_MAX]);
    }

    #[test]
    fn short_linear_buckets_are_correct() {
        let test_buckets = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 10];

        assert_eq!(test_buckets, linear_range(1, 10, 10));
        // There's always a zero bucket, so we increase the lower limit.
        assert_eq!(test_buckets, linear_range(0, 10, 10));
    }

    #[test]
    fn long_linear_buckets_are_correct() {
        // Hand calculated values using the test default range 0 - 100 and bucket count of 100.
        // NOTE: The final bucket, regardless of width, represents the overflow bucket to hold any
        // values beyond the maximum (in this case the maximum is 100)
        let test_buckets = vec![
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
            46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
            68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
            90, 91, 92, 93, 94, 95, 96, 97, 98, 100,
        ];

        assert_eq!(
            test_buckets,
            linear_range(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT)
        );
    }

    #[test]
    fn default_buckets_correctly_accumulate() {
        let mut hist =
            Histogram::linear(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT);

        for i in &[1, 10, 100, 1000, 10000] {
            hist.accumulate(*i);
        }

        assert_eq!(11111, hist.sum());
        assert_eq!(5, hist.count());

        // The underflow bucket stays empty.
        assert_eq!(None, hist.values.get(&0));
        assert_eq!(1, hist.values[&1]);
        assert_eq!(1, hist.values[&10]);
        // Samples 100, 1000 and 10000 all land in the overflow bucket (minimum 100).
        assert_eq!(3, hist.values[&100]);
    }

    #[test]
    fn accumulate_large_numbers() {
        let mut hist = Histogram::linear(1, 500, 10);

        hist.accumulate(u64::max_value());
        hist.accumulate(u64::max_value());

        assert_eq!(2, hist.count());
        // Saturate before overflowing
        assert_eq!(u64::max_value(), hist.sum());
        // Both samples land in the overflow bucket.
        assert_eq!(2, hist.values[&500]);
    }
}

Просмотреть файл

@ -1,139 +1,139 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! A simple histogram implementation for exponential histograms.
use std::collections::HashMap;
use std::convert::TryFrom;
use serde::{Deserialize, Serialize};
use crate::error::{Error, ErrorKind};
pub use exponential::PrecomputedExponential;
pub use functional::Functional;
pub use linear::PrecomputedLinear;
mod exponential;
mod functional;
mod linear;
/// Different kinds of histograms.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HistogramType {
    /// A histogram with linear distributed buckets.
    Linear,
    /// A histogram with exponential distributed buckets.
    Exponential,
}

impl TryFrom<i32> for HistogramType {
    type Error = Error;

    /// Converts an integer representation into a histogram type.
    ///
    /// Only 0 (linear) and 1 (exponential) are valid; anything else is an error.
    fn try_from(value: i32) -> Result<HistogramType, Self::Error> {
        if value == 0 {
            Ok(HistogramType::Linear)
        } else if value == 1 {
            Ok(HistogramType::Exponential)
        } else {
            Err(ErrorKind::HistogramType(value).into())
        }
    }
}
/// A histogram.
///
/// Stores the counts per bucket and tracks the count of added samples and the total sum.
/// The bucketing algorithm can be changed.
///
/// Generic over the bucketing algorithm `B`.
///
/// ## Example
///
/// ```rust,ignore
/// let mut hist = Histogram::exponential(1, 500, 10);
///
/// for i in 1..=10 {
///     hist.accumulate(i);
/// }
///
/// assert_eq!(10, hist.count());
/// assert_eq!(55, hist.sum());
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Histogram<B> {
    /// Mapping bucket's minimum to sample count.
    values: HashMap<u64, u64>,
    /// The count of samples added.
    count: u64,
    /// The total sum of samples (saturates at `u64::MAX`, see `accumulate`).
    sum: u64,
    /// The bucketing algorithm used.
    bucketing: B,
}
/// A bucketing algorithm for histograms.
///
/// Responsible for assigning samples to buckets, either computed on the fly
/// or looked up in a precomputed table.
pub trait Bucketing {
    /// Get the bucket's minimum value the sample falls into.
    fn sample_to_bucket_minimum(&self, sample: u64) -> u64;

    /// The computed bucket ranges for this bucketing algorithm.
    fn ranges(&self) -> &[u64];
}

impl<B: Bucketing> Histogram<B> {
    /// Gets the number of buckets in this histogram.
    pub fn bucket_count(&self) -> usize {
        self.values.len()
    }

    /// Adds a single value to this histogram.
    pub fn accumulate(&mut self, sample: u64) {
        let bucket_min = self.bucketing.sample_to_bucket_minimum(sample);
        *self.values.entry(bucket_min).or_insert(0) += 1;
        // The running sum saturates instead of wrapping on overflow.
        self.sum = self.sum.saturating_add(sample);
        self.count += 1;
    }

    /// Gets the total sum of values recorded in this histogram.
    pub fn sum(&self) -> u64 {
        self.sum
    }

    /// Gets the total count of values recorded in this histogram.
    pub fn count(&self) -> u64 {
        self.count
    }

    /// Gets the filled values.
    pub fn values(&self) -> &HashMap<u64, u64> {
        &self.values
    }

    /// Checks if this histogram recorded any values.
    pub fn is_empty(&self) -> bool {
        self.count == 0
    }

    /// Gets a snapshot of all values from the first bucket until one past the
    /// last filled bucket, filling in empty buckets with 0.
    pub fn snapshot_values(&self) -> HashMap<u64, u64> {
        let highest_filled = self.values.keys().max().copied().unwrap_or(0);

        let mut snapshot = self.values.clone();
        for &bucket_min in self.bucketing.ranges() {
            // Fill in missing entries with a zero count.
            snapshot.entry(bucket_min).or_insert(0);
            // Stop one bucket past the last one holding samples.
            if bucket_min > highest_filled {
                break;
            }
        }
        snapshot
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! A simple histogram implementation for exponential histograms.
use std::collections::HashMap;
use std::convert::TryFrom;
use serde::{Deserialize, Serialize};
use crate::error::{Error, ErrorKind};
pub use exponential::PrecomputedExponential;
pub use functional::Functional;
pub use linear::PrecomputedLinear;
mod exponential;
mod functional;
mod linear;
/// Different kinds of histograms.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HistogramType {
    /// A histogram with linear distributed buckets.
    Linear,
    /// A histogram with exponential distributed buckets.
    Exponential,
}

impl TryFrom<i32> for HistogramType {
    type Error = Error;

    /// Converts an integer representation into a histogram type.
    ///
    /// Only 0 (linear) and 1 (exponential) are valid; anything else is an error.
    fn try_from(value: i32) -> Result<HistogramType, Self::Error> {
        if value == 0 {
            Ok(HistogramType::Linear)
        } else if value == 1 {
            Ok(HistogramType::Exponential)
        } else {
            Err(ErrorKind::HistogramType(value).into())
        }
    }
}
/// A histogram.
///
/// Stores the counts per bucket and tracks the count of added samples and the total sum.
/// The bucketing algorithm can be changed.
///
/// Generic over the bucketing algorithm `B`.
///
/// ## Example
///
/// ```rust,ignore
/// let mut hist = Histogram::exponential(1, 500, 10);
///
/// for i in 1..=10 {
///     hist.accumulate(i);
/// }
///
/// assert_eq!(10, hist.count());
/// assert_eq!(55, hist.sum());
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Histogram<B> {
    /// Mapping bucket's minimum to sample count.
    values: HashMap<u64, u64>,
    /// The count of samples added.
    count: u64,
    /// The total sum of samples (saturates at `u64::MAX`, see `accumulate`).
    sum: u64,
    /// The bucketing algorithm used.
    bucketing: B,
}
/// A bucketing algorithm for histograms.
///
/// Responsible for assigning samples to buckets, either computed on the fly
/// or looked up in a precomputed table.
pub trait Bucketing {
    /// Get the bucket's minimum value the sample falls into.
    fn sample_to_bucket_minimum(&self, sample: u64) -> u64;

    /// The computed bucket ranges for this bucketing algorithm.
    fn ranges(&self) -> &[u64];
}

impl<B: Bucketing> Histogram<B> {
    /// Gets the number of buckets in this histogram.
    pub fn bucket_count(&self) -> usize {
        self.values.len()
    }

    /// Adds a single value to this histogram.
    pub fn accumulate(&mut self, sample: u64) {
        let bucket_min = self.bucketing.sample_to_bucket_minimum(sample);
        *self.values.entry(bucket_min).or_insert(0) += 1;
        // The running sum saturates instead of wrapping on overflow.
        self.sum = self.sum.saturating_add(sample);
        self.count += 1;
    }

    /// Gets the total sum of values recorded in this histogram.
    pub fn sum(&self) -> u64 {
        self.sum
    }

    /// Gets the total count of values recorded in this histogram.
    pub fn count(&self) -> u64 {
        self.count
    }

    /// Gets the filled values.
    pub fn values(&self) -> &HashMap<u64, u64> {
        &self.values
    }

    /// Checks if this histogram recorded any values.
    pub fn is_empty(&self) -> bool {
        self.count == 0
    }

    /// Gets a snapshot of all values from the first bucket until one past the
    /// last filled bucket, filling in empty buckets with 0.
    pub fn snapshot_values(&self) -> HashMap<u64, u64> {
        let highest_filled = self.values.keys().max().copied().unwrap_or(0);

        let mut snapshot = self.values.clone();
        for &bucket_min in self.bucketing.ranges() {
            // Fill in missing entries with a zero count.
            snapshot.entry(bucket_min).or_insert(0);
            // Stop one bucket past the last one holding samples.
            if bucket_min > highest_filled {
                break;
            }
        }
        snapshot
    }
}

Просмотреть файл

@ -1,146 +1,146 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use super::{metrics::*, CommonMetricData, Lifetime};
/// Glean-internal core metrics describing the client.
#[derive(Debug)]
pub struct CoreMetrics {
    pub client_id: UuidMetric,
    pub first_run_date: DatetimeMetric,
    pub os: StringMetric,
}

impl CoreMetrics {
    /// Constructs the core metrics with their metric definitions.
    pub fn new() -> CoreMetrics {
        // All core metrics are sent in the `glean_client_info` ping section.
        let client_info_pings = || vec!["glean_client_info".into()];

        CoreMetrics {
            client_id: UuidMetric::new(CommonMetricData {
                name: "client_id".into(),
                category: "".into(),
                send_in_pings: client_info_pings(),
                lifetime: Lifetime::User,
                disabled: false,
                dynamic_label: None,
            }),
            first_run_date: DatetimeMetric::new(
                CommonMetricData {
                    name: "first_run_date".into(),
                    category: "".into(),
                    send_in_pings: client_info_pings(),
                    lifetime: Lifetime::User,
                    disabled: false,
                    dynamic_label: None,
                },
                // Day-level resolution is all that's recorded for the first run.
                TimeUnit::Day,
            ),
            os: StringMetric::new(CommonMetricData {
                name: "os".into(),
                category: "".into(),
                send_in_pings: client_info_pings(),
                // OS is re-set per application run rather than stored for the user.
                lifetime: Lifetime::Application,
                disabled: false,
                dynamic_label: None,
            }),
        }
    }
}
/// Metrics recorded by the ping upload machinery itself.
///
/// All are defined under the `glean.upload` category and sent in the `metrics` ping.
#[derive(Debug)]
pub struct UploadMetrics {
    /// Upload failures, labeled by failure class (4xx, 5xx, unknown, unrecoverable, recoverable).
    pub ping_upload_failure: LabeledMetric<CounterMetric>,
    /// Size distribution of pings discarded for exceeding the size limit.
    pub discarded_exceeding_pings_size: MemoryDistributionMetric,
    /// Size distribution of the pending-pings directory.
    pub pending_pings_directory_size: MemoryDistributionMetric,
    /// Count of pings deleted after the pending-pings quota was hit.
    pub deleted_pings_after_quota_hit: CounterMetric,
    /// Count of pending pings -- presumably those found awaiting upload; confirm against callers.
    pub pending_pings: CounterMetric,
}

impl UploadMetrics {
    /// Constructs all upload metrics with their metric definitions.
    pub fn new() -> UploadMetrics {
        UploadMetrics {
            ping_upload_failure: LabeledMetric::new(
                CounterMetric::new(CommonMetricData {
                    name: "ping_upload_failure".into(),
                    category: "glean.upload".into(),
                    send_in_pings: vec!["metrics".into()],
                    lifetime: Lifetime::Ping,
                    disabled: false,
                    dynamic_label: None,
                }),
                // The allowed failure labels; anything else is rejected by the labeled metric.
                Some(vec![
                    "status_code_4xx".into(),
                    "status_code_5xx".into(),
                    "status_code_unknown".into(),
                    "unrecoverable".into(),
                    "recoverable".into(),
                ]),
            ),
            // NOTE(review): the metric name uses singular `ping` while the field name is
            // plural -- intentional upstream naming; do not "fix", it would change telemetry.
            discarded_exceeding_pings_size: MemoryDistributionMetric::new(
                CommonMetricData {
                    name: "discarded_exceeding_ping_size".into(),
                    category: "glean.upload".into(),
                    send_in_pings: vec!["metrics".into()],
                    lifetime: Lifetime::Ping,
                    disabled: false,
                    dynamic_label: None,
                },
                MemoryUnit::Kilobyte,
            ),
            pending_pings_directory_size: MemoryDistributionMetric::new(
                CommonMetricData {
                    name: "pending_pings_directory_size".into(),
                    category: "glean.upload".into(),
                    send_in_pings: vec!["metrics".into()],
                    lifetime: Lifetime::Ping,
                    disabled: false,
                    dynamic_label: None,
                },
                MemoryUnit::Kilobyte,
            ),
            deleted_pings_after_quota_hit: CounterMetric::new(CommonMetricData {
                name: "deleted_pings_after_quota_hit".into(),
                category: "glean.upload".into(),
                send_in_pings: vec!["metrics".into()],
                lifetime: Lifetime::Ping,
                disabled: false,
                dynamic_label: None,
            }),
            pending_pings: CounterMetric::new(CommonMetricData {
                name: "pending_pings".into(),
                category: "glean.upload".into(),
                send_in_pings: vec!["metrics".into()],
                lifetime: Lifetime::Ping,
                disabled: false,
                dynamic_label: None,
            }),
        }
    }
}
/// Metrics about Glean's own database.
#[derive(Debug)]
pub struct DatabaseMetrics {
    pub size: MemoryDistributionMetric,
}

impl DatabaseMetrics {
    /// Constructs the database metrics with their metric definitions.
    pub fn new() -> DatabaseMetrics {
        // Distribution of the database size, recorded in bytes.
        let size_data = CommonMetricData {
            name: "size".into(),
            category: "glean.database".into(),
            send_in_pings: vec!["metrics".into()],
            lifetime: Lifetime::Ping,
            disabled: false,
            dynamic_label: None,
        };

        DatabaseMetrics {
            size: MemoryDistributionMetric::new(size_data, MemoryUnit::Byte),
        }
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use super::{metrics::*, CommonMetricData, Lifetime};
/// Glean-internal core metrics describing the client.
#[derive(Debug)]
pub struct CoreMetrics {
    pub client_id: UuidMetric,
    pub first_run_date: DatetimeMetric,
    pub os: StringMetric,
}

impl CoreMetrics {
    /// Constructs the core metrics with their metric definitions.
    pub fn new() -> CoreMetrics {
        // All core metrics are sent in the `glean_client_info` ping section.
        let client_info_pings = || vec!["glean_client_info".into()];

        CoreMetrics {
            client_id: UuidMetric::new(CommonMetricData {
                name: "client_id".into(),
                category: "".into(),
                send_in_pings: client_info_pings(),
                lifetime: Lifetime::User,
                disabled: false,
                dynamic_label: None,
            }),
            first_run_date: DatetimeMetric::new(
                CommonMetricData {
                    name: "first_run_date".into(),
                    category: "".into(),
                    send_in_pings: client_info_pings(),
                    lifetime: Lifetime::User,
                    disabled: false,
                    dynamic_label: None,
                },
                // Day-level resolution is all that's recorded for the first run.
                TimeUnit::Day,
            ),
            os: StringMetric::new(CommonMetricData {
                name: "os".into(),
                category: "".into(),
                send_in_pings: client_info_pings(),
                // OS is re-set per application run rather than stored for the user.
                lifetime: Lifetime::Application,
                disabled: false,
                dynamic_label: None,
            }),
        }
    }
}
/// Metrics recorded by the ping upload machinery itself.
///
/// All are defined under the `glean.upload` category and sent in the `metrics` ping.
#[derive(Debug)]
pub struct UploadMetrics {
    /// Upload failures, labeled by failure class (4xx, 5xx, unknown, unrecoverable, recoverable).
    pub ping_upload_failure: LabeledMetric<CounterMetric>,
    /// Size distribution of pings discarded for exceeding the size limit.
    pub discarded_exceeding_pings_size: MemoryDistributionMetric,
    /// Size distribution of the pending-pings directory.
    pub pending_pings_directory_size: MemoryDistributionMetric,
    /// Count of pings deleted after the pending-pings quota was hit.
    pub deleted_pings_after_quota_hit: CounterMetric,
    /// Count of pending pings -- presumably those found awaiting upload; confirm against callers.
    pub pending_pings: CounterMetric,
}

impl UploadMetrics {
    /// Constructs all upload metrics with their metric definitions.
    pub fn new() -> UploadMetrics {
        UploadMetrics {
            ping_upload_failure: LabeledMetric::new(
                CounterMetric::new(CommonMetricData {
                    name: "ping_upload_failure".into(),
                    category: "glean.upload".into(),
                    send_in_pings: vec!["metrics".into()],
                    lifetime: Lifetime::Ping,
                    disabled: false,
                    dynamic_label: None,
                }),
                // The allowed failure labels; anything else is rejected by the labeled metric.
                Some(vec![
                    "status_code_4xx".into(),
                    "status_code_5xx".into(),
                    "status_code_unknown".into(),
                    "unrecoverable".into(),
                    "recoverable".into(),
                ]),
            ),
            // NOTE(review): the metric name uses singular `ping` while the field name is
            // plural -- intentional upstream naming; do not "fix", it would change telemetry.
            discarded_exceeding_pings_size: MemoryDistributionMetric::new(
                CommonMetricData {
                    name: "discarded_exceeding_ping_size".into(),
                    category: "glean.upload".into(),
                    send_in_pings: vec!["metrics".into()],
                    lifetime: Lifetime::Ping,
                    disabled: false,
                    dynamic_label: None,
                },
                MemoryUnit::Kilobyte,
            ),
            pending_pings_directory_size: MemoryDistributionMetric::new(
                CommonMetricData {
                    name: "pending_pings_directory_size".into(),
                    category: "glean.upload".into(),
                    send_in_pings: vec!["metrics".into()],
                    lifetime: Lifetime::Ping,
                    disabled: false,
                    dynamic_label: None,
                },
                MemoryUnit::Kilobyte,
            ),
            deleted_pings_after_quota_hit: CounterMetric::new(CommonMetricData {
                name: "deleted_pings_after_quota_hit".into(),
                category: "glean.upload".into(),
                send_in_pings: vec!["metrics".into()],
                lifetime: Lifetime::Ping,
                disabled: false,
                dynamic_label: None,
            }),
            pending_pings: CounterMetric::new(CommonMetricData {
                name: "pending_pings".into(),
                category: "glean.upload".into(),
                send_in_pings: vec!["metrics".into()],
                lifetime: Lifetime::Ping,
                disabled: false,
                dynamic_label: None,
            }),
        }
    }
}
/// Metrics about Glean's own database (category `glean.database`).
#[derive(Debug)]
pub struct DatabaseMetrics {
    /// Distribution of the database size (recorded with `MemoryUnit::Byte`).
    pub size: MemoryDistributionMetric,
}
impl DatabaseMetrics {
    /// Builds the container for Glean's internal database metrics.
    pub fn new() -> DatabaseMetrics {
        // Metadata for the `glean.database.size` metric, sent in the
        // "metrics" ping with ping lifetime.
        let size_meta = CommonMetricData {
            name: "size".into(),
            category: "glean.database".into(),
            send_in_pings: vec!["metrics".into()],
            lifetime: Lifetime::Ping,
            disabled: false,
            dynamic_label: None,
        };
        DatabaseMetrics {
            size: MemoryDistributionMetric::new(size_meta, MemoryUnit::Byte),
        }
    }
}

1842
third_party/rust/glean-core/src/lib.rs поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,69 +1,69 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A boolean metric.
///
/// Records a simple flag.
#[derive(Clone, Debug)]
pub struct BooleanMetric {
    // Common metadata (name, category, lifetime, ...) shared by all metric types.
    meta: CommonMetricData,
}
impl MetricType for BooleanMetric {
    /// Shared access to this metric's common metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Exclusive access to this metric's common metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl BooleanMetric {
/// Creates a new boolean metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Sets to the specified boolean value.
///
/// # Arguments
///
/// * `glean` - the Glean instance this metric belongs to.
/// * `value` - the value to set.
pub fn set(&self, glean: &Glean, value: bool) {
if !self.should_record(glean) {
return;
}
let value = Metric::Boolean(value);
glean.storage().record(glean, &self.meta, &value)
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as a boolean.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<bool> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::Boolean(b)) => Some(b),
_ => None,
}
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A boolean metric.
///
/// Records a simple flag.
#[derive(Clone, Debug)]
pub struct BooleanMetric {
    // Common metadata (name, category, lifetime, ...) shared by all metric types.
    meta: CommonMetricData,
}
impl MetricType for BooleanMetric {
    /// Shared access to this metric's common metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Exclusive access to this metric's common metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl BooleanMetric {
/// Creates a new boolean metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Sets to the specified boolean value.
///
/// # Arguments
///
/// * `glean` - the Glean instance this metric belongs to.
/// * `value` - the value to set.
pub fn set(&self, glean: &Glean, value: bool) {
if !self.should_record(glean) {
return;
}
let value = Metric::Boolean(value);
glean.storage().record(glean, &self.meta, &value)
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as a boolean.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<bool> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::Boolean(b)) => Some(b),
_ => None,
}
}
}

Просмотреть файл

@ -1,92 +1,92 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A counter metric.
///
/// Used to count things.
/// The value can only be incremented, not decremented.
#[derive(Clone, Debug)]
pub struct CounterMetric {
    // Common metadata (name, category, lifetime, ...) shared by all metric types.
    meta: CommonMetricData,
}
impl MetricType for CounterMetric {
    /// Shared access to this metric's common metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Exclusive access to this metric's common metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl CounterMetric {
/// Creates a new counter metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Increases the counter by `amount`.
///
/// # Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `amount` - The amount to increase by. Should be positive.
///
/// ## Notes
///
/// Logs an error if the `amount` is 0 or negative.
pub fn add(&self, glean: &Glean, amount: i32) {
if !self.should_record(glean) {
return;
}
if amount <= 0 {
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
format!("Added negative or zero value {}", amount),
None,
);
return;
}
glean
.storage()
.record_with(glean, &self.meta, |old_value| match old_value {
Some(Metric::Counter(old_value)) => {
Metric::Counter(old_value.saturating_add(amount))
}
_ => Metric::Counter(amount),
})
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<i32> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::Counter(i)) => Some(i),
_ => None,
}
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A counter metric.
///
/// Used to count things.
/// The value can only be incremented, not decremented.
#[derive(Clone, Debug)]
pub struct CounterMetric {
    // Common metadata (name, category, lifetime, ...) shared by all metric types.
    meta: CommonMetricData,
}
impl MetricType for CounterMetric {
    /// Shared access to this metric's common metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Exclusive access to this metric's common metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl CounterMetric {
/// Creates a new counter metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Increases the counter by `amount`.
///
/// # Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `amount` - The amount to increase by. Should be positive.
///
/// ## Notes
///
/// Logs an error if the `amount` is 0 or negative.
pub fn add(&self, glean: &Glean, amount: i32) {
if !self.should_record(glean) {
return;
}
if amount <= 0 {
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
format!("Added negative or zero value {}", amount),
None,
);
return;
}
glean
.storage()
.record_with(glean, &self.meta, |old_value| match old_value {
Some(Metric::Counter(old_value)) => {
Metric::Counter(old_value.saturating_add(amount))
}
_ => Metric::Counter(amount),
})
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<i32> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::Counter(i)) => Some(i),
_ => None,
}
}
}

Просмотреть файл

@ -1,186 +1,186 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Bucketing, Histogram, HistogramType};
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A custom distribution metric.
///
/// Custom distributions record samples into a histogram whose range,
/// bucket count and bucketing scheme are supplied at construction time.
#[derive(Debug)]
pub struct CustomDistributionMetric {
    // Common metadata (name, category, lifetime, ...) shared by all metric types.
    meta: CommonMetricData,
    // Lower bound of the histogram range.
    range_min: u64,
    // Upper bound of the histogram range.
    range_max: u64,
    // Number of histogram buckets.
    bucket_count: u64,
    // Linear or exponential bucketing (see `HistogramType`).
    histogram_type: HistogramType,
}
/// Creates a snapshot of the histogram, in the payload format.
pub(crate) fn snapshot<B: Bucketing>(hist: &Histogram<B>) -> DistributionData {
    let values = hist.snapshot_values();
    let sum = hist.sum();
    DistributionData { values, sum }
}
impl MetricType for CustomDistributionMetric {
    /// Shared access to this metric's common metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Exclusive access to this metric's common metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl CustomDistributionMetric {
    /// Creates a new custom distribution metric.
    pub fn new(
        meta: CommonMetricData,
        range_min: u64,
        range_max: u64,
        bucket_count: u64,
        histogram_type: HistogramType,
    ) -> Self {
        Self {
            meta,
            range_min,
            range_max,
            bucket_count,
            histogram_type,
        }
    }
    /// Accumulates the provided signed samples in the metric.
    ///
    /// This is required so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// # Arguments
    ///
    /// - `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and report an `ErrorType::InvalidValue`
    /// for each of them.
    pub fn accumulate_samples_signed(&self, glean: &Glean, samples: Vec<i64>) {
        if !self.should_record(glean) {
            return;
        }
        let mut num_negative_samples = 0;
        // Generic accumulation function to handle the different histogram types and count negative
        // samples.
        fn accumulate<B: Bucketing, F>(
            samples: &[i64],
            mut hist: Histogram<B>,
            metric: F,
        ) -> (i32, Metric)
        where
            F: Fn(Histogram<B>) -> Metric,
        {
            let mut num_negative_samples = 0;
            for &sample in samples.iter() {
                if sample < 0 {
                    num_negative_samples += 1;
                } else {
                    let sample = sample as u64;
                    hist.accumulate(sample);
                }
            }
            (num_negative_samples, metric(hist))
        }
        // Load the previously stored histogram (or start a fresh one matching
        // `histogram_type`), fold all non-negative samples into it, and hand
        // the updated metric back to storage.
        glean.storage().record_with(glean, &self.meta, |old_value| {
            let (num_negative, hist) = match self.histogram_type {
                HistogramType::Linear => {
                    let hist = if let Some(Metric::CustomDistributionLinear(hist)) = old_value {
                        hist
                    } else {
                        Histogram::linear(
                            self.range_min,
                            self.range_max,
                            self.bucket_count as usize,
                        )
                    };
                    accumulate(&samples, hist, Metric::CustomDistributionLinear)
                }
                HistogramType::Exponential => {
                    let hist = if let Some(Metric::CustomDistributionExponential(hist)) = old_value
                    {
                        hist
                    } else {
                        Histogram::exponential(
                            self.range_min,
                            self.range_max,
                            self.bucket_count as usize,
                        )
                    };
                    accumulate(&samples, hist, Metric::CustomDistributionExponential)
                }
            };
            // Propagate the negative-sample count out of the closure so it
            // can be reported after the store completes.
            num_negative_samples = num_negative;
            hist
        });
        // All discarded negative samples are reported as one error with a count.
        if num_negative_samples > 0 {
            let msg = format!("Accumulated {} negative samples", num_negative_samples);
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidValue,
                msg,
                num_negative_samples,
            );
        }
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored histogram.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        ) {
            // Boxing the value, in order to return either of the possible buckets
            Some(Metric::CustomDistributionExponential(hist)) => Some(snapshot(&hist)),
            Some(Metric::CustomDistributionLinear(hist)) => Some(snapshot(&hist)),
            _ => None,
        }
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored histogram as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(
        &self,
        glean: &Glean,
        storage_name: &str,
    ) -> Option<String> {
        self.test_get_value(glean, storage_name)
            .map(|snapshot| serde_json::to_string(&snapshot).unwrap())
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Bucketing, Histogram, HistogramType};
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A custom distribution metric.
///
/// Custom distributions record samples into a histogram whose range,
/// bucket count and bucketing scheme are supplied at construction time.
#[derive(Debug)]
pub struct CustomDistributionMetric {
    // Common metadata (name, category, lifetime, ...) shared by all metric types.
    meta: CommonMetricData,
    // Lower bound of the histogram range.
    range_min: u64,
    // Upper bound of the histogram range.
    range_max: u64,
    // Number of histogram buckets.
    bucket_count: u64,
    // Linear or exponential bucketing (see `HistogramType`).
    histogram_type: HistogramType,
}
/// Creates a snapshot of the histogram, in the payload format.
pub(crate) fn snapshot<B: Bucketing>(hist: &Histogram<B>) -> DistributionData {
    let values = hist.snapshot_values();
    let sum = hist.sum();
    DistributionData { values, sum }
}
impl MetricType for CustomDistributionMetric {
    /// Shared access to this metric's common metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Exclusive access to this metric's common metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl CustomDistributionMetric {
    /// Creates a new custom distribution metric.
    pub fn new(
        meta: CommonMetricData,
        range_min: u64,
        range_max: u64,
        bucket_count: u64,
        histogram_type: HistogramType,
    ) -> Self {
        Self {
            meta,
            range_min,
            range_max,
            bucket_count,
            histogram_type,
        }
    }
    /// Accumulates the provided signed samples in the metric.
    ///
    /// This is required so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// # Arguments
    ///
    /// - `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and report an `ErrorType::InvalidValue`
    /// for each of them.
    pub fn accumulate_samples_signed(&self, glean: &Glean, samples: Vec<i64>) {
        if !self.should_record(glean) {
            return;
        }
        let mut num_negative_samples = 0;
        // Generic accumulation function to handle the different histogram types and count negative
        // samples.
        fn accumulate<B: Bucketing, F>(
            samples: &[i64],
            mut hist: Histogram<B>,
            metric: F,
        ) -> (i32, Metric)
        where
            F: Fn(Histogram<B>) -> Metric,
        {
            let mut num_negative_samples = 0;
            for &sample in samples.iter() {
                if sample < 0 {
                    num_negative_samples += 1;
                } else {
                    let sample = sample as u64;
                    hist.accumulate(sample);
                }
            }
            (num_negative_samples, metric(hist))
        }
        // Load the previously stored histogram (or start a fresh one matching
        // `histogram_type`), fold all non-negative samples into it, and hand
        // the updated metric back to storage.
        glean.storage().record_with(glean, &self.meta, |old_value| {
            let (num_negative, hist) = match self.histogram_type {
                HistogramType::Linear => {
                    let hist = if let Some(Metric::CustomDistributionLinear(hist)) = old_value {
                        hist
                    } else {
                        Histogram::linear(
                            self.range_min,
                            self.range_max,
                            self.bucket_count as usize,
                        )
                    };
                    accumulate(&samples, hist, Metric::CustomDistributionLinear)
                }
                HistogramType::Exponential => {
                    let hist = if let Some(Metric::CustomDistributionExponential(hist)) = old_value
                    {
                        hist
                    } else {
                        Histogram::exponential(
                            self.range_min,
                            self.range_max,
                            self.bucket_count as usize,
                        )
                    };
                    accumulate(&samples, hist, Metric::CustomDistributionExponential)
                }
            };
            // Propagate the negative-sample count out of the closure so it
            // can be reported after the store completes.
            num_negative_samples = num_negative;
            hist
        });
        // All discarded negative samples are reported as one error with a count.
        if num_negative_samples > 0 {
            let msg = format!("Accumulated {} negative samples", num_negative_samples);
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidValue,
                msg,
                num_negative_samples,
            );
        }
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored histogram.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        ) {
            // Boxing the value, in order to return either of the possible buckets
            Some(Metric::CustomDistributionExponential(hist)) => Some(snapshot(&hist)),
            Some(Metric::CustomDistributionLinear(hist)) => Some(snapshot(&hist)),
            _ => None,
        }
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored histogram as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(
        &self,
        glean: &Glean,
        storage_name: &str,
    ) -> Option<String> {
        self.test_get_value(glean, storage_name)
            .map(|snapshot| serde_json::to_string(&snapshot).unwrap())
    }
}

Просмотреть файл

@ -1,163 +1,163 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
#![allow(clippy::too_many_arguments)]
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::util::{get_iso_time_string, local_now_with_offset};
use crate::CommonMetricData;
use crate::Glean;
use chrono::{DateTime, FixedOffset, TimeZone};
/// A datetime type.
///
/// Used to feed data to the `DatetimeMetric`.
pub type Datetime = DateTime<FixedOffset>;
/// A datetime metric.
///
/// Used to record an absolute date and time, such as the time the user first ran
/// the application.
#[derive(Debug)]
pub struct DatetimeMetric {
    // Common metadata (name, category, lifetime, ...) shared by all metric types.
    meta: CommonMetricData,
    // Precision the stored datetime is truncated to when rendered.
    time_unit: TimeUnit,
}
impl MetricType for DatetimeMetric {
    /// Shared access to this metric's common metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Exclusive access to this metric's common metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl DatetimeMetric {
    /// Creates a new datetime metric.
    pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
        Self { meta, time_unit }
    }
    /// Sets the metric to a date/time including the timezone offset.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance this metric belongs to.
    /// * `year` - the year to set the metric to.
    /// * `month` - the month to set the metric to (1-12).
    /// * `day` - the day to set the metric to (1-based).
    /// * `hour` - the hour to set the metric to.
    /// * `minute` - the minute to set the metric to.
    /// * `second` - the second to set the metric to.
    /// * `nano` - the nanosecond fraction to the last whole second.
    /// * `offset_seconds` - the timezone difference, in seconds, for the Eastern
    ///   Hemisphere. Negative seconds mean Western Hemisphere.
    pub fn set_with_details(
        &self,
        glean: &Glean,
        year: i32,
        month: u32,
        day: u32,
        hour: u32,
        minute: u32,
        second: u32,
        nano: u32,
        offset_seconds: i32,
    ) {
        if !self.should_record(glean) {
            return;
        }
        // Validate the offset once and reuse the checked value below.
        // Previously the code re-derived it through `FixedOffset::east`,
        // which panics on out-of-bounds offsets and duplicated the check.
        let timezone_offset = match FixedOffset::east_opt(offset_seconds) {
            Some(offset) => offset,
            None => {
                let msg = format!("Invalid timezone offset {}. Not recording.", offset_seconds);
                record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
                return;
            }
        };
        let datetime_obj = timezone_offset
            .ymd_opt(year, month, day)
            .and_hms_nano_opt(hour, minute, second, nano);
        // `single()` yields a value only when the input maps to exactly one
        // valid point in time; anything else is reported as an invalid value.
        match datetime_obj.single() {
            Some(d) => self.set(glean, Some(d)),
            _ => {
                record_error(
                    glean,
                    &self.meta,
                    ErrorType::InvalidValue,
                    "Invalid input data. Not recording.",
                    None,
                );
            }
        }
    }
    /// Sets the metric to a date/time which includes the timezone offset.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance this metric belongs to.
    /// * `value` - Some date/time value, with offset, to set the metric to.
    ///   If none, the current local time is used.
    pub fn set(&self, glean: &Glean, value: Option<Datetime>) {
        if !self.should_record(glean) {
            return;
        }
        let value = value.unwrap_or_else(local_now_with_offset);
        let value = Metric::Datetime(value, self.time_unit);
        glean.storage().record(glean, &self.meta, &value)
    }
    /// Gets the stored datetime value.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance this metric belongs to.
    /// * `storage_name` - the storage name to look into.
    ///
    /// # Returns
    ///
    /// The stored value or `None` if nothing stored.
    pub(crate) fn get_value(&self, glean: &Glean, storage_name: &str) -> Option<Datetime> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta().identifier(glean),
        ) {
            Some(Metric::Datetime(dt, _)) => Some(dt),
            _ => None,
        }
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value as a String.
    ///
    /// The precision of this value is truncated to the `time_unit` precision.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_string(&self, glean: &Glean, storage_name: &str) -> Option<String> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        ) {
            Some(Metric::Datetime(d, tu)) => Some(get_iso_time_string(d, tu)),
            _ => None,
        }
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
#![allow(clippy::too_many_arguments)]
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::util::{get_iso_time_string, local_now_with_offset};
use crate::CommonMetricData;
use crate::Glean;
use chrono::{DateTime, FixedOffset, TimeZone};
/// A datetime type.
///
/// Used to feed data to the `DatetimeMetric`.
pub type Datetime = DateTime<FixedOffset>;
/// A datetime metric.
///
/// Used to record an absolute date and time, such as the time the user first ran
/// the application.
#[derive(Debug)]
pub struct DatetimeMetric {
    // Common metadata (name, category, lifetime, ...) shared by all metric types.
    meta: CommonMetricData,
    // Precision the stored datetime is truncated to when rendered.
    time_unit: TimeUnit,
}
impl MetricType for DatetimeMetric {
    /// Shared access to this metric's common metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Exclusive access to this metric's common metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl DatetimeMetric {
    /// Creates a new datetime metric.
    pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
        Self { meta, time_unit }
    }
    /// Sets the metric to a date/time including the timezone offset.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance this metric belongs to.
    /// * `year` - the year to set the metric to.
    /// * `month` - the month to set the metric to (1-12).
    /// * `day` - the day to set the metric to (1-based).
    /// * `hour` - the hour to set the metric to.
    /// * `minute` - the minute to set the metric to.
    /// * `second` - the second to set the metric to.
    /// * `nano` - the nanosecond fraction to the last whole second.
    /// * `offset_seconds` - the timezone difference, in seconds, for the Eastern
    ///   Hemisphere. Negative seconds mean Western Hemisphere.
    pub fn set_with_details(
        &self,
        glean: &Glean,
        year: i32,
        month: u32,
        day: u32,
        hour: u32,
        minute: u32,
        second: u32,
        nano: u32,
        offset_seconds: i32,
    ) {
        if !self.should_record(glean) {
            return;
        }
        // Validate the offset once and reuse the checked value below.
        // Previously the code re-derived it through `FixedOffset::east`,
        // which panics on out-of-bounds offsets and duplicated the check.
        let timezone_offset = match FixedOffset::east_opt(offset_seconds) {
            Some(offset) => offset,
            None => {
                let msg = format!("Invalid timezone offset {}. Not recording.", offset_seconds);
                record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
                return;
            }
        };
        let datetime_obj = timezone_offset
            .ymd_opt(year, month, day)
            .and_hms_nano_opt(hour, minute, second, nano);
        // `single()` yields a value only when the input maps to exactly one
        // valid point in time; anything else is reported as an invalid value.
        match datetime_obj.single() {
            Some(d) => self.set(glean, Some(d)),
            _ => {
                record_error(
                    glean,
                    &self.meta,
                    ErrorType::InvalidValue,
                    "Invalid input data. Not recording.",
                    None,
                );
            }
        }
    }
    /// Sets the metric to a date/time which includes the timezone offset.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance this metric belongs to.
    /// * `value` - Some date/time value, with offset, to set the metric to.
    ///   If none, the current local time is used.
    pub fn set(&self, glean: &Glean, value: Option<Datetime>) {
        if !self.should_record(glean) {
            return;
        }
        let value = value.unwrap_or_else(local_now_with_offset);
        let value = Metric::Datetime(value, self.time_unit);
        glean.storage().record(glean, &self.meta, &value)
    }
    /// Gets the stored datetime value.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance this metric belongs to.
    /// * `storage_name` - the storage name to look into.
    ///
    /// # Returns
    ///
    /// The stored value or `None` if nothing stored.
    pub(crate) fn get_value(&self, glean: &Glean, storage_name: &str) -> Option<Datetime> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta().identifier(glean),
        ) {
            Some(Metric::Datetime(dt, _)) => Some(dt),
            _ => None,
        }
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value as a String.
    ///
    /// The precision of this value is truncated to the `time_unit` precision.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_string(&self, glean: &Glean, storage_name: &str) -> Option<String> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        ) {
            Some(Metric::Datetime(d, tu)) => Some(get_iso_time_string(d, tu)),
            _ => None,
        }
    }
}

Просмотреть файл

@ -1,139 +1,139 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use serde_json::{json, Value as JsonValue};
use crate::error_recording::{record_error, ErrorType};
use crate::event_database::RecordedEvent;
use crate::metrics::MetricType;
use crate::util::truncate_string_at_boundary_with_error;
use crate::CommonMetricData;
use crate::Glean;
/// Maximum length of an extra value; longer values are truncated
/// (see `truncate_string_at_boundary_with_error` in `record`).
const MAX_LENGTH_EXTRA_KEY_VALUE: usize = 100;
/// An event metric.
///
/// Events allow recording of e.g. individual occurrences of user actions, say
/// every time a view was open and from where. Each time you record an event, it
/// records a timestamp, the event's name and a set of custom values.
#[derive(Clone, Debug)]
pub struct EventMetric {
    // Common metadata (name, category, lifetime, ...) shared by all metric types.
    meta: CommonMetricData,
    // Extra keys this event may carry; `record` receives indices into this list.
    allowed_extra_keys: Vec<String>,
}
impl MetricType for EventMetric {
    /// Shared access to this metric's common metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Exclusive access to this metric's common metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl EventMetric {
    /// Creates a new event metric.
    pub fn new(meta: CommonMetricData, allowed_extra_keys: Vec<String>) -> Self {
        Self {
            meta,
            allowed_extra_keys,
        }
    }
    /// Records an event.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    /// * `timestamp` - A monotonically increasing timestamp, in milliseconds.
    /// This must be provided since the actual recording of the event may
    /// happen some time later than the moment the event occurred.
    /// * `extra` - A HashMap of (key, value) pairs. The key is an index into
    /// the metric's `allowed_extra_keys` vector where the key's string is
    /// looked up. If any key index is out of range, an error is reported and
    /// no event is recorded.
    pub fn record<M: Into<Option<HashMap<i32, String>>>>(
        &self,
        glean: &Glean,
        timestamp: u64,
        extra: M,
    ) {
        if !self.should_record(glean) {
            return;
        }
        let extra = extra.into();
        // Map each extra-key index to its configured name, truncating each
        // value to MAX_LENGTH_EXTRA_KEY_VALUE with an error on truncation.
        // An out-of-range index aborts the whole record with an error.
        let extra_strings: Option<HashMap<String, String>> = if let Some(extra) = extra {
            if extra.is_empty() {
                None
            } else {
                let mut extra_strings = HashMap::new();
                for (k, v) in extra.into_iter() {
                    match self.allowed_extra_keys.get(k as usize) {
                        Some(k) => extra_strings.insert(
                            k.to_string(),
                            truncate_string_at_boundary_with_error(
                                glean,
                                &self.meta,
                                v,
                                MAX_LENGTH_EXTRA_KEY_VALUE,
                            ),
                        ),
                        None => {
                            let msg = format!("Invalid key index {}", k);
                            record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
                            return;
                        }
                    };
                }
                Some(extra_strings)
            }
        } else {
            None
        };
        glean
            .event_storage()
            .record(glean, &self.meta, timestamp, extra_strings);
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Tests whether there are currently stored events for this event metric.
    ///
    /// This doesn't clear the stored value.
    pub fn test_has_value(&self, glean: &Glean, store_name: &str) -> bool {
        glean.event_storage().test_has_value(&self.meta, store_name)
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Get the vector of currently stored events for this event metric.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, store_name: &str) -> Option<Vec<RecordedEvent>> {
        glean.event_storage().test_get_value(&self.meta, store_name)
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored events for this event metric as a JSON-encoded string.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(&self, glean: &Glean, store_name: &str) -> String {
        // JSON `null` is emitted when no events are stored.
        match self.test_get_value(glean, store_name) {
            Some(value) => json!(value),
            None => json!(JsonValue::Null),
        }
        .to_string()
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use serde_json::{json, Value as JsonValue};
use crate::error_recording::{record_error, ErrorType};
use crate::event_database::RecordedEvent;
use crate::metrics::MetricType;
use crate::util::truncate_string_at_boundary_with_error;
use crate::CommonMetricData;
use crate::Glean;
/// Maximum length of an extra value; longer values are truncated
/// (see `truncate_string_at_boundary_with_error` in `record`).
const MAX_LENGTH_EXTRA_KEY_VALUE: usize = 100;
/// An event metric.
///
/// Events allow recording of e.g. individual occurrences of user actions, say
/// every time a view was open and from where. Each time you record an event, it
/// records a timestamp, the event's name and a set of custom values.
#[derive(Clone, Debug)]
pub struct EventMetric {
    // Common metadata (name, category, lifetime, ...) shared by all metric types.
    meta: CommonMetricData,
    // Extra keys this event may carry; `record` receives indices into this list.
    allowed_extra_keys: Vec<String>,
}
impl MetricType for EventMetric {
    /// Immutable access to the common metric metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }

    /// Mutable access to the common metric metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl EventMetric {
    /// Creates a new event metric.
    ///
    /// # Arguments
    ///
    /// * `meta` - The common metric metadata for this metric.
    /// * `allowed_extra_keys` - Names of the extra keys events may record,
    ///   addressed by index in `record`.
    pub fn new(meta: CommonMetricData, allowed_extra_keys: Vec<String>) -> Self {
        Self {
            meta,
            allowed_extra_keys,
        }
    }

    /// Records an event.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    /// * `timestamp` - A monotonically increasing timestamp, in milliseconds.
    ///   This must be provided since the actual recording of the event may
    ///   happen some time later than the moment the event occurred.
    /// * `extra` - A HashMap of (key, value) pairs. The key is an index into
    ///   the metric's `allowed_extra_keys` vector where the key's string is
    ///   looked up. If any key index is out of range, an error is reported and
    ///   no event is recorded.
    pub fn record<M: Into<Option<HashMap<i32, String>>>>(
        &self,
        glean: &Glean,
        timestamp: u64,
        extra: M,
    ) {
        if !self.should_record(glean) {
            return;
        }

        let extra = extra.into();
        // Resolve numeric extra-key indices to their string names, truncating
        // over-long values. An empty map is normalized to `None`.
        let extra_strings: Option<HashMap<String, String>> = if let Some(extra) = extra {
            if extra.is_empty() {
                None
            } else {
                let mut extra_strings = HashMap::new();
                for (k, v) in extra.into_iter() {
                    // A negative `k` wraps to a huge value via `as usize`, so
                    // `get` returns `None` and it is reported as invalid below.
                    match self.allowed_extra_keys.get(k as usize) {
                        Some(k) => extra_strings.insert(
                            k.to_string(),
                            truncate_string_at_boundary_with_error(
                                glean,
                                &self.meta,
                                v,
                                MAX_LENGTH_EXTRA_KEY_VALUE,
                            ),
                        ),
                        None => {
                            // Unknown key index: record an error and drop the
                            // whole event, as documented above.
                            let msg = format!("Invalid key index {}", k);
                            record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
                            return;
                        }
                    };
                }
                Some(extra_strings)
            }
        } else {
            None
        };

        glean
            .event_storage()
            .record(glean, &self.meta, timestamp, extra_strings);
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Tests whether there are currently stored events for this event metric.
    ///
    /// This doesn't clear the stored value.
    pub fn test_has_value(&self, glean: &Glean, store_name: &str) -> bool {
        glean.event_storage().test_has_value(&self.meta, store_name)
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Get the vector of currently stored events for this event metric.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, store_name: &str) -> Option<Vec<RecordedEvent>> {
        glean.event_storage().test_get_value(&self.meta, store_name)
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored events for this event metric as a JSON-encoded string.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(&self, glean: &Glean, store_name: &str) -> String {
        match self.test_get_value(glean, store_name) {
            Some(value) => json!(value),
            None => json!(JsonValue::Null),
        }
        .to_string()
    }
}

Просмотреть файл

@ -1,290 +1,290 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use serde::{Deserialize, Serialize};
use serde_json::{json, Map as JsonMap, Value as JsonValue};
use std::collections::HashMap;
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::util::{truncate_string_at_boundary, truncate_string_at_boundary_with_error};
use crate::CommonMetricData;
use crate::Glean;
use crate::Lifetime;
use crate::INTERNAL_STORAGE;
/// The maximum length of the experiment id, the branch id, and the keys of the
/// `extra` map. Identifiers longer than this number of characters are truncated.
const MAX_EXPERIMENTS_IDS_LEN: usize = 100;
/// The maximum length of the experiment `extra` values. Values longer than this
/// limit will be truncated.
const MAX_EXPERIMENT_VALUE_LEN: usize = MAX_EXPERIMENTS_IDS_LEN;
/// The maximum number of extras allowed in the `extra` hash map. Any items added
/// beyond this limit will be dropped. Note that truncation of a hash map is
/// nondeterministic in which items are truncated.
const MAX_EXPERIMENTS_EXTRAS_SIZE: usize = 20;
/// The data for a single experiment.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct RecordedExperimentData {
    // The experiment's active branch.
    pub branch: String,
    // Optional user-defined context for the experiment.
    // `None` is omitted entirely from the JSON form (see `as_json`).
    pub extra: Option<HashMap<String, String>>,
}
impl RecordedExperimentData {
    /// Gets the recorded experiment data as a JSON value.
    ///
    /// For JSON, we don't want to include `{"extra": null}` -- we just want to skip
    /// `extra` entirely. A serde field annotation can't be used for this, since it
    /// would break bincode serialization, which doesn't support skipping fields.
    /// The JSON conversion is therefore done by hand here.
    pub fn as_json(&self) -> JsonValue {
        let mut map = JsonMap::new();
        map.insert("branch".to_string(), json!(self.branch));
        // `json!` on a `Some(..)` serializes the inner value transparently,
        // so inserting only when present matches the original output.
        if let Some(ref extra) = self.extra {
            map.insert("extra".to_string(), json!(extra));
        }
        JsonValue::Object(map)
    }
}
/// An experiment metric.
///
/// Used to store active experiments.
/// This is used through the `set_experiment_active`/`set_experiment_inactive` Glean SDK API.
#[derive(Clone, Debug)]
pub struct ExperimentMetric {
    // Common metric metadata; the experiment id is encoded in `meta.name`
    // as `"<id>#experiment"` (see `ExperimentMetric::new`).
    meta: CommonMetricData,
}
impl MetricType for ExperimentMetric {
    /// Immutable access to the common metric metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }

    /// Mutable access to the common metric metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
impl ExperimentMetric {
    /// Creates a new experiment metric.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance, used to record a truncation error if needed.
    /// * `id` - the id of the experiment. Please note that this will be
    ///   truncated to `MAX_EXPERIMENTS_IDS_LEN`, if needed.
    pub fn new(glean: &Glean, id: String) -> Self {
        let mut error = None;

        // Make sure that experiment id is within the expected limit.
        let truncated_id = if id.len() > MAX_EXPERIMENTS_IDS_LEN {
            let msg = format!(
                "Value length {} for experiment id exceeds maximum of {}",
                id.len(),
                MAX_EXPERIMENTS_IDS_LEN
            );
            error = Some(msg);
            truncate_string_at_boundary(id, MAX_EXPERIMENTS_IDS_LEN)
        } else {
            id
        };

        let new_experiment = Self {
            meta: CommonMetricData {
                name: format!("{}#experiment", truncated_id),
                // We don't need a category, the name is already unique
                category: "".into(),
                send_in_pings: vec![INTERNAL_STORAGE.into()],
                lifetime: Lifetime::Application,
                ..Default::default()
            },
        };

        // Check for a truncation error to record. Deferred until here because
        // recording an error needs the metric's `CommonMetricData`, which did
        // not exist yet at truncation time.
        if let Some(msg) = error {
            record_error(
                glean,
                &new_experiment.meta,
                ErrorType::InvalidValue,
                msg,
                None,
            );
        }

        new_experiment
    }

    /// Records an experiment as active.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    /// * `branch` - the active branch of the experiment. Please note that this will be
    ///   truncated to `MAX_EXPERIMENTS_IDS_LEN`, if needed.
    /// * `extra` - an optional, user defined String to String map used to provide richer
    ///   experiment context if needed. Keys/values are truncated to
    ///   `MAX_EXPERIMENTS_IDS_LEN`/`MAX_EXPERIMENT_VALUE_LEN` and at most
    ///   `MAX_EXPERIMENTS_EXTRAS_SIZE` entries are kept.
    pub fn set_active(
        &self,
        glean: &Glean,
        branch: String,
        extra: Option<HashMap<String, String>>,
    ) {
        if !self.should_record(glean) {
            return;
        }

        // Make sure that branch id is within the expected limit.
        let truncated_branch = if branch.len() > MAX_EXPERIMENTS_IDS_LEN {
            truncate_string_at_boundary_with_error(
                glean,
                &self.meta,
                branch,
                MAX_EXPERIMENTS_IDS_LEN,
            )
        } else {
            branch
        };

        // Apply limits to extras
        let truncated_extras = extra.map(|extra| {
            if extra.len() > MAX_EXPERIMENTS_EXTRAS_SIZE {
                let msg = format!(
                    "Extra hash map length {} exceeds maximum of {}",
                    extra.len(),
                    MAX_EXPERIMENTS_EXTRAS_SIZE
                );
                record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
            }

            let mut temp_map = HashMap::new();
            // NOTE: `HashMap` iteration order is unspecified, so *which*
            // entries survive the `take` on an over-sized map is
            // nondeterministic (as documented on MAX_EXPERIMENTS_EXTRAS_SIZE).
            for (key, value) in extra.into_iter().take(MAX_EXPERIMENTS_EXTRAS_SIZE) {
                let truncated_key = if key.len() > MAX_EXPERIMENTS_IDS_LEN {
                    truncate_string_at_boundary_with_error(
                        glean,
                        &self.meta,
                        key,
                        MAX_EXPERIMENTS_IDS_LEN,
                    )
                } else {
                    key
                };
                let truncated_value = if value.len() > MAX_EXPERIMENT_VALUE_LEN {
                    truncate_string_at_boundary_with_error(
                        glean,
                        &self.meta,
                        value,
                        MAX_EXPERIMENT_VALUE_LEN,
                    )
                } else {
                    value
                };
                temp_map.insert(truncated_key, truncated_value);
            }
            temp_map
        });

        let value = Metric::Experiment(RecordedExperimentData {
            branch: truncated_branch,
            extra: truncated_extras,
        });
        glean.storage().record(glean, &self.meta, &value)
    }

    /// Records an experiment as inactive.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    pub fn set_inactive(&self, glean: &Glean) {
        if !self.should_record(glean) {
            return;
        }

        // Experiments are stored in the internal storage with Application
        // lifetime; removing the single metric deactivates the experiment.
        if let Err(e) = glean.storage().remove_single_metric(
            Lifetime::Application,
            INTERNAL_STORAGE,
            &self.meta.name,
        ) {
            log::error!("Failed to set experiment as inactive: {:?}", e);
        }
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored experiment data as a JSON representation of
    /// the RecordedExperimentData.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(&self, glean: &Glean) -> Option<String> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            INTERNAL_STORAGE,
            &self.meta.identifier(glean),
        ) {
            Some(Metric::Experiment(e)) => Some(json!(e).to_string()),
            _ => None,
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // Round-trips `RecordedExperimentData` through bincode, both with and
    // without `extra`, to guard against accidental format changes.
    #[test]
    fn stable_serialization() {
        let experiment_empty = RecordedExperimentData {
            branch: "branch".into(),
            extra: None,
        };

        let mut data = HashMap::new();
        data.insert("a key".to_string(), "a value".to_string());
        let experiment_data = RecordedExperimentData {
            branch: "branch".into(),
            extra: Some(data),
        };

        let experiment_empty_bin = bincode::serialize(&experiment_empty).unwrap();
        let experiment_data_bin = bincode::serialize(&experiment_data).unwrap();

        assert_eq!(
            experiment_empty,
            bincode::deserialize(&experiment_empty_bin).unwrap()
        );
        assert_eq!(
            experiment_data,
            bincode::deserialize(&experiment_data_bin).unwrap()
        );
    }

    // Pins the exact byte encoding produced by an older Glean version so that
    // on-disk data written before the format change still behaves as expected.
    #[test]
    #[rustfmt::skip] // Let's not add newlines unnecessary
    fn deserialize_old_encoding() {
        // generated by `bincode::serialize` as of Glean commit ac27fceb7c0d5a7288d7d569e8c5c5399a53afb2
        // empty was generated from: `RecordedExperimentData { branch: "branch".into(), extra: None, }`
        let empty_bin = vec![6, 0, 0, 0, 0, 0, 0, 0, 98, 114, 97, 110, 99, 104];
        // data was generated from: RecordedExperimentData { branch: "branch".into(), extra: Some({"a key": "a value"}), };
        let data_bin = vec![6, 0, 0, 0, 0, 0, 0, 0, 98, 114, 97, 110, 99, 104,
                            1, 1, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0,
                            97, 32, 107, 101, 121, 7, 0, 0, 0, 0, 0, 0, 0, 97,
                            32, 118, 97, 108, 117, 101];

        let mut data = HashMap::new();
        data.insert("a key".to_string(), "a value".to_string());
        let experiment_data = RecordedExperimentData { branch: "branch".into(), extra: Some(data), };

        // We can't actually decode old experiment data.
        // Luckily Glean did store experiments in the database before commit ac27fceb7c0d5a7288d7d569e8c5c5399a53afb2.
        let experiment_empty: Result<RecordedExperimentData, _> = bincode::deserialize(&empty_bin);
        assert!(experiment_empty.is_err());
        assert_eq!(experiment_data, bincode::deserialize(&data_bin).unwrap());
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use serde::{Deserialize, Serialize};
use serde_json::{json, Map as JsonMap, Value as JsonValue};
use std::collections::HashMap;
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::util::{truncate_string_at_boundary, truncate_string_at_boundary_with_error};
use crate::CommonMetricData;
use crate::Glean;
use crate::Lifetime;
use crate::INTERNAL_STORAGE;
/// The maximum length of the experiment id, the branch id, and the keys of the
/// `extra` map. Identifiers longer than this number of characters are truncated.
const MAX_EXPERIMENTS_IDS_LEN: usize = 100;
/// The maximum length of the experiment `extra` values. Values longer than this
/// limit will be truncated.
const MAX_EXPERIMENT_VALUE_LEN: usize = MAX_EXPERIMENTS_IDS_LEN;
/// The maximum number of extras allowed in the `extra` hash map. Any items added
/// beyond this limit will be dropped. Note that truncation of a hash map is
/// nondeterministic in which items are truncated.
const MAX_EXPERIMENTS_EXTRAS_SIZE: usize = 20;
/// The data for a single experiment.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct RecordedExperimentData {
    // The experiment's active branch.
    pub branch: String,
    // Optional user-defined context for the experiment.
    // `None` is omitted entirely from the JSON form (see `as_json`).
    pub extra: Option<HashMap<String, String>>,
}
impl RecordedExperimentData {
    /// Gets the recorded experiment data as a JSON value.
    ///
    /// For JSON, we don't want to include `{"extra": null}` -- we just want to skip
    /// `extra` entirely. A serde field annotation can't be used for this, since it
    /// would break bincode serialization, which doesn't support skipping fields.
    /// The JSON conversion is therefore done by hand here.
    pub fn as_json(&self) -> JsonValue {
        let mut map = JsonMap::new();
        map.insert("branch".to_string(), json!(self.branch));
        // `json!` on a `Some(..)` serializes the inner value transparently,
        // so inserting only when present matches the original output.
        if let Some(ref extra) = self.extra {
            map.insert("extra".to_string(), json!(extra));
        }
        JsonValue::Object(map)
    }
}
/// An experiment metric.
///
/// Used to store active experiments.
/// This is used through the `set_experiment_active`/`set_experiment_inactive` Glean SDK API.
#[derive(Clone, Debug)]
pub struct ExperimentMetric {
    // Common metric metadata; the experiment id is encoded in `meta.name`
    // as `"<id>#experiment"` (see `ExperimentMetric::new`).
    meta: CommonMetricData,
}
impl MetricType for ExperimentMetric {
    /// Immutable access to the common metric metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }

    /// Mutable access to the common metric metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
impl ExperimentMetric {
    /// Creates a new experiment metric.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance, used to record a truncation error if needed.
    /// * `id` - the id of the experiment. Please note that this will be
    ///   truncated to `MAX_EXPERIMENTS_IDS_LEN`, if needed.
    pub fn new(glean: &Glean, id: String) -> Self {
        let mut error = None;

        // Make sure that experiment id is within the expected limit.
        let truncated_id = if id.len() > MAX_EXPERIMENTS_IDS_LEN {
            let msg = format!(
                "Value length {} for experiment id exceeds maximum of {}",
                id.len(),
                MAX_EXPERIMENTS_IDS_LEN
            );
            error = Some(msg);
            truncate_string_at_boundary(id, MAX_EXPERIMENTS_IDS_LEN)
        } else {
            id
        };

        let new_experiment = Self {
            meta: CommonMetricData {
                name: format!("{}#experiment", truncated_id),
                // We don't need a category, the name is already unique
                category: "".into(),
                send_in_pings: vec![INTERNAL_STORAGE.into()],
                lifetime: Lifetime::Application,
                ..Default::default()
            },
        };

        // Check for a truncation error to record. Deferred until here because
        // recording an error needs the metric's `CommonMetricData`, which did
        // not exist yet at truncation time.
        if let Some(msg) = error {
            record_error(
                glean,
                &new_experiment.meta,
                ErrorType::InvalidValue,
                msg,
                None,
            );
        }

        new_experiment
    }

    /// Records an experiment as active.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    /// * `branch` - the active branch of the experiment. Please note that this will be
    ///   truncated to `MAX_EXPERIMENTS_IDS_LEN`, if needed.
    /// * `extra` - an optional, user defined String to String map used to provide richer
    ///   experiment context if needed. Keys/values are truncated to
    ///   `MAX_EXPERIMENTS_IDS_LEN`/`MAX_EXPERIMENT_VALUE_LEN` and at most
    ///   `MAX_EXPERIMENTS_EXTRAS_SIZE` entries are kept.
    pub fn set_active(
        &self,
        glean: &Glean,
        branch: String,
        extra: Option<HashMap<String, String>>,
    ) {
        if !self.should_record(glean) {
            return;
        }

        // Make sure that branch id is within the expected limit.
        let truncated_branch = if branch.len() > MAX_EXPERIMENTS_IDS_LEN {
            truncate_string_at_boundary_with_error(
                glean,
                &self.meta,
                branch,
                MAX_EXPERIMENTS_IDS_LEN,
            )
        } else {
            branch
        };

        // Apply limits to extras
        let truncated_extras = extra.map(|extra| {
            if extra.len() > MAX_EXPERIMENTS_EXTRAS_SIZE {
                let msg = format!(
                    "Extra hash map length {} exceeds maximum of {}",
                    extra.len(),
                    MAX_EXPERIMENTS_EXTRAS_SIZE
                );
                record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
            }

            let mut temp_map = HashMap::new();
            // NOTE: `HashMap` iteration order is unspecified, so *which*
            // entries survive the `take` on an over-sized map is
            // nondeterministic (as documented on MAX_EXPERIMENTS_EXTRAS_SIZE).
            for (key, value) in extra.into_iter().take(MAX_EXPERIMENTS_EXTRAS_SIZE) {
                let truncated_key = if key.len() > MAX_EXPERIMENTS_IDS_LEN {
                    truncate_string_at_boundary_with_error(
                        glean,
                        &self.meta,
                        key,
                        MAX_EXPERIMENTS_IDS_LEN,
                    )
                } else {
                    key
                };
                let truncated_value = if value.len() > MAX_EXPERIMENT_VALUE_LEN {
                    truncate_string_at_boundary_with_error(
                        glean,
                        &self.meta,
                        value,
                        MAX_EXPERIMENT_VALUE_LEN,
                    )
                } else {
                    value
                };
                temp_map.insert(truncated_key, truncated_value);
            }
            temp_map
        });

        let value = Metric::Experiment(RecordedExperimentData {
            branch: truncated_branch,
            extra: truncated_extras,
        });
        glean.storage().record(glean, &self.meta, &value)
    }

    /// Records an experiment as inactive.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    pub fn set_inactive(&self, glean: &Glean) {
        if !self.should_record(glean) {
            return;
        }

        // Experiments are stored in the internal storage with Application
        // lifetime; removing the single metric deactivates the experiment.
        if let Err(e) = glean.storage().remove_single_metric(
            Lifetime::Application,
            INTERNAL_STORAGE,
            &self.meta.name,
        ) {
            log::error!("Failed to set experiment as inactive: {:?}", e);
        }
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored experiment data as a JSON representation of
    /// the RecordedExperimentData.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(&self, glean: &Glean) -> Option<String> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            INTERNAL_STORAGE,
            &self.meta.identifier(glean),
        ) {
            Some(Metric::Experiment(e)) => Some(json!(e).to_string()),
            _ => None,
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // Round-trips `RecordedExperimentData` through bincode, both with and
    // without `extra`, to guard against accidental format changes.
    #[test]
    fn stable_serialization() {
        let experiment_empty = RecordedExperimentData {
            branch: "branch".into(),
            extra: None,
        };

        let mut data = HashMap::new();
        data.insert("a key".to_string(), "a value".to_string());
        let experiment_data = RecordedExperimentData {
            branch: "branch".into(),
            extra: Some(data),
        };

        let experiment_empty_bin = bincode::serialize(&experiment_empty).unwrap();
        let experiment_data_bin = bincode::serialize(&experiment_data).unwrap();

        assert_eq!(
            experiment_empty,
            bincode::deserialize(&experiment_empty_bin).unwrap()
        );
        assert_eq!(
            experiment_data,
            bincode::deserialize(&experiment_data_bin).unwrap()
        );
    }

    // Pins the exact byte encoding produced by an older Glean version so that
    // on-disk data written before the format change still behaves as expected.
    #[test]
    #[rustfmt::skip] // Let's not add newlines unnecessary
    fn deserialize_old_encoding() {
        // generated by `bincode::serialize` as of Glean commit ac27fceb7c0d5a7288d7d569e8c5c5399a53afb2
        // empty was generated from: `RecordedExperimentData { branch: "branch".into(), extra: None, }`
        let empty_bin = vec![6, 0, 0, 0, 0, 0, 0, 0, 98, 114, 97, 110, 99, 104];
        // data was generated from: RecordedExperimentData { branch: "branch".into(), extra: Some({"a key": "a value"}), };
        let data_bin = vec![6, 0, 0, 0, 0, 0, 0, 0, 98, 114, 97, 110, 99, 104,
                            1, 1, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0,
                            97, 32, 107, 101, 121, 7, 0, 0, 0, 0, 0, 0, 0, 97,
                            32, 118, 97, 108, 117, 101];

        let mut data = HashMap::new();
        data.insert("a key".to_string(), "a value".to_string());
        let experiment_data = RecordedExperimentData { branch: "branch".into(), extra: Some(data), };

        // We can't actually decode old experiment data.
        // Luckily Glean did store experiments in the database before commit ac27fceb7c0d5a7288d7d569e8c5c5399a53afb2.
        let experiment_empty: Result<RecordedExperimentData, _> = bincode::deserialize(&empty_bin);
        assert!(experiment_empty.is_err());
        assert_eq!(experiment_data, bincode::deserialize(&data_bin).unwrap());
    }
}

944
third_party/rust/glean-core/src/metrics/jwe.rs поставляемый
Просмотреть файл

@ -1,472 +1,472 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::fmt;
use std::str::FromStr;
use serde::Serialize;
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::{Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
const DEFAULT_MAX_CHARS_PER_VARIABLE_SIZE_ELEMENT: usize = 1024;
/// Verifies if a string is [`BASE64URL`](https://tools.ietf.org/html/rfc4648#section-5) compliant.
///
/// As such, the string must match the regex: `[a-zA-Z0-9\-\_]*`.
///
/// > **Note** As described in the [JWS specification](https://tools.ietf.org/html/rfc7515#section-2),
/// > the BASE64URL encoding used by JWE discards any padding,
/// > that is why we can ignore that for this validation.
///
/// The regex crate isn't used here because it adds to the binary size,
/// and the Glean SDK doesn't use regular expressions anywhere else.
fn validate_base64url_encoding(value: &str) -> bool {
    // Every character must come from the BASE64URL alphabet;
    // an empty string is trivially valid.
    value
        .chars()
        .all(|c| matches!(c, 'a'..='z' | 'A'..='Z' | '0'..='9' | '-' | '_'))
}
/// Representation of a [JWE](https://tools.ietf.org/html/rfc7516).
///
/// **Note** Variable sized elements will be constrained to a length of DEFAULT_MAX_CHARS_PER_VARIABLE_SIZE_ELEMENT,
/// this is a constraint introduced by Glean to prevent abuses and not part of the spec.
#[derive(Serialize)]
struct Jwe {
    /// A variable-size JWE protected header.
    header: String,
    /// A variable-size [encrypted key](https://tools.ietf.org/html/rfc7516#appendix-A.1.3).
    /// This can be an empty octet sequence.
    key: String,
    /// A fixed-size, 96-bit, base64 encoded [JWE Initialization vector](https://tools.ietf.org/html/rfc7516#appendix-A.1.4) (e.g. “48V1_ALb6US04U3b”).
    /// If not required by the encryption algorithm, can be an empty octet sequence.
    init_vector: String,
    /// The variable-size base64 encoded cipher text.
    cipher_text: String,
    /// A fixed-size, 132-bit, base64 encoded authentication tag.
    /// Can be an empty octet sequence.
    /// NOTE(review): `Jwe::new` validates this as 128 bits of decoded data
    /// (22 base64 chars = 132 encoded bits) -- confirm which unit "132-bit"
    /// is meant to refer to.
    auth_tag: String,
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl Jwe {
    /// Create a new JWE struct.
    ///
    /// Each element is run through the applicable validators below; the first
    /// failing check short-circuits and its `(ErrorType, message)` pair is
    /// returned.
    fn new<S: Into<String>>(
        header: S,
        key: S,
        init_vector: S,
        cipher_text: S,
        auth_tag: S,
    ) -> Result<Self, (ErrorType, String)> {
        let mut header = header.into();
        header = Self::validate_non_empty("header", header)?;
        header = Self::validate_max_size("header", header)?;
        header = Self::validate_base64url_encoding("header", header)?;

        // `key` may be empty (see the field docs on `Jwe`), so there is
        // deliberately no non-empty check for it.
        let mut key = key.into();
        key = Self::validate_max_size("key", key)?;
        key = Self::validate_base64url_encoding("key", key)?;

        // IV must be exactly 96 bits worth of base64 chars, or empty.
        let mut init_vector = init_vector.into();
        init_vector = Self::validate_fixed_size_or_empty("init_vector", init_vector, 96)?;
        init_vector = Self::validate_base64url_encoding("init_vector", init_vector)?;

        let mut cipher_text = cipher_text.into();
        cipher_text = Self::validate_non_empty("cipher_text", cipher_text)?;
        cipher_text = Self::validate_max_size("cipher_text", cipher_text)?;
        cipher_text = Self::validate_base64url_encoding("cipher_text", cipher_text)?;

        // Auth tag must be exactly 128 bits worth of base64 chars, or empty.
        let mut auth_tag = auth_tag.into();
        auth_tag = Self::validate_fixed_size_or_empty("auth_tag", auth_tag, 128)?;
        auth_tag = Self::validate_base64url_encoding("auth_tag", auth_tag)?;

        Ok(Self {
            header,
            key,
            init_vector,
            cipher_text,
            auth_tag,
        })
    }

    // Passes the value through unchanged if it is valid BASE64URL, otherwise
    // returns an `InvalidValue` error naming the offending element.
    fn validate_base64url_encoding(
        name: &str,
        value: String,
    ) -> Result<String, (ErrorType, String)> {
        if !validate_base64url_encoding(&value) {
            return Err((
                ErrorType::InvalidValue,
                format!("`{}` element in JWE value is not valid BASE64URL.", name),
            ));
        }

        Ok(value)
    }

    // Passes the value through unchanged if non-empty, otherwise returns an
    // `InvalidValue` error.
    fn validate_non_empty(name: &str, value: String) -> Result<String, (ErrorType, String)> {
        if value.is_empty() {
            return Err((
                ErrorType::InvalidValue,
                format!("`{}` element in JWE value must not be empty.", name),
            ));
        }

        Ok(value)
    }

    // Passes the value through unchanged if within the size limit, otherwise
    // returns an `InvalidOverflow` error.
    fn validate_max_size(name: &str, value: String) -> Result<String, (ErrorType, String)> {
        if value.len() > DEFAULT_MAX_CHARS_PER_VARIABLE_SIZE_ELEMENT {
            return Err((
                ErrorType::InvalidOverflow,
                format!(
                    "`{}` element in JWE value must not exceed {} characters.",
                    name, DEFAULT_MAX_CHARS_PER_VARIABLE_SIZE_ELEMENT
                ),
            ));
        }

        Ok(value)
    }

    // Passes the value through unchanged if empty or exactly `size_in_bits`
    // worth of base64 characters long, otherwise returns an
    // `InvalidOverflow` error.
    fn validate_fixed_size_or_empty(
        name: &str,
        value: String,
        size_in_bits: usize,
    ) -> Result<String, (ErrorType, String)> {
        // Each Base64 digit represents exactly 6 bits of data.
        // By dividing the size_in_bits by 6 and ceiling the result,
        // we get the amount of characters the value should have.
        let num_chars = (size_in_bits as f32 / 6f32).ceil() as usize;
        if !value.is_empty() && value.len() != num_chars {
            return Err((
                ErrorType::InvalidOverflow,
                format!(
                    "`{}` element in JWE value must have exactly {}-bits or be empty.",
                    name, size_in_bits
                ),
            ));
        }

        Ok(value)
    }
}
/// Trait implementation to convert a JWE [`compact representation`](https://tools.ietf.org/html/rfc7516#appendix-A.2.7)
/// string into a Jwe struct.
impl FromStr for Jwe {
    type Err = (ErrorType, String);

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let elements: Vec<&str> = s.split('.').collect();
        if elements.len() != 5 {
            return Err((
                ErrorType::InvalidValue,
                "JWE value is not formatted as expected.".into(),
            ));
        }

        // Indexing is safe: the length was checked to be exactly five above.
        // Elements appear in compact-representation order:
        // header.key.init_vector.cipher_text.auth_tag
        Self::new(
            elements[0],
            elements[1],
            elements[2],
            elements[3],
            elements[4],
        )
    }
}
/// Trait implementation to print the Jwe struct as the proper JWE [`compact representation`](https://tools.ietf.org/html/rfc7516#appendix-A.2.7).
impl fmt::Display for Jwe {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The compact representation is the five elements joined by `.`,
        // in the order defined by the spec.
        write!(
            f,
            "{}.{}.{}.{}.{}",
            self.header, self.key, self.init_vector, self.cipher_text, self.auth_tag
        )
    }
}
/// A JWE metric.
///
/// This metric works as a "transport" for JWE encrypted data.
///
/// The actual encryption is done somewhere else,
/// Glean must only make sure the data is valid JWE.
#[derive(Clone, Debug)]
pub struct JweMetric {
    // Shared metric metadata (name, category, send_in_pings, lifetime, ...).
    meta: CommonMetricData,
}
impl MetricType for JweMetric {
    /// Immutable access to the common metric metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }

    /// Mutable access to the common metric metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
impl JweMetric {
    /// Creates a new JWE metric.
    pub fn new(meta: CommonMetricData) -> Self {
        Self { meta }
    }

    /// Sets to the specified JWE value.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance this metric belongs to.
    /// * `value` - the [`compact representation`](https://tools.ietf.org/html/rfc7516#appendix-A.2.7) of a JWE value.
    pub fn set_with_compact_representation<S: Into<String>>(&self, glean: &Glean, value: S) {
        if !self.should_record(glean) {
            return;
        }

        let value = value.into();
        // Parse only to validate; on success the original string, not the
        // parsed struct, is what gets stored.
        match Jwe::from_str(&value) {
            Ok(_) => glean
                .storage()
                .record(glean, &self.meta, &Metric::Jwe(value)),
            Err((error_type, msg)) => record_error(glean, &self.meta, error_type, msg, None),
        };
    }

    /// Builds a JWE value from its elements and set to it.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance this metric belongs to.
    /// * `header` - the JWE Protected Header element.
    /// * `key` - the JWE Encrypted Key element.
    /// * `init_vector` - the JWE Initialization Vector element.
    /// * `cipher_text` - the JWE Ciphertext element.
    /// * `auth_tag` - the JWE Authentication Tag element.
    pub fn set<S: Into<String>>(
        &self,
        glean: &Glean,
        header: S,
        key: S,
        init_vector: S,
        cipher_text: S,
        auth_tag: S,
    ) {
        if !self.should_record(glean) {
            return;
        }

        // Validation happens in `Jwe::new`; the stored value is the compact
        // representation produced by the `Display` impl.
        match Jwe::new(header, key, init_vector, cipher_text, auth_tag) {
            Ok(jwe) => glean
                .storage()
                .record(glean, &self.meta, &Metric::Jwe(jwe.to_string())),
            Err((error_type, msg)) => record_error(glean, &self.meta, error_type, msg, None),
        };
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value as a string.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<String> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        ) {
            Some(Metric::Jwe(b)) => Some(b),
            _ => None,
        }
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored JWE as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(
        &self,
        glean: &Glean,
        storage_name: &str,
    ) -> Option<String> {
        self.test_get_value(glean, storage_name).map(|snapshot| {
            // Stored values were validated on the way in, so re-parsing here
            // is expected to succeed.
            serde_json::to_string(
                &Jwe::from_str(&snapshot).expect("Stored JWE metric should be valid JWE value."),
            )
            .unwrap()
        })
    }
}
#[cfg(test)]
mod test {
use super::*;
const HEADER: &str = "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ";
const KEY: &str = "OKOawDo13gRp2ojaHV7LFpZcgV7T6DVZKTyKOMTYUmKoTCVJRgckCL9kiMT03JGeipsEdY3mx_etLbbWSrFr05kLzcSr4qKAq7YN7e9jwQRb23nfa6c9d-StnImGyFDbSv04uVuxIp5Zms1gNxKKK2Da14B8S4rzVRltdYwam_lDp5XnZAYpQdb76FdIKLaVmqgfwX7XWRxv2322i-vDxRfqNzo_tETKzpVLzfiwQyeyPGLBIO56YJ7eObdv0je81860ppamavo35UgoRdbYaBcoh9QcfylQr66oc6vFWXRcZ_ZT2LawVCWTIy3brGPi6UklfCpIMfIjf7iGdXKHzg";
const INIT_VECTOR: &str = "48V1_ALb6US04U3b";
const CIPHER_TEXT: &str =
"5eym8TW_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFShS8iB7j6jiSdiwkIr3ajwQzaBtQD_A";
const AUTH_TAG: &str = "XFBoMYUZodetZdvTiFvSkQ";
const JWE: &str = "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ.OKOawDo13gRp2ojaHV7LFpZcgV7T6DVZKTyKOMTYUmKoTCVJRgckCL9kiMT03JGeipsEdY3mx_etLbbWSrFr05kLzcSr4qKAq7YN7e9jwQRb23nfa6c9d-StnImGyFDbSv04uVuxIp5Zms1gNxKKK2Da14B8S4rzVRltdYwam_lDp5XnZAYpQdb76FdIKLaVmqgfwX7XWRxv2322i-vDxRfqNzo_tETKzpVLzfiwQyeyPGLBIO56YJ7eObdv0je81860ppamavo35UgoRdbYaBcoh9QcfylQr66oc6vFWXRcZ_ZT2LawVCWTIy3brGPi6UklfCpIMfIjf7iGdXKHzg.48V1_ALb6US04U3b.5eym8TW_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFShS8iB7j6jiSdiwkIr3ajwQzaBtQD_A.XFBoMYUZodetZdvTiFvSkQ";
#[test]
fn generates_jwe_from_correct_input() {
let jwe = Jwe::from_str(JWE).unwrap();
assert_eq!(jwe.header, HEADER);
assert_eq!(jwe.key, KEY);
assert_eq!(jwe.init_vector, INIT_VECTOR);
assert_eq!(jwe.cipher_text, CIPHER_TEXT);
assert_eq!(jwe.auth_tag, AUTH_TAG);
assert!(Jwe::new(HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG).is_ok());
}
#[test]
fn jwe_validates_header_value_correctly() {
// When header is empty, correct error is returned
match Jwe::new("", KEY, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
// When header is bigger than max size, correct error is returned
let too_long = (0..1025).map(|_| "X").collect::<String>();
match Jwe::new(
too_long,
KEY.into(),
INIT_VECTOR.into(),
CIPHER_TEXT.into(),
AUTH_TAG.into(),
) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
}
// When header is not valid BASE64URL, correct error is returned
let not64 = "inv@alid value!";
match Jwe::new(not64, KEY, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
}
#[test]
fn jwe_validates_key_value_correctly() {
// When key is empty,JWE is created
assert!(Jwe::new(HEADER, "", INIT_VECTOR, CIPHER_TEXT, AUTH_TAG).is_ok());
// When key is bigger than max size, correct error is returned
let too_long = (0..1025).map(|_| "X").collect::<String>();
match Jwe::new(HEADER, &too_long, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
}
// When key is not valid BASE64URL, correct error is returned
let not64 = "inv@alid value!";
match Jwe::new(HEADER, not64, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
}
#[test]
fn jwe_validates_init_vector_value_correctly() {
// When init_vector is empty, JWE is created
assert!(Jwe::new(HEADER, KEY, "", CIPHER_TEXT, AUTH_TAG).is_ok());
// When init_vector is not the correct size, correct error is returned
match Jwe::new(HEADER, KEY, "foo", CIPHER_TEXT, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
}
// When init_vector is not valid BASE64URL, correct error is returned
let not64 = "inv@alid value!!";
match Jwe::new(HEADER, KEY, not64, CIPHER_TEXT, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
}
#[test]
fn jwe_validates_cipher_text_value_correctly() {
// When cipher_text is empty, correct error is returned
match Jwe::new(HEADER, KEY, INIT_VECTOR, "", AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
// When cipher_text is bigger than max size, correct error is returned
let too_long = (0..1025).map(|_| "X").collect::<String>();
match Jwe::new(HEADER, KEY, INIT_VECTOR, &too_long, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
}
// When cipher_text is not valid BASE64URL, correct error is returned
let not64 = "inv@alid value!";
match Jwe::new(HEADER, KEY, INIT_VECTOR, not64, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
}
#[test]
fn jwe_validates_auth_tag_value_correctly() {
// When auth_tag is empty, JWE is created
assert!(Jwe::new(HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, "").is_ok());
// When auth_tag is not the correct size, correct error is returned
match Jwe::new(HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, "foo") {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
}
// When auth_tag is not valid BASE64URL, correct error is returned
let not64 = "inv@alid value!!!!!!!!";
match Jwe::new(HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, not64) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
}
#[test]
fn tranforms_jwe_struct_to_string_correctly() {
let jwe = Jwe::from_str(JWE).unwrap();
assert_eq!(jwe.to_string(), JWE);
}
#[test]
fn validates_base64url_correctly() {
assert!(validate_base64url_encoding(
"0987654321AaBbCcDdEeFfGgHhIiKkLlMmNnOoPpQqRrSsTtUuVvXxWwYyZz-_"
));
assert!(validate_base64url_encoding(""));
assert!(!validate_base64url_encoding("aa aa"));
assert!(!validate_base64url_encoding("aa.aa"));
assert!(!validate_base64url_encoding("!nv@lid-val*e"));
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::fmt;
use std::str::FromStr;
use serde::Serialize;
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::{Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
// Maximum length, in characters, allowed for each variable-size JWE element
// (header, key and cipher text). This is a Glean-imposed constraint, not part
// of the JWE spec (see the `Jwe` struct docs below).
const DEFAULT_MAX_CHARS_PER_VARIABLE_SIZE_ELEMENT: usize = 1024;
/// Verifies if a string is [`BASE64URL`](https://tools.ietf.org/html/rfc4648#section-5) compliant.
///
/// As such, the string must match the regex: `[a-zA-Z0-9\-\_]*`.
///
/// > **Note** As described in the [JWS specification](https://tools.ietf.org/html/rfc7515#section-2),
/// > the BASE64URL encoding used by JWE discards any padding,
/// > that is why we can ignore that for this validation.
///
/// The regex crate isn't used here because it adds to the binary size,
/// and the Glean SDK doesn't use regular expressions anywhere else.
fn validate_base64url_encoding(value: &str) -> bool {
    // Every character must belong to the unpadded BASE64URL alphabet;
    // the empty string is trivially valid.
    value
        .chars()
        .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_')
}
/// Representation of a [JWE](https://tools.ietf.org/html/rfc7516).
///
/// All elements are stored in their already BASE64URL-encoded string form;
/// this struct only validates, it performs no encryption or decoding.
///
/// **Note** Variable sized elements will be constrained to a length of DEFAULT_MAX_CHARS_PER_VARIABLE_SIZE_ELEMENT,
/// this is a constraint introduced by Glean to prevent abuses and not part of the spec.
#[derive(Serialize)]
struct Jwe {
    /// A variable-size JWE protected header.
    header: String,
    /// A variable-size [encrypted key](https://tools.ietf.org/html/rfc7516#appendix-A.1.3).
    /// This can be an empty octet sequence.
    key: String,
    /// A fixed-size, 96-bit, base64 encoded [JWE Initialization vector](https://tools.ietf.org/html/rfc7516#appendix-A.1.4) (e.g. “48V1_ALb6US04U3b”).
    /// If not required by the encryption algorithm, can be an empty octet sequence.
    init_vector: String,
    /// The variable-size base64 encoded cipher text.
    cipher_text: String,
    /// A fixed-size, 132-bit, base64 encoded authentication tag.
    /// Can be an empty octet sequence.
    auth_tag: String,
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl Jwe {
    /// Creates a new JWE struct from its (already encoded) elements.
    ///
    /// Each element is validated: required elements must be non-empty,
    /// variable-size elements are bounded in length, fixed-size elements must
    /// encode exactly the expected number of bits (or be empty), and every
    /// element must be BASE64URL encoded.
    ///
    /// # Returns
    ///
    /// The validated `Jwe` on success, or a `(ErrorType, message)` pair naming
    /// the offending element on failure.
    fn new<S: Into<String>>(
        header: S,
        key: S,
        init_vector: S,
        cipher_text: S,
        auth_tag: S,
    ) -> Result<Self, (ErrorType, String)> {
        // `header` is required, bounded in size and must be BASE64URL.
        let mut header = header.into();
        header = Self::validate_non_empty("header", header)?;
        header = Self::validate_max_size("header", header)?;
        header = Self::validate_base64url_encoding("header", header)?;

        // `key` may be an empty octet sequence, but is bounded and BASE64URL.
        let mut key = key.into();
        key = Self::validate_max_size("key", key)?;
        key = Self::validate_base64url_encoding("key", key)?;

        // `init_vector` is either empty or encodes exactly 96 bits.
        let mut init_vector = init_vector.into();
        init_vector = Self::validate_fixed_size_or_empty("init_vector", init_vector, 96)?;
        init_vector = Self::validate_base64url_encoding("init_vector", init_vector)?;

        // `cipher_text` is required, bounded in size and must be BASE64URL.
        let mut cipher_text = cipher_text.into();
        cipher_text = Self::validate_non_empty("cipher_text", cipher_text)?;
        cipher_text = Self::validate_max_size("cipher_text", cipher_text)?;
        cipher_text = Self::validate_base64url_encoding("cipher_text", cipher_text)?;

        // `auth_tag` is either empty or encodes exactly 128 bits.
        let mut auth_tag = auth_tag.into();
        auth_tag = Self::validate_fixed_size_or_empty("auth_tag", auth_tag, 128)?;
        auth_tag = Self::validate_base64url_encoding("auth_tag", auth_tag)?;

        Ok(Self {
            header,
            key,
            init_vector,
            cipher_text,
            auth_tag,
        })
    }

    /// Checks that `value` is valid BASE64URL, returning it unchanged on success.
    fn validate_base64url_encoding(
        name: &str,
        value: String,
    ) -> Result<String, (ErrorType, String)> {
        if !validate_base64url_encoding(&value) {
            return Err((
                ErrorType::InvalidValue,
                format!("`{}` element in JWE value is not valid BASE64URL.", name),
            ));
        }

        Ok(value)
    }

    /// Checks that `value` is not empty, returning it unchanged on success.
    fn validate_non_empty(name: &str, value: String) -> Result<String, (ErrorType, String)> {
        if value.is_empty() {
            return Err((
                ErrorType::InvalidValue,
                format!("`{}` element in JWE value must not be empty.", name),
            ));
        }

        Ok(value)
    }

    /// Checks that `value` does not exceed the maximum length for
    /// variable-size elements, returning it unchanged on success.
    fn validate_max_size(name: &str, value: String) -> Result<String, (ErrorType, String)> {
        if value.len() > DEFAULT_MAX_CHARS_PER_VARIABLE_SIZE_ELEMENT {
            return Err((
                ErrorType::InvalidOverflow,
                format!(
                    "`{}` element in JWE value must not exceed {} characters.",
                    name, DEFAULT_MAX_CHARS_PER_VARIABLE_SIZE_ELEMENT
                ),
            ));
        }

        Ok(value)
    }

    /// Checks that `value` is either empty or encodes exactly `size_in_bits`
    /// bits of data, returning it unchanged on success.
    fn validate_fixed_size_or_empty(
        name: &str,
        value: String,
        size_in_bits: usize,
    ) -> Result<String, (ErrorType, String)> {
        // Each BASE64 digit represents exactly 6 bits of data, so the expected
        // character count is `size_in_bits / 6` rounded up. Integer ceiling
        // division is exact for every `usize`, unlike the previous `f32`
        // division, which loses precision for large values.
        let num_chars = (size_in_bits + 5) / 6;
        if !value.is_empty() && value.len() != num_chars {
            return Err((
                ErrorType::InvalidOverflow,
                format!(
                    "`{}` element in JWE value must have exactly {}-bits or be empty.",
                    name, size_in_bits
                ),
            ));
        }

        Ok(value)
    }
}
/// Trait implementation to convert a JWE [`compact representation`](https://tools.ietf.org/html/rfc7516#appendix-A.2.7)
/// string into a Jwe struct.
impl FromStr for Jwe {
    type Err = (ErrorType, String);

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let elements: Vec<&str> = s.split('.').collect();

        // A compact representation consists of exactly five dot-separated
        // elements; anything else is malformed.
        if let [header, key, init_vector, cipher_text, auth_tag] = elements[..] {
            Self::new(header, key, init_vector, cipher_text, auth_tag)
        } else {
            Err((
                ErrorType::InvalidValue,
                "JWE value is not formatted as expected.".into(),
            ))
        }
    }
}
/// Trait implementation to print the Jwe struct as the proper JWE [`compact representation`](https://tools.ietf.org/html/rfc7516#appendix-A.2.7).
impl fmt::Display for Jwe {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The compact representation is the five elements, in order,
        // separated by dots.
        let elements = [
            self.header.as_str(),
            self.key.as_str(),
            self.init_vector.as_str(),
            self.cipher_text.as_str(),
            self.auth_tag.as_str(),
        ];
        f.write_str(&elements.join("."))
    }
}
/// A JWE metric.
///
/// This metric works as a "transport" for JWE encrypted data.
///
/// The actual encryption is done somewhere else,
/// Glean must only make sure the data is valid JWE.
#[derive(Clone, Debug)]
pub struct JweMetric {
    /// The common metric metadata shared by all metric types.
    meta: CommonMetricData,
}
// Gives the generic metric machinery access to this metric's common metadata.
impl MetricType for JweMetric {
    /// Returns a reference to this metric's `CommonMetricData`.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }

    /// Returns a mutable reference to this metric's `CommonMetricData`.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
impl JweMetric {
    /// Creates a new JWE metric.
    pub fn new(meta: CommonMetricData) -> Self {
        Self { meta }
    }

    /// Sets to the specified JWE value.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance this metric belongs to.
    /// * `value` - the [`compact representation`](https://tools.ietf.org/html/rfc7516#appendix-A.2.7) of a JWE value.
    pub fn set_with_compact_representation<S: Into<String>>(&self, glean: &Glean, value: S) {
        if !self.should_record(glean) {
            return;
        }

        // Only store the raw string when it parses as a well-formed JWE;
        // otherwise record the validation error.
        let compact = value.into();
        match Jwe::from_str(&compact) {
            Ok(_) => {
                glean
                    .storage()
                    .record(glean, &self.meta, &Metric::Jwe(compact))
            }
            Err((error_type, msg)) => record_error(glean, &self.meta, error_type, msg, None),
        };
    }

    /// Builds a JWE value from its elements and set to it.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance this metric belongs to.
    /// * `header` - the JWE Protected Header element.
    /// * `key` - the JWE Encrypted Key element.
    /// * `init_vector` - the JWE Initialization Vector element.
    /// * `cipher_text` - the JWE Ciphertext element.
    /// * `auth_tag` - the JWE Authentication Tag element.
    pub fn set<S: Into<String>>(
        &self,
        glean: &Glean,
        header: S,
        key: S,
        init_vector: S,
        cipher_text: S,
        auth_tag: S,
    ) {
        if !self.should_record(glean) {
            return;
        }

        // Validate the individual elements, then store the serialized
        // compact representation on success.
        match Jwe::new(header, key, init_vector, cipher_text, auth_tag) {
            Ok(jwe) => glean
                .storage()
                .record(glean, &self.meta, &Metric::Jwe(jwe.to_string())),
            Err((error_type, msg)) => record_error(glean, &self.meta, error_type, msg, None),
        };
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value as a string.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<String> {
        let snapshot = StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        );
        if let Some(Metric::Jwe(compact)) = snapshot {
            Some(compact)
        } else {
            None
        }
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored JWE as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(
        &self,
        glean: &Glean,
        storage_name: &str,
    ) -> Option<String> {
        self.test_get_value(glean, storage_name).map(|compact| {
            let jwe =
                Jwe::from_str(&compact).expect("Stored JWE metric should be valid JWE value.");
            serde_json::to_string(&jwe).unwrap()
        })
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // JWE element test vectors; the values match the RSA-OAEP/A256GCM example
    // in RFC 7516, appendix A.1.
    const HEADER: &str = "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ";
    const KEY: &str = "OKOawDo13gRp2ojaHV7LFpZcgV7T6DVZKTyKOMTYUmKoTCVJRgckCL9kiMT03JGeipsEdY3mx_etLbbWSrFr05kLzcSr4qKAq7YN7e9jwQRb23nfa6c9d-StnImGyFDbSv04uVuxIp5Zms1gNxKKK2Da14B8S4rzVRltdYwam_lDp5XnZAYpQdb76FdIKLaVmqgfwX7XWRxv2322i-vDxRfqNzo_tETKzpVLzfiwQyeyPGLBIO56YJ7eObdv0je81860ppamavo35UgoRdbYaBcoh9QcfylQr66oc6vFWXRcZ_ZT2LawVCWTIy3brGPi6UklfCpIMfIjf7iGdXKHzg";
    const INIT_VECTOR: &str = "48V1_ALb6US04U3b";
    const CIPHER_TEXT: &str =
        "5eym8TW_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFShS8iB7j6jiSdiwkIr3ajwQzaBtQD_A";
    const AUTH_TAG: &str = "XFBoMYUZodetZdvTiFvSkQ";
    // The five elements above joined by '.' (the compact representation).
    const JWE: &str = "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ.OKOawDo13gRp2ojaHV7LFpZcgV7T6DVZKTyKOMTYUmKoTCVJRgckCL9kiMT03JGeipsEdY3mx_etLbbWSrFr05kLzcSr4qKAq7YN7e9jwQRb23nfa6c9d-StnImGyFDbSv04uVuxIp5Zms1gNxKKK2Da14B8S4rzVRltdYwam_lDp5XnZAYpQdb76FdIKLaVmqgfwX7XWRxv2322i-vDxRfqNzo_tETKzpVLzfiwQyeyPGLBIO56YJ7eObdv0je81860ppamavo35UgoRdbYaBcoh9QcfylQr66oc6vFWXRcZ_ZT2LawVCWTIy3brGPi6UklfCpIMfIjf7iGdXKHzg.48V1_ALb6US04U3b.5eym8TW_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFShS8iB7j6jiSdiwkIr3ajwQzaBtQD_A.XFBoMYUZodetZdvTiFvSkQ";

    #[test]
    fn generates_jwe_from_correct_input() {
        // Parsing the compact representation must yield the individual elements,
        // and building from those elements must succeed.
        let jwe = Jwe::from_str(JWE).unwrap();
        assert_eq!(jwe.header, HEADER);
        assert_eq!(jwe.key, KEY);
        assert_eq!(jwe.init_vector, INIT_VECTOR);
        assert_eq!(jwe.cipher_text, CIPHER_TEXT);
        assert_eq!(jwe.auth_tag, AUTH_TAG);

        assert!(Jwe::new(HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG).is_ok());
    }

    #[test]
    fn jwe_validates_header_value_correctly() {
        // When header is empty, correct error is returned
        match Jwe::new("", KEY, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG) {
            Ok(_) => panic!("Should not have built JWE successfully."),
            Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
        }

        // When header is bigger than max size, correct error is returned
        let too_long = (0..1025).map(|_| "X").collect::<String>();
        match Jwe::new(
            too_long,
            KEY.into(),
            INIT_VECTOR.into(),
            CIPHER_TEXT.into(),
            AUTH_TAG.into(),
        ) {
            Ok(_) => panic!("Should not have built JWE successfully."),
            Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
        }

        // When header is not valid BASE64URL, correct error is returned
        let not64 = "inv@alid value!";
        match Jwe::new(not64, KEY, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG) {
            Ok(_) => panic!("Should not have built JWE successfully."),
            Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
        }
    }

    #[test]
    fn jwe_validates_key_value_correctly() {
        // When key is empty,JWE is created
        assert!(Jwe::new(HEADER, "", INIT_VECTOR, CIPHER_TEXT, AUTH_TAG).is_ok());

        // When key is bigger than max size, correct error is returned
        let too_long = (0..1025).map(|_| "X").collect::<String>();
        match Jwe::new(HEADER, &too_long, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG) {
            Ok(_) => panic!("Should not have built JWE successfully."),
            Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
        }

        // When key is not valid BASE64URL, correct error is returned
        let not64 = "inv@alid value!";
        match Jwe::new(HEADER, not64, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG) {
            Ok(_) => panic!("Should not have built JWE successfully."),
            Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
        }
    }

    #[test]
    fn jwe_validates_init_vector_value_correctly() {
        // When init_vector is empty, JWE is created
        assert!(Jwe::new(HEADER, KEY, "", CIPHER_TEXT, AUTH_TAG).is_ok());

        // When init_vector is not the correct size, correct error is returned
        match Jwe::new(HEADER, KEY, "foo", CIPHER_TEXT, AUTH_TAG) {
            Ok(_) => panic!("Should not have built JWE successfully."),
            Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
        }

        // When init_vector is not valid BASE64URL, correct error is returned
        // (the value is 16 chars long, so the size check passes first).
        let not64 = "inv@alid value!!";
        match Jwe::new(HEADER, KEY, not64, CIPHER_TEXT, AUTH_TAG) {
            Ok(_) => panic!("Should not have built JWE successfully."),
            Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
        }
    }

    #[test]
    fn jwe_validates_cipher_text_value_correctly() {
        // When cipher_text is empty, correct error is returned
        match Jwe::new(HEADER, KEY, INIT_VECTOR, "", AUTH_TAG) {
            Ok(_) => panic!("Should not have built JWE successfully."),
            Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
        }

        // When cipher_text is bigger than max size, correct error is returned
        let too_long = (0..1025).map(|_| "X").collect::<String>();
        match Jwe::new(HEADER, KEY, INIT_VECTOR, &too_long, AUTH_TAG) {
            Ok(_) => panic!("Should not have built JWE successfully."),
            Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
        }

        // When cipher_text is not valid BASE64URL, correct error is returned
        let not64 = "inv@alid value!";
        match Jwe::new(HEADER, KEY, INIT_VECTOR, not64, AUTH_TAG) {
            Ok(_) => panic!("Should not have built JWE successfully."),
            Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
        }
    }

    #[test]
    fn jwe_validates_auth_tag_value_correctly() {
        // When auth_tag is empty, JWE is created
        assert!(Jwe::new(HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, "").is_ok());

        // When auth_tag is not the correct size, correct error is returned
        match Jwe::new(HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, "foo") {
            Ok(_) => panic!("Should not have built JWE successfully."),
            Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
        }

        // When auth_tag is not valid BASE64URL, correct error is returned
        // (the value is 22 chars long, so the size check passes first).
        let not64 = "inv@alid value!!!!!!!!";
        match Jwe::new(HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, not64) {
            Ok(_) => panic!("Should not have built JWE successfully."),
            Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
        }
    }

    #[test]
    fn tranforms_jwe_struct_to_string_correctly() {
        // Parse and re-serialize: the compact representation must round-trip.
        let jwe = Jwe::from_str(JWE).unwrap();
        assert_eq!(jwe.to_string(), JWE);
    }

    #[test]
    fn validates_base64url_correctly() {
        assert!(validate_base64url_encoding(
            "0987654321AaBbCcDdEeFfGgHhIiKkLlMmNnOoPpQqRrSsTtUuVvXxWwYyZz-_"
        ));
        assert!(validate_base64url_encoding(""));
        assert!(!validate_base64url_encoding("aa aa"));
        assert!(!validate_base64url_encoding("aa.aa"));
        assert!(!validate_base64url_encoding("!nv@lid-val*e"));
    }
}

Просмотреть файл

@ -1,252 +1,252 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::common_metric_data::CommonMetricData;
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::{Metric, MetricType};
use crate::Glean;
// Maximum number of distinct dynamic labels recorded per metric; once reached,
// further labels are bucketed under `OTHER_LABEL` (see `dynamic_label`).
const MAX_LABELS: usize = 16;
// Special label used for invalid or overflowing labels.
const OTHER_LABEL: &str = "__other__";
// Maximum length, in bytes, accepted for a dynamic label.
const MAX_LABEL_LENGTH: usize = 61;
/// Checks whether the given value matches the label regex.
///
/// This regex is used for matching against labels and should allow for dots,
/// underscores, and/or hyphens. Labels are also limited to starting with either
/// a letter or an underscore character.
///
/// The exact regex (from the pipeline schema [here](https://github.com/mozilla-services/mozilla-pipeline-schemas/blob/master/templates/include/glean/dot_separated_short_id.1.schema.json)) is:
///
/// "^[a-z_][a-z0-9_-]{0,29}(\\.[a-z_][a-z0-9_-]{0,29})*$"
///
/// The regex crate isn't used here because it adds to the binary size, and the
/// Glean SDK doesn't use regular expressions anywhere else.
///
/// Some examples of good and bad labels:
///
/// Good:
/// * `this.is.fine`
/// * `this_is_fine_too`
/// * `this.is_still_fine`
/// * `thisisfine`
/// * `_.is_fine`
/// * `this.is-fine`
/// * `this-is-fine`
/// Bad:
/// * `this.is.not_fine_due_tu_the_length_being_too_long_i_thing.i.guess`
/// * `1.not_fine`
/// * `this.$isnotfine`
/// * `-.not_fine`
fn matches_label_regex(value: &str) -> bool {
    let mut iter = value.chars();

    loop {
        // Match the first letter in the word: `[a-z_]`.
        match iter.next() {
            Some('_') | Some('a'..='z') => (),
            _ => return false,
        };

        // Match subsequent letters in the word: `[a-z0-9_-]`.
        // `count` tracks the total word length, including the first character.
        let mut count = 1;
        loop {
            match iter.next() {
                // We are done, so the whole expression is valid.
                None => return true,
                // Valid characters.
                Some('_') | Some('-') | Some('a'..='z') | Some('0'..='9') => (),
                // We ended a word, so iterate over the outer loop again.
                Some('.') => break,
                // An invalid character
                _ => return false,
            }

            count += 1;
            // The schema regex allows up to 30 characters per word: one
            // leading `[a-z_]` plus at most 29 `[a-z0-9_-]`. (The previous
            // `count == 29` check, with `count` starting at 0, cut words off
            // one character early, rejecting 30-character words the documented
            // regex accepts.)
            if count > 30 {
                return false;
            }
        }
    }
}
/// A labeled metric.
///
/// Labeled metrics allow to record multiple sub-metrics of the same type under different string labels.
#[derive(Clone, Debug)]
pub struct LabeledMetric<T> {
    /// The allowed static labels, if declared in `metrics.yaml`;
    /// `None` means labels are dynamic and validated at record time.
    labels: Option<Vec<String>>,
    /// Type of the underlying metric
    /// We hold on to an instance of it, which is cloned to create new modified instances.
    submetric: T,
}
impl<T> LabeledMetric<T>
where
    T: MetricType + Clone,
{
    /// Creates a new labeled metric from the given metric instance and optional list of labels.
    ///
    /// See [`get`](#method.get) for information on how static or dynamic labels are handled.
    pub fn new(submetric: T, labels: Option<Vec<String>>) -> LabeledMetric<T> {
        LabeledMetric { labels, submetric }
    }

    /// Creates a new metric with a specific label.
    ///
    /// This is used for static labels where we can just set the name to be `name/label`.
    fn new_metric_with_name(&self, name: String) -> T {
        let mut metric = self.submetric.clone();
        metric.meta_mut().name = name;
        metric
    }

    /// Creates a new metric with a specific label.
    ///
    /// This is used for dynamic labels where we have to actually validate and correct the
    /// label later when we have a Glean object.
    fn new_metric_with_dynamic_label(&self, label: String) -> T {
        let mut metric = self.submetric.clone();
        metric.meta_mut().dynamic_label = Some(label);
        metric
    }

    /// Creates a static label.
    ///
    /// # Safety
    ///
    /// Should only be called when static labels are available on this metric.
    ///
    /// # Arguments
    ///
    /// * `label` - The requested label
    ///
    /// # Returns
    ///
    /// The requested label if it is in the list of allowed labels.
    /// Otherwise `OTHER_LABEL` is returned.
    fn static_label<'a>(&self, label: &'a str) -> &'a str {
        debug_assert!(self.labels.is_some());
        let allowed = self.labels.as_ref().unwrap();

        match allowed.iter().find(|candidate| *candidate == label) {
            Some(_) => label,
            None => OTHER_LABEL,
        }
    }

    /// Gets a specific metric for a given label.
    ///
    /// If a set of acceptable labels were specified in the `metrics.yaml` file,
    /// and the given label is not in the set, it will be recorded under the special `OTHER_LABEL` label.
    ///
    /// If a set of acceptable labels was not specified in the `metrics.yaml` file,
    /// only the first 16 unique labels will be used.
    /// After that, any additional labels will be recorded under the special `OTHER_LABEL` label.
    ///
    /// Labels must be `snake_case` and less than 30 characters.
    /// If an invalid label is used, the metric will be recorded in the special `OTHER_LABEL` label.
    pub fn get(&self, label: &str) -> T {
        // Static labels are resolved in memory right away. Dynamic labels are
        // validated against the database later, when the metric is recorded
        // and an initialized Glean object is guaranteed to exist.
        if self.labels.is_some() {
            let checked = self.static_label(label);
            let name = combine_base_identifier_and_label(&self.submetric.meta().name, checked);
            self.new_metric_with_name(name)
        } else {
            self.new_metric_with_dynamic_label(label.to_string())
        }
    }

    /// Gets the template submetric.
    ///
    /// The template submetric is the actual metric that is cloned and modified
    /// to record for a specific label.
    pub fn get_submetric(&self) -> &T {
        &self.submetric
    }
}
/// Combines a metric's base identifier and label into the full identifier
/// used for storage, in the form `base_identifier/label`.
// Fixes the `base_identifer` typo in the parameter name; in Rust parameter
// names are not part of the call syntax, so this is interface-compatible.
pub fn combine_base_identifier_and_label(base_identifier: &str, label: &str) -> String {
    format!("{}/{}", base_identifier, label)
}
/// Strips the label off of a complete identifier, returning the base identifier.
pub fn strip_label(identifier: &str) -> &str {
    // Everything before the first '/' is the base identifier; identifiers
    // without a separator are already bare base identifiers.
    match identifier.find('/') {
        Some(separator) => &identifier[..separator],
        None => identifier,
    }
}
/// Validates a dynamic label, changing it to OTHER_LABEL if it's invalid.
///
/// Checks the requested label against limitations, such as the label length and allowed
/// characters.
///
/// # Arguments
///
/// * `label` - The requested label
///
/// # Returns
///
/// The entire identifier for the metric, including the base identifier and the corrected label.
/// The errors are logged.
pub fn dynamic_label(
    glean: &Glean,
    meta: &CommonMetricData,
    base_identifier: &str,
    label: &str,
) -> String {
    let key = combine_base_identifier_and_label(base_identifier, label);
    // Fast path: if this exact label was already recorded in any of the target
    // pings, it passed validation before, so accept it again unchanged.
    for store in &meta.send_in_pings {
        if glean.storage().has_metric(meta.lifetime, store, &key) {
            return key;
        }
    }
    // Count the labels already stored for this base metric. The `..=` slice
    // keeps the trailing '/' separator in the prefix, so only keys belonging
    // to this exact base identifier are counted.
    let mut label_count = 0;
    let prefix = &key[..=base_identifier.len()];
    let mut snapshotter = |_: &[u8], _: &Metric| {
        label_count += 1;
    };
    let lifetime = meta.lifetime;
    for store in &meta.send_in_pings {
        glean
            .storage()
            .iter_store_from(lifetime, store, Some(&prefix), &mut snapshotter);
    }
    // A new label is rejected when the per-metric label budget is exhausted,
    // when it is too long, or when it doesn't match the allowed label format.
    // Only the latter two record an error; hitting MAX_LABELS is silent.
    let error = if label_count >= MAX_LABELS {
        true
    } else if label.len() > MAX_LABEL_LENGTH {
        let msg = format!(
            "label length {} exceeds maximum of {}",
            label.len(),
            MAX_LABEL_LENGTH
        );
        record_error(glean, meta, ErrorType::InvalidLabel, msg, None);
        true
    } else if !matches_label_regex(label) {
        let msg = format!("label must be snake_case, got '{}'", label);
        record_error(glean, meta, ErrorType::InvalidLabel, msg, None);
        true
    } else {
        false
    };
    if error {
        // All rejected labels get bucketed under the special `__other__` label.
        combine_base_identifier_and_label(base_identifier, OTHER_LABEL)
    } else {
        key
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::common_metric_data::CommonMetricData;
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::{Metric, MetricType};
use crate::Glean;
// Maximum number of distinct dynamic labels recorded per metric; once reached,
// further labels are bucketed under `OTHER_LABEL` (see `dynamic_label`).
const MAX_LABELS: usize = 16;
// Special label used for invalid or overflowing labels.
const OTHER_LABEL: &str = "__other__";
// Maximum length, in bytes, accepted for a dynamic label.
const MAX_LABEL_LENGTH: usize = 61;
/// Checks whether the given value matches the label regex.
///
/// This regex is used for matching against labels and should allow for dots,
/// underscores, and/or hyphens. Labels are also limited to starting with either
/// a letter or an underscore character.
///
/// The exact regex (from the pipeline schema [here](https://github.com/mozilla-services/mozilla-pipeline-schemas/blob/master/templates/include/glean/dot_separated_short_id.1.schema.json)) is:
///
/// "^[a-z_][a-z0-9_-]{0,29}(\\.[a-z_][a-z0-9_-]{0,29})*$"
///
/// The regex crate isn't used here because it adds to the binary size, and the
/// Glean SDK doesn't use regular expressions anywhere else.
///
/// Some examples of good and bad labels:
///
/// Good:
/// * `this.is.fine`
/// * `this_is_fine_too`
/// * `this.is_still_fine`
/// * `thisisfine`
/// * `_.is_fine`
/// * `this.is-fine`
/// * `this-is-fine`
/// Bad:
/// * `this.is.not_fine_due_tu_the_length_being_too_long_i_thing.i.guess`
/// * `1.not_fine`
/// * `this.$isnotfine`
/// * `-.not_fine`
fn matches_label_regex(value: &str) -> bool {
    let mut iter = value.chars();

    loop {
        // Match the first letter in the word: `[a-z_]`.
        match iter.next() {
            Some('_') | Some('a'..='z') => (),
            _ => return false,
        };

        // Match subsequent letters in the word: `[a-z0-9_-]`.
        // `count` tracks the total word length, including the first character.
        let mut count = 1;
        loop {
            match iter.next() {
                // We are done, so the whole expression is valid.
                None => return true,
                // Valid characters.
                Some('_') | Some('-') | Some('a'..='z') | Some('0'..='9') => (),
                // We ended a word, so iterate over the outer loop again.
                Some('.') => break,
                // An invalid character
                _ => return false,
            }

            count += 1;
            // The schema regex allows up to 30 characters per word: one
            // leading `[a-z_]` plus at most 29 `[a-z0-9_-]`. (The previous
            // `count == 29` check, with `count` starting at 0, cut words off
            // one character early, rejecting 30-character words the documented
            // regex accepts.)
            if count > 30 {
                return false;
            }
        }
    }
}
/// A labeled metric.
///
/// Labeled metrics allow to record multiple sub-metrics of the same type under different string labels.
#[derive(Clone, Debug)]
pub struct LabeledMetric<T> {
    /// The allowed static labels, if declared in `metrics.yaml`;
    /// `None` means labels are dynamic and validated at record time.
    labels: Option<Vec<String>>,
    /// Type of the underlying metric
    /// We hold on to an instance of it, which is cloned to create new modified instances.
    submetric: T,
}
impl<T> LabeledMetric<T>
where
    T: MetricType + Clone,
{
    /// Creates a new labeled metric from the given metric instance and optional list of labels.
    ///
    /// See [`get`](#method.get) for information on how static or dynamic labels are handled.
    pub fn new(submetric: T, labels: Option<Vec<String>>) -> LabeledMetric<T> {
        LabeledMetric { labels, submetric }
    }

    /// Creates a new metric with a specific label.
    ///
    /// This is used for static labels where we can just set the name to be `name/label`.
    fn new_metric_with_name(&self, name: String) -> T {
        let mut metric = self.submetric.clone();
        metric.meta_mut().name = name;
        metric
    }

    /// Creates a new metric with a specific label.
    ///
    /// This is used for dynamic labels where we have to actually validate and correct the
    /// label later when we have a Glean object.
    fn new_metric_with_dynamic_label(&self, label: String) -> T {
        let mut metric = self.submetric.clone();
        metric.meta_mut().dynamic_label = Some(label);
        metric
    }

    /// Creates a static label.
    ///
    /// # Safety
    ///
    /// Should only be called when static labels are available on this metric.
    ///
    /// # Arguments
    ///
    /// * `label` - The requested label
    ///
    /// # Returns
    ///
    /// The requested label if it is in the list of allowed labels.
    /// Otherwise `OTHER_LABEL` is returned.
    fn static_label<'a>(&self, label: &'a str) -> &'a str {
        debug_assert!(self.labels.is_some());
        let allowed = self.labels.as_ref().unwrap();

        match allowed.iter().find(|candidate| *candidate == label) {
            Some(_) => label,
            None => OTHER_LABEL,
        }
    }

    /// Gets a specific metric for a given label.
    ///
    /// If a set of acceptable labels were specified in the `metrics.yaml` file,
    /// and the given label is not in the set, it will be recorded under the special `OTHER_LABEL` label.
    ///
    /// If a set of acceptable labels was not specified in the `metrics.yaml` file,
    /// only the first 16 unique labels will be used.
    /// After that, any additional labels will be recorded under the special `OTHER_LABEL` label.
    ///
    /// Labels must be `snake_case` and less than 30 characters.
    /// If an invalid label is used, the metric will be recorded in the special `OTHER_LABEL` label.
    pub fn get(&self, label: &str) -> T {
        // Static labels are resolved in memory right away. Dynamic labels are
        // validated against the database later, when the metric is recorded
        // and an initialized Glean object is guaranteed to exist.
        if self.labels.is_some() {
            let checked = self.static_label(label);
            let name = combine_base_identifier_and_label(&self.submetric.meta().name, checked);
            self.new_metric_with_name(name)
        } else {
            self.new_metric_with_dynamic_label(label.to_string())
        }
    }

    /// Gets the template submetric.
    ///
    /// The template submetric is the actual metric that is cloned and modified
    /// to record for a specific label.
    pub fn get_submetric(&self) -> &T {
        &self.submetric
    }
}
/// Combines a metric's base identifier and label into the full identifier
/// used for storage, in the form `base_identifier/label`.
// Fixes the `base_identifer` typo in the parameter name; in Rust parameter
// names are not part of the call syntax, so this is interface-compatible.
pub fn combine_base_identifier_and_label(base_identifier: &str, label: &str) -> String {
    format!("{}/{}", base_identifier, label)
}
/// Strips the label off of a complete identifier, returning the base identifier.
pub fn strip_label(identifier: &str) -> &str {
    // Everything before the first '/' is the base identifier; identifiers
    // without a separator are already bare base identifiers.
    match identifier.find('/') {
        Some(separator) => &identifier[..separator],
        None => identifier,
    }
}
/// Validates a dynamic label, changing it to OTHER_LABEL if it's invalid.
///
/// Checks the requested label against limitations, such as the label length and allowed
/// characters.
///
/// # Arguments
///
/// * `label` - The requested label
///
/// # Returns
///
/// The entire identifier for the metric, including the base identifier and the corrected label.
/// The errors are logged.
pub fn dynamic_label(
    glean: &Glean,
    meta: &CommonMetricData,
    base_identifier: &str,
    label: &str,
) -> String {
    let key = combine_base_identifier_and_label(base_identifier, label);
    // Fast path: if this exact label was already recorded in any of the target
    // pings, it passed validation before, so accept it again unchanged.
    for store in &meta.send_in_pings {
        if glean.storage().has_metric(meta.lifetime, store, &key) {
            return key;
        }
    }
    // Count the labels already stored for this base metric. The `..=` slice
    // keeps the trailing '/' separator in the prefix, so only keys belonging
    // to this exact base identifier are counted.
    let mut label_count = 0;
    let prefix = &key[..=base_identifier.len()];
    let mut snapshotter = |_: &[u8], _: &Metric| {
        label_count += 1;
    };
    let lifetime = meta.lifetime;
    for store in &meta.send_in_pings {
        glean
            .storage()
            .iter_store_from(lifetime, store, Some(&prefix), &mut snapshotter);
    }
    // A new label is rejected when the per-metric label budget is exhausted,
    // when it is too long, or when it doesn't match the allowed label format.
    // Only the latter two record an error; hitting MAX_LABELS is silent.
    let error = if label_count >= MAX_LABELS {
        true
    } else if label.len() > MAX_LABEL_LENGTH {
        let msg = format!(
            "label length {} exceeds maximum of {}",
            label.len(),
            MAX_LABEL_LENGTH
        );
        record_error(glean, meta, ErrorType::InvalidLabel, msg, None);
        true
    } else if !matches_label_regex(label) {
        let msg = format!("label must be snake_case, got '{}'", label);
        record_error(glean, meta, ErrorType::InvalidLabel, msg, None);
        true
    } else {
        false
    };
    if error {
        // All rejected labels get bucketed under the special `__other__` label.
        combine_base_identifier_and_label(base_identifier, OTHER_LABEL)
    } else {
        key
    }
}

Просмотреть файл

@ -1,212 +1,212 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::memory_unit::MemoryUnit;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
// The base of the logarithm used to determine bucketing.
const LOG_BASE: f64 = 2.0;

// The buckets per each order of magnitude of the logarithm.
const BUCKETS_PER_MAGNITUDE: f64 = 16.0;

// Set a maximum recordable value of 1 terabyte so the buckets aren't
// completely unbounded. Larger samples are clamped to this value.
const MAX_BYTES: u64 = 1 << 40;
/// A memory distribution metric.
///
/// Memory distributions are used to accumulate and store memory sizes.
#[derive(Debug)]
pub struct MemoryDistributionMetric {
    // Common metric metadata: name, category, lifetime, target pings.
    meta: CommonMetricData,
    // The unit incoming samples are expressed in; samples are converted
    // to bytes (via `MemoryUnit::as_bytes`) before accumulation.
    memory_unit: MemoryUnit,
}
/// Creates a snapshot of the given functional histogram.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
    // **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
    // specialized snapshot function.
    let values = hist.snapshot();
    let sum = hist.sum();
    DistributionData { values, sum }
}
impl MetricType for MemoryDistributionMetric {
    /// Accesses the stored metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }

    /// Accesses the stored metadata mutably.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl MemoryDistributionMetric {
    /// Creates a new memory distribution metric.
    ///
    /// # Arguments
    ///
    /// * `meta` - the common metric metadata.
    /// * `memory_unit` - the unit incoming samples are expressed in.
    pub fn new(meta: CommonMetricData, memory_unit: MemoryUnit) -> Self {
        Self { meta, memory_unit }
    }

    /// Accumulates the provided sample in the metric.
    ///
    /// # Arguments
    ///
    /// * `sample` - The sample to be recorded by the metric. The sample is assumed to be in the
    ///   configured memory unit of the metric.
    ///
    /// ## Notes
    ///
    /// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
    /// and an `ErrorType::InvalidValue` error is recorded.
    pub fn accumulate(&self, glean: &Glean, sample: u64) {
        if !self.should_record(glean) {
            return;
        }

        // Convert to bytes up front so every sample shares the same scale.
        let mut sample = self.memory_unit.as_bytes(sample);

        if sample > MAX_BYTES {
            let msg = "Sample is bigger than 1 terabyte";
            record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
            sample = MAX_BYTES;
        }

        glean
            .storage()
            .record_with(glean, &self.meta, |old_value| match old_value {
                Some(Metric::MemoryDistribution(mut hist)) => {
                    hist.accumulate(sample);
                    Metric::MemoryDistribution(hist)
                }
                _ => {
                    // First sample for this metric: start a fresh functional histogram.
                    let mut hist = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
                    hist.accumulate(sample);
                    Metric::MemoryDistribution(hist)
                }
            });
    }

    /// Accumulates the provided signed samples in the metric.
    ///
    /// This is required so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// Please note that this assumes that the provided samples are already in the
    /// "unit" declared by the instance of the implementing metric type (e.g. if the
    /// implementing class is a [MemoryDistributionMetricType] and the instance this
    /// method was called on is using [MemoryUnit.Kilobyte], then `samples` are assumed
    /// to be in that unit).
    ///
    /// # Arguments
    ///
    /// * `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and report an `ErrorType::InvalidValue`
    /// for each of them.
    /// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
    /// and an `ErrorType::InvalidValue` error is recorded.
    pub fn accumulate_samples_signed(&self, glean: &Glean, samples: Vec<i64>) {
        if !self.should_record(glean) {
            return;
        }

        let mut num_negative_samples = 0;
        // Fix: renamed from the typo'd `num_too_log_samples`; it counts
        // samples exceeding `MAX_BYTES` after unit conversion.
        let mut num_too_long_samples = 0;

        glean.storage().record_with(glean, &self.meta, |old_value| {
            let mut hist = match old_value {
                Some(Metric::MemoryDistribution(hist)) => hist,
                _ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
            };

            for &sample in samples.iter() {
                if sample < 0 {
                    num_negative_samples += 1;
                } else {
                    let sample = sample as u64;
                    let mut sample = self.memory_unit.as_bytes(sample);
                    if sample > MAX_BYTES {
                        num_too_long_samples += 1;
                        sample = MAX_BYTES;
                    }
                    hist.accumulate(sample);
                }
            }
            Metric::MemoryDistribution(hist)
        });

        // Report aggregated errors once, outside the storage closure.
        if num_negative_samples > 0 {
            let msg = format!("Accumulated {} negative samples", num_negative_samples);
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidValue,
                msg,
                num_negative_samples,
            );
        }

        if num_too_long_samples > 0 {
            let msg = format!(
                "Accumulated {} samples larger than 1TB",
                num_too_long_samples
            );
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidValue,
                msg,
                num_too_long_samples,
            );
        }
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value as a `DistributionData` snapshot.
    /// (Doc fix: the previous comment claimed an integer was returned.)
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        ) {
            Some(Metric::MemoryDistribution(hist)) => Some(snapshot(&hist)),
            _ => None,
        }
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently-stored histogram as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(
        &self,
        glean: &Glean,
        storage_name: &str,
    ) -> Option<String> {
        self.test_get_value(glean, storage_name)
            .map(|snapshot| serde_json::to_string(&snapshot).unwrap())
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::memory_unit::MemoryUnit;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
// The base of the logarithm used to determine bucketing.
const LOG_BASE: f64 = 2.0;

// The buckets per each order of magnitude of the logarithm.
const BUCKETS_PER_MAGNITUDE: f64 = 16.0;

// Set a maximum recordable value of 1 terabyte so the buckets aren't
// completely unbounded. Larger samples are clamped to this value.
const MAX_BYTES: u64 = 1 << 40;
/// A memory distribution metric.
///
/// Memory distributions are used to accumulate and store memory sizes.
#[derive(Debug)]
pub struct MemoryDistributionMetric {
    // Common metric metadata: name, category, lifetime, target pings.
    meta: CommonMetricData,
    // The unit incoming samples are expressed in; samples are converted
    // to bytes (via `MemoryUnit::as_bytes`) before accumulation.
    memory_unit: MemoryUnit,
}
/// Creates a snapshot of the given functional histogram.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
    // **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
    // specialized snapshot function.
    let values = hist.snapshot();
    let sum = hist.sum();
    DistributionData { values, sum }
}
impl MetricType for MemoryDistributionMetric {
    /// Accesses the stored metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }

    /// Accesses the stored metadata mutably.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl MemoryDistributionMetric {
    /// Creates a new memory distribution metric.
    ///
    /// # Arguments
    ///
    /// * `meta` - the common metric metadata.
    /// * `memory_unit` - the unit incoming samples are expressed in.
    pub fn new(meta: CommonMetricData, memory_unit: MemoryUnit) -> Self {
        Self { meta, memory_unit }
    }

    /// Accumulates the provided sample in the metric.
    ///
    /// # Arguments
    ///
    /// * `sample` - The sample to be recorded by the metric. The sample is assumed to be in the
    ///   configured memory unit of the metric.
    ///
    /// ## Notes
    ///
    /// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
    /// and an `ErrorType::InvalidValue` error is recorded.
    pub fn accumulate(&self, glean: &Glean, sample: u64) {
        if !self.should_record(glean) {
            return;
        }

        // Convert to bytes up front so every sample shares the same scale.
        let mut sample = self.memory_unit.as_bytes(sample);

        if sample > MAX_BYTES {
            let msg = "Sample is bigger than 1 terabyte";
            record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
            sample = MAX_BYTES;
        }

        glean
            .storage()
            .record_with(glean, &self.meta, |old_value| match old_value {
                Some(Metric::MemoryDistribution(mut hist)) => {
                    hist.accumulate(sample);
                    Metric::MemoryDistribution(hist)
                }
                _ => {
                    // First sample for this metric: start a fresh functional histogram.
                    let mut hist = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
                    hist.accumulate(sample);
                    Metric::MemoryDistribution(hist)
                }
            });
    }

    /// Accumulates the provided signed samples in the metric.
    ///
    /// This is required so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// Please note that this assumes that the provided samples are already in the
    /// "unit" declared by the instance of the implementing metric type (e.g. if the
    /// implementing class is a [MemoryDistributionMetricType] and the instance this
    /// method was called on is using [MemoryUnit.Kilobyte], then `samples` are assumed
    /// to be in that unit).
    ///
    /// # Arguments
    ///
    /// * `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and report an `ErrorType::InvalidValue`
    /// for each of them.
    /// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
    /// and an `ErrorType::InvalidValue` error is recorded.
    pub fn accumulate_samples_signed(&self, glean: &Glean, samples: Vec<i64>) {
        if !self.should_record(glean) {
            return;
        }

        let mut num_negative_samples = 0;
        // Fix: renamed from the typo'd `num_too_log_samples`; it counts
        // samples exceeding `MAX_BYTES` after unit conversion.
        let mut num_too_long_samples = 0;

        glean.storage().record_with(glean, &self.meta, |old_value| {
            let mut hist = match old_value {
                Some(Metric::MemoryDistribution(hist)) => hist,
                _ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
            };

            for &sample in samples.iter() {
                if sample < 0 {
                    num_negative_samples += 1;
                } else {
                    let sample = sample as u64;
                    let mut sample = self.memory_unit.as_bytes(sample);
                    if sample > MAX_BYTES {
                        num_too_long_samples += 1;
                        sample = MAX_BYTES;
                    }
                    hist.accumulate(sample);
                }
            }
            Metric::MemoryDistribution(hist)
        });

        // Report aggregated errors once, outside the storage closure.
        if num_negative_samples > 0 {
            let msg = format!("Accumulated {} negative samples", num_negative_samples);
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidValue,
                msg,
                num_negative_samples,
            );
        }

        if num_too_long_samples > 0 {
            let msg = format!(
                "Accumulated {} samples larger than 1TB",
                num_too_long_samples
            );
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidValue,
                msg,
                num_too_long_samples,
            );
        }
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value as a `DistributionData` snapshot.
    /// (Doc fix: the previous comment claimed an integer was returned.)
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        ) {
            Some(Metric::MemoryDistribution(hist)) => Some(snapshot(&hist)),
            _ => None,
        }
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently-stored histogram as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(
        &self,
        glean: &Glean,
        storage_name: &str,
    ) -> Option<String> {
        self.test_get_value(glean, storage_name)
            .map(|snapshot| serde_json::to_string(&snapshot).unwrap())
    }
}

Просмотреть файл

@ -1,64 +1,64 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use serde::{Deserialize, Serialize};
use crate::error::{Error, ErrorKind};
/// Different resolutions supported by the memory related metric types (e.g.
/// MemoryDistributionMetric).
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
#[repr(i32)] // use i32 to be compatible with our JNA definition
pub enum MemoryUnit {
    /// 1 byte
    Byte,
    /// 2^10 bytes
    Kilobyte,
    /// 2^20 bytes
    Megabyte,
    /// 2^30 bytes
    Gigabyte,
    // NOTE: variant order must match the platform bindings; see the
    // `TryFrom<i32>` impl below which maps 0..=3 onto these variants.
}
impl MemoryUnit {
/// Converts a value in the given unit to bytes.
///
/// # Arguments
///
/// * `value` - the value to convert.
///
/// # Returns
///
/// The integer representation of the byte value.
pub fn as_bytes(self, value: u64) -> u64 {
use MemoryUnit::*;
match self {
Byte => value,
Kilobyte => value << 10,
Megabyte => value << 20,
Gigabyte => value << 30,
}
}
}
/// Trait implementation for converting an integer value
/// to a MemoryUnit. This is used in the FFI code. Please
/// note that values should match the ordering of the platform
/// specific side of things (e.g. Kotlin implementation).
impl TryFrom<i32> for MemoryUnit {
    type Error = Error;

    // Maps the discriminants 0..=3 onto the enum variants; any other
    // value yields an `ErrorKind::MemoryUnit` error.
    fn try_from(value: i32) -> Result<MemoryUnit, Self::Error> {
        match value {
            0 => Ok(MemoryUnit::Byte),
            1 => Ok(MemoryUnit::Kilobyte),
            2 => Ok(MemoryUnit::Megabyte),
            3 => Ok(MemoryUnit::Gigabyte),
            e => Err(ErrorKind::MemoryUnit(e).into()),
        }
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use serde::{Deserialize, Serialize};
use crate::error::{Error, ErrorKind};
/// Different resolutions supported by the memory related metric types (e.g.
/// MemoryDistributionMetric).
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
#[repr(i32)] // use i32 to be compatible with our JNA definition
pub enum MemoryUnit {
    /// 1 byte
    Byte,
    /// 2^10 bytes
    Kilobyte,
    /// 2^20 bytes
    Megabyte,
    /// 2^30 bytes
    Gigabyte,
    // NOTE: variant order must match the platform bindings; see the
    // `TryFrom<i32>` impl below which maps 0..=3 onto these variants.
}
impl MemoryUnit {
/// Converts a value in the given unit to bytes.
///
/// # Arguments
///
/// * `value` - the value to convert.
///
/// # Returns
///
/// The integer representation of the byte value.
pub fn as_bytes(self, value: u64) -> u64 {
use MemoryUnit::*;
match self {
Byte => value,
Kilobyte => value << 10,
Megabyte => value << 20,
Gigabyte => value << 30,
}
}
}
/// Trait implementation for converting an integer value
/// to a MemoryUnit. This is used in the FFI code. Please
/// note that values should match the ordering of the platform
/// specific side of things (e.g. Kotlin implementation).
impl TryFrom<i32> for MemoryUnit {
    type Error = Error;

    // Maps the discriminants 0..=3 onto the enum variants; any other
    // value yields an `ErrorKind::MemoryUnit` error.
    fn try_from(value: i32) -> Result<MemoryUnit, Self::Error> {
        match value {
            0 => Ok(MemoryUnit::Byte),
            1 => Ok(MemoryUnit::Kilobyte),
            2 => Ok(MemoryUnit::Megabyte),
            3 => Ok(MemoryUnit::Gigabyte),
            e => Err(ErrorKind::MemoryUnit(e).into()),
        }
    }
}

374
third_party/rust/glean-core/src/metrics/mod.rs поставляемый
Просмотреть файл

@ -1,187 +1,187 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! The different metric types supported by the Glean SDK to handle data.
use std::collections::HashMap;
use chrono::{DateTime, FixedOffset};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value as JsonValue};
mod boolean;
mod counter;
mod custom_distribution;
mod datetime;
mod event;
mod experiment;
mod jwe;
mod labeled;
mod memory_distribution;
mod memory_unit;
mod ping;
mod quantity;
mod string;
mod string_list;
mod time_unit;
mod timespan;
mod timing_distribution;
mod uuid;
pub use crate::event_database::RecordedEvent;
use crate::histogram::{Functional, Histogram, PrecomputedExponential, PrecomputedLinear};
pub use crate::metrics::datetime::Datetime;
use crate::util::get_iso_time_string;
use crate::CommonMetricData;
use crate::Glean;
pub use self::boolean::BooleanMetric;
pub use self::counter::CounterMetric;
pub use self::custom_distribution::CustomDistributionMetric;
pub use self::datetime::DatetimeMetric;
pub use self::event::EventMetric;
pub(crate) use self::experiment::ExperimentMetric;
pub use crate::histogram::HistogramType;
// Note: only expose RecordedExperimentData to tests in
// the next line, so that glean-core\src\lib.rs won't fail to build.
#[cfg(test)]
pub(crate) use self::experiment::RecordedExperimentData;
pub use self::jwe::JweMetric;
pub use self::labeled::{
combine_base_identifier_and_label, dynamic_label, strip_label, LabeledMetric,
};
pub use self::memory_distribution::MemoryDistributionMetric;
pub use self::memory_unit::MemoryUnit;
pub use self::ping::PingType;
pub use self::quantity::QuantityMetric;
pub use self::string::StringMetric;
pub use self::string_list::StringListMetric;
pub use self::time_unit::TimeUnit;
pub use self::timespan::TimespanMetric;
pub use self::timing_distribution::TimerId;
pub use self::timing_distribution::TimingDistributionMetric;
pub use self::uuid::UuidMetric;
/// A snapshot of all buckets and the accumulated sum of a distribution.
#[derive(Debug, Serialize)]
pub struct DistributionData {
    /// A map containing the bucket index mapped to the accumulated count.
    ///
    /// This can contain buckets with a count of `0`.
    pub values: HashMap<u64, u64>,
    /// The accumulated sum of all the samples in the distribution.
    pub sum: u64,
}
/// The available metrics.
///
/// This is the in-memory and persisted layout of a metric.
///
/// ## Note
///
/// The order of metrics in this enum is important, as it is used for serialization.
/// Do not reorder the variants.
///
/// **Any new metric must be added at the end.**
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum Metric {
    /// A boolean metric. See [`BooleanMetric`](struct.BooleanMetric.html) for more information.
    Boolean(bool),
    /// A counter metric. See [`CounterMetric`](struct.CounterMetric.html) for more information.
    Counter(i32),
    /// A custom distribution with precomputed exponential bucketing.
    /// See [`CustomDistributionMetric`](struct.CustomDistributionMetric.html) for more information.
    CustomDistributionExponential(Histogram<PrecomputedExponential>),
    /// A custom distribution with precomputed linear bucketing.
    /// See [`CustomDistributionMetric`](struct.CustomDistributionMetric.html) for more information.
    CustomDistributionLinear(Histogram<PrecomputedLinear>),
    /// A datetime metric. See [`DatetimeMetric`](struct.DatetimeMetric.html) for more information.
    Datetime(DateTime<FixedOffset>, TimeUnit),
    /// An experiment metric. See [`ExperimentMetric`](struct.ExperimentMetric.html) for more information.
    Experiment(experiment::RecordedExperimentData),
    /// A quantity metric. See [`QuantityMetric`](struct.QuantityMetric.html) for more information.
    Quantity(i64),
    /// A string metric. See [`StringMetric`](struct.StringMetric.html) for more information.
    String(String),
    /// A string list metric. See [`StringListMetric`](struct.StringListMetric.html) for more information.
    StringList(Vec<String>),
    /// A UUID metric. See [`UuidMetric`](struct.UuidMetric.html) for more information.
    Uuid(String),
    /// A timespan metric. See [`TimespanMetric`](struct.TimespanMetric.html) for more information.
    Timespan(std::time::Duration, TimeUnit),
    /// A timing distribution. See [`TimingDistributionMetric`](struct.TimingDistributionMetric.html) for more information.
    TimingDistribution(Histogram<Functional>),
    /// A memory distribution. See [`MemoryDistributionMetric`](struct.MemoryDistributionMetric.html) for more information.
    MemoryDistribution(Histogram<Functional>),
    /// A JWE metric. See [`JweMetric`](struct.JweMetric.html) for more information.
    // `Jwe` is currently the last variant; append any new variants below it.
    Jwe(String),
}
/// A `MetricType` describes common behavior across all metrics.
pub trait MetricType {
    /// Accesses the stored metadata.
    fn meta(&self) -> &CommonMetricData;

    /// Accesses the stored metadata mutably.
    fn meta_mut(&mut self) -> &mut CommonMetricData;

    /// Whether this metric should currently be recorded.
    ///
    /// This depends on the metrics own state, as determined by its metadata,
    /// and whether upload is enabled on the Glean object.
    fn should_record(&self, glean: &Glean) -> bool {
        glean.is_upload_enabled() && self.meta().should_record()
    }
}
impl Metric {
    /// Gets the ping section the metric fits into.
    ///
    /// This determines the section of the ping to place the metric data in when
    /// assembling the ping payload.
    pub fn ping_section(&self) -> &'static str {
        match self {
            Metric::Boolean(_) => "boolean",
            Metric::Counter(_) => "counter",
            // Custom distributions are in the same section, no matter what bucketing.
            Metric::CustomDistributionExponential(_) => "custom_distribution",
            Metric::CustomDistributionLinear(_) => "custom_distribution",
            Metric::Datetime(_, _) => "datetime",
            // Experiments go through a dedicated path and must never reach
            // payload assembly; hitting this arm is a programming error.
            Metric::Experiment(_) => panic!("Experiments should not be serialized through this"),
            Metric::Quantity(_) => "quantity",
            Metric::String(_) => "string",
            Metric::StringList(_) => "string_list",
            Metric::Timespan(..) => "timespan",
            Metric::TimingDistribution(_) => "timing_distribution",
            Metric::Uuid(_) => "uuid",
            Metric::MemoryDistribution(_) => "memory_distribution",
            Metric::Jwe(_) => "jwe",
        }
    }

    /// The JSON representation of the metric's data.
    pub fn as_json(&self) -> JsonValue {
        match self {
            Metric::Boolean(b) => json!(b),
            Metric::Counter(c) => json!(c),
            // Distributions are snapshotted into the payload histogram format.
            Metric::CustomDistributionExponential(hist) => {
                json!(custom_distribution::snapshot(hist))
            }
            Metric::CustomDistributionLinear(hist) => json!(custom_distribution::snapshot(hist)),
            Metric::Datetime(d, time_unit) => json!(get_iso_time_string(*d, *time_unit)),
            Metric::Experiment(e) => e.as_json(),
            Metric::Quantity(q) => json!(q),
            Metric::String(s) => json!(s),
            Metric::StringList(v) => json!(v),
            Metric::Timespan(time, time_unit) => {
                json!({"value": time_unit.duration_convert(*time), "time_unit": time_unit})
            }
            Metric::TimingDistribution(hist) => json!(timing_distribution::snapshot(hist)),
            Metric::Uuid(s) => json!(s),
            Metric::MemoryDistribution(hist) => json!(memory_distribution::snapshot(hist)),
            Metric::Jwe(s) => json!(s),
        }
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! The different metric types supported by the Glean SDK to handle data.
use std::collections::HashMap;
use chrono::{DateTime, FixedOffset};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value as JsonValue};
mod boolean;
mod counter;
mod custom_distribution;
mod datetime;
mod event;
mod experiment;
mod jwe;
mod labeled;
mod memory_distribution;
mod memory_unit;
mod ping;
mod quantity;
mod string;
mod string_list;
mod time_unit;
mod timespan;
mod timing_distribution;
mod uuid;
pub use crate::event_database::RecordedEvent;
use crate::histogram::{Functional, Histogram, PrecomputedExponential, PrecomputedLinear};
pub use crate::metrics::datetime::Datetime;
use crate::util::get_iso_time_string;
use crate::CommonMetricData;
use crate::Glean;
pub use self::boolean::BooleanMetric;
pub use self::counter::CounterMetric;
pub use self::custom_distribution::CustomDistributionMetric;
pub use self::datetime::DatetimeMetric;
pub use self::event::EventMetric;
pub(crate) use self::experiment::ExperimentMetric;
pub use crate::histogram::HistogramType;
// Note: only expose RecordedExperimentData to tests in
// the next line, so that glean-core\src\lib.rs won't fail to build.
#[cfg(test)]
pub(crate) use self::experiment::RecordedExperimentData;
pub use self::jwe::JweMetric;
pub use self::labeled::{
combine_base_identifier_and_label, dynamic_label, strip_label, LabeledMetric,
};
pub use self::memory_distribution::MemoryDistributionMetric;
pub use self::memory_unit::MemoryUnit;
pub use self::ping::PingType;
pub use self::quantity::QuantityMetric;
pub use self::string::StringMetric;
pub use self::string_list::StringListMetric;
pub use self::time_unit::TimeUnit;
pub use self::timespan::TimespanMetric;
pub use self::timing_distribution::TimerId;
pub use self::timing_distribution::TimingDistributionMetric;
pub use self::uuid::UuidMetric;
/// A snapshot of all buckets and the accumulated sum of a distribution.
#[derive(Debug, Serialize)]
pub struct DistributionData {
    /// A map containing the bucket index mapped to the accumulated count.
    ///
    /// This can contain buckets with a count of `0`.
    pub values: HashMap<u64, u64>,
    /// The accumulated sum of all the samples in the distribution.
    pub sum: u64,
}
/// The available metrics.
///
/// This is the in-memory and persisted layout of a metric.
///
/// ## Note
///
/// The order of metrics in this enum is important, as it is used for serialization.
/// Do not reorder the variants.
///
/// **Any new metric must be added at the end.**
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum Metric {
    /// A boolean metric. See [`BooleanMetric`](struct.BooleanMetric.html) for more information.
    Boolean(bool),
    /// A counter metric. See [`CounterMetric`](struct.CounterMetric.html) for more information.
    Counter(i32),
    /// A custom distribution with precomputed exponential bucketing.
    /// See [`CustomDistributionMetric`](struct.CustomDistributionMetric.html) for more information.
    CustomDistributionExponential(Histogram<PrecomputedExponential>),
    /// A custom distribution with precomputed linear bucketing.
    /// See [`CustomDistributionMetric`](struct.CustomDistributionMetric.html) for more information.
    CustomDistributionLinear(Histogram<PrecomputedLinear>),
    /// A datetime metric. See [`DatetimeMetric`](struct.DatetimeMetric.html) for more information.
    Datetime(DateTime<FixedOffset>, TimeUnit),
    /// An experiment metric. See [`ExperimentMetric`](struct.ExperimentMetric.html) for more information.
    Experiment(experiment::RecordedExperimentData),
    /// A quantity metric. See [`QuantityMetric`](struct.QuantityMetric.html) for more information.
    Quantity(i64),
    /// A string metric. See [`StringMetric`](struct.StringMetric.html) for more information.
    String(String),
    /// A string list metric. See [`StringListMetric`](struct.StringListMetric.html) for more information.
    StringList(Vec<String>),
    /// A UUID metric. See [`UuidMetric`](struct.UuidMetric.html) for more information.
    Uuid(String),
    /// A timespan metric. See [`TimespanMetric`](struct.TimespanMetric.html) for more information.
    Timespan(std::time::Duration, TimeUnit),
    /// A timing distribution. See [`TimingDistributionMetric`](struct.TimingDistributionMetric.html) for more information.
    TimingDistribution(Histogram<Functional>),
    /// A memory distribution. See [`MemoryDistributionMetric`](struct.MemoryDistributionMetric.html) for more information.
    MemoryDistribution(Histogram<Functional>),
    /// A JWE metric. See [`JweMetric`](struct.JweMetric.html) for more information.
    // `Jwe` is currently the last variant; append any new variants below it.
    Jwe(String),
}
/// A `MetricType` describes common behavior across all metrics.
pub trait MetricType {
    /// Accesses the stored metadata.
    fn meta(&self) -> &CommonMetricData;

    /// Accesses the stored metadata mutably.
    fn meta_mut(&mut self) -> &mut CommonMetricData;

    /// Whether this metric should currently be recorded.
    ///
    /// This depends on the metrics own state, as determined by its metadata,
    /// and whether upload is enabled on the Glean object.
    fn should_record(&self, glean: &Glean) -> bool {
        glean.is_upload_enabled() && self.meta().should_record()
    }
}
impl Metric {
    /// Gets the ping section the metric fits into.
    ///
    /// This determines the section of the ping to place the metric data in when
    /// assembling the ping payload.
    pub fn ping_section(&self) -> &'static str {
        match self {
            Metric::Boolean(_) => "boolean",
            Metric::Counter(_) => "counter",
            // Custom distributions are in the same section, no matter what bucketing.
            Metric::CustomDistributionExponential(_) => "custom_distribution",
            Metric::CustomDistributionLinear(_) => "custom_distribution",
            Metric::Datetime(_, _) => "datetime",
            // Experiments go through a dedicated path and must never reach
            // payload assembly; hitting this arm is a programming error.
            Metric::Experiment(_) => panic!("Experiments should not be serialized through this"),
            Metric::Quantity(_) => "quantity",
            Metric::String(_) => "string",
            Metric::StringList(_) => "string_list",
            Metric::Timespan(..) => "timespan",
            Metric::TimingDistribution(_) => "timing_distribution",
            Metric::Uuid(_) => "uuid",
            Metric::MemoryDistribution(_) => "memory_distribution",
            Metric::Jwe(_) => "jwe",
        }
    }

    /// The JSON representation of the metric's data.
    pub fn as_json(&self) -> JsonValue {
        match self {
            Metric::Boolean(b) => json!(b),
            Metric::Counter(c) => json!(c),
            // Distributions are snapshotted into the payload histogram format.
            Metric::CustomDistributionExponential(hist) => {
                json!(custom_distribution::snapshot(hist))
            }
            Metric::CustomDistributionLinear(hist) => json!(custom_distribution::snapshot(hist)),
            Metric::Datetime(d, time_unit) => json!(get_iso_time_string(*d, *time_unit)),
            Metric::Experiment(e) => e.as_json(),
            Metric::Quantity(q) => json!(q),
            Metric::String(s) => json!(s),
            Metric::StringList(v) => json!(v),
            Metric::Timespan(time, time_unit) => {
                json!({"value": time_unit.duration_convert(*time), "time_unit": time_unit})
            }
            Metric::TimingDistribution(hist) => json!(timing_distribution::snapshot(hist)),
            Metric::Uuid(s) => json!(s),
            Metric::MemoryDistribution(hist) => json!(memory_distribution::snapshot(hist)),
            Metric::Jwe(s) => json!(s),
        }
    }
}

Просмотреть файл

@ -1,78 +1,78 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error::Result;
use crate::Glean;
/// Stores information about a ping.
///
/// This is required so that given metric data queued on disk we can send
/// pings with the correct settings, e.g. whether it has a client_id.
#[derive(Clone, Debug)]
pub struct PingType {
    /// The name of the ping.
    pub name: String,
    /// Whether the ping should include the client ID.
    pub include_client_id: bool,
    /// Whether the ping should be sent if it is empty.
    pub send_if_empty: bool,
    /// The "reason" codes that this ping can send.
    pub reason_codes: Vec<String>,
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl PingType {
/// Creates a new ping type for the given name, whether to include the client ID and whether to
/// send this ping empty.
///
/// # Arguments
///
/// * `name` - The name of the ping.
/// * `include_client_id` - Whether to include the client ID in the assembled ping when submitting.
/// * `send_if_empty` - Whether the ping should be sent empty or not.
/// * `reason_codes` - The valid reason codes for this ping.
pub fn new<A: Into<String>>(
name: A,
include_client_id: bool,
send_if_empty: bool,
reason_codes: Vec<String>,
) -> Self {
Self {
name: name.into(),
include_client_id,
send_if_empty,
reason_codes,
}
}
/// Submits the ping for eventual uploading
///
/// # Arguments
///
/// * `glean` - the Glean instance to use to send the ping.
/// * `reason` - the reason the ping was triggered. Included in the
/// `ping_info.reason` part of the payload.
///
/// # Returns
///
/// See [`Glean#submit_ping`](../struct.Glean.html#method.submit_ping) for details.
pub fn submit(&self, glean: &Glean, reason: Option<&str>) -> Result<bool> {
let corrected_reason = match reason {
Some(reason) => {
if self.reason_codes.contains(&reason.to_string()) {
Some(reason)
} else {
log::error!("Invalid reason code {} for ping {}", reason, self.name);
None
}
}
None => None,
};
glean.submit_ping(self, corrected_reason)
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error::Result;
use crate::Glean;
/// Stores information about a ping.
///
/// This is required so that given metric data queued on disk we can send
/// pings with the correct settings, e.g. whether it has a client_id.
#[derive(Clone, Debug)]
pub struct PingType {
    /// The name of the ping.
    pub name: String,
    /// Whether the ping should include the client ID.
    pub include_client_id: bool,
    /// Whether the ping should be sent even if it contains no metric data.
    pub send_if_empty: bool,
    /// The "reason" codes that this ping can send in `ping_info.reason`.
    pub reason_codes: Vec<String>,
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl PingType {
/// Creates a new ping type for the given name, whether to include the client ID and whether to
/// send this ping empty.
///
/// # Arguments
///
/// * `name` - The name of the ping.
/// * `include_client_id` - Whether to include the client ID in the assembled ping when submitting.
/// * `send_if_empty` - Whether the ping should be sent empty or not.
/// * `reason_codes` - The valid reason codes for this ping.
pub fn new<A: Into<String>>(
name: A,
include_client_id: bool,
send_if_empty: bool,
reason_codes: Vec<String>,
) -> Self {
Self {
name: name.into(),
include_client_id,
send_if_empty,
reason_codes,
}
}
/// Submits the ping for eventual uploading
///
/// # Arguments
///
/// * `glean` - the Glean instance to use to send the ping.
/// * `reason` - the reason the ping was triggered. Included in the
/// `ping_info.reason` part of the payload.
///
/// # Returns
///
/// See [`Glean#submit_ping`](../struct.Glean.html#method.submit_ping) for details.
pub fn submit(&self, glean: &Glean, reason: Option<&str>) -> Result<bool> {
let corrected_reason = match reason {
Some(reason) => {
if self.reason_codes.contains(&reason.to_string()) {
Some(reason)
} else {
log::error!("Invalid reason code {} for ping {}", reason, self.name);
None
}
}
None => None,
};
glean.submit_ping(self, corrected_reason)
}
}

Просмотреть файл

@ -1,86 +1,86 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A quantity metric.
///
/// Used to store explicit non-negative integers.
#[derive(Clone, Debug)]
pub struct QuantityMetric {
    // Shared metadata (name, category, send_in_pings, lifetime, ...) common to all metric types.
    meta: CommonMetricData,
}
impl MetricType for QuantityMetric {
    /// Returns the common metric metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Returns mutable access to the common metric metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl QuantityMetric {
/// Creates a new quantity metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Sets the value. Must be non-negative.
///
/// # Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `value` - The value. Must be non-negative.
///
/// ## Notes
///
/// Logs an error if the `value` is negative.
pub fn set(&self, glean: &Glean, value: i64) {
if !self.should_record(glean) {
return;
}
if value < 0 {
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
format!("Set negative value {}", value),
None,
);
return;
}
glean
.storage()
.record(glean, &self.meta, &Metric::Quantity(value))
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<i64> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::Quantity(i)) => Some(i),
_ => None,
}
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A quantity metric.
///
/// Used to store explicit non-negative integers.
#[derive(Clone, Debug)]
pub struct QuantityMetric {
    // Shared metadata (name, category, send_in_pings, lifetime, ...) common to all metric types.
    meta: CommonMetricData,
}
impl MetricType for QuantityMetric {
    /// Returns the common metric metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Returns mutable access to the common metric metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl QuantityMetric {
/// Creates a new quantity metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Sets the value. Must be non-negative.
///
/// # Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `value` - The value. Must be non-negative.
///
/// ## Notes
///
/// Logs an error if the `value` is negative.
pub fn set(&self, glean: &Glean, value: i64) {
if !self.should_record(glean) {
return;
}
if value < 0 {
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
format!("Set negative value {}", value),
None,
);
return;
}
glean
.storage()
.record(glean, &self.meta, &Metric::Quantity(value))
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<i64> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::Quantity(i)) => Some(i),
_ => None,
}
}
}

Просмотреть файл

@ -1,115 +1,115 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::util::truncate_string_at_boundary_with_error;
use crate::CommonMetricData;
use crate::Glean;
// Maximum byte length of a recorded string value; longer values are truncated.
const MAX_LENGTH_VALUE: usize = 100;
/// A string metric.
///
/// Record an Unicode string value with arbitrary content.
/// Strings are length-limited to `MAX_LENGTH_VALUE` bytes.
#[derive(Clone, Debug)]
pub struct StringMetric {
    // Shared metadata (name, category, send_in_pings, lifetime, ...) common to all metric types.
    meta: CommonMetricData,
}
impl MetricType for StringMetric {
    /// Returns the common metric metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Returns mutable access to the common metric metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl StringMetric {
/// Creates a new string metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Sets to the specified value.
///
/// # Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `value` - The string to set the metric to.
///
/// ## Notes
///
/// Truncates the value if it is longer than `MAX_STRING_LENGTH` bytes and logs an error.
pub fn set<S: Into<String>>(&self, glean: &Glean, value: S) {
if !self.should_record(glean) {
return;
}
let s = truncate_string_at_boundary_with_error(glean, &self.meta, value, MAX_LENGTH_VALUE);
let value = Metric::String(s);
glean.storage().record(glean, &self.meta, &value)
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as a string.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<String> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::String(s)) => Some(s),
_ => None,
}
}
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::test_get_num_recorded_errors;
    use crate::tests::new_glean;
    use crate::util::truncate_string_at_boundary;
    use crate::ErrorType;
    use crate::Lifetime;
    /// Over-long values must be truncated and an InvalidOverflow error recorded.
    #[test]
    fn setting_a_long_string_records_an_error() {
        let (glean, _) = new_glean(None);
        let metric = StringMetric::new(CommonMetricData {
            name: "string_metric".into(),
            category: "test".into(),
            send_in_pings: vec!["store1".into()],
            lifetime: Lifetime::Application,
            disabled: false,
            dynamic_label: None,
        });
        // 110 bytes: over the 100-byte limit.
        let input = "0123456789".repeat(11);
        metric.set(&glean, input.clone());
        // The stored value is the truncated input...
        let expected = truncate_string_at_boundary(input, MAX_LENGTH_VALUE);
        assert_eq!(expected, metric.test_get_value(&glean, "store1").unwrap());
        // ...and exactly one overflow error was recorded.
        assert_eq!(
            1,
            test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidOverflow, None)
                .unwrap()
        );
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::util::truncate_string_at_boundary_with_error;
use crate::CommonMetricData;
use crate::Glean;
// Maximum byte length of a recorded string value; longer values are truncated.
const MAX_LENGTH_VALUE: usize = 100;
/// A string metric.
///
/// Record an Unicode string value with arbitrary content.
/// Strings are length-limited to `MAX_LENGTH_VALUE` bytes.
#[derive(Clone, Debug)]
pub struct StringMetric {
    // Shared metadata (name, category, send_in_pings, lifetime, ...) common to all metric types.
    meta: CommonMetricData,
}
impl MetricType for StringMetric {
    /// Returns the common metric metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Returns mutable access to the common metric metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl StringMetric {
/// Creates a new string metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Sets to the specified value.
///
/// # Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `value` - The string to set the metric to.
///
/// ## Notes
///
/// Truncates the value if it is longer than `MAX_STRING_LENGTH` bytes and logs an error.
pub fn set<S: Into<String>>(&self, glean: &Glean, value: S) {
if !self.should_record(glean) {
return;
}
let s = truncate_string_at_boundary_with_error(glean, &self.meta, value, MAX_LENGTH_VALUE);
let value = Metric::String(s);
glean.storage().record(glean, &self.meta, &value)
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as a string.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<String> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::String(s)) => Some(s),
_ => None,
}
}
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::test_get_num_recorded_errors;
    use crate::tests::new_glean;
    use crate::util::truncate_string_at_boundary;
    use crate::ErrorType;
    use crate::Lifetime;
    /// Over-long values must be truncated and an InvalidOverflow error recorded.
    #[test]
    fn setting_a_long_string_records_an_error() {
        let (glean, _) = new_glean(None);
        let metric = StringMetric::new(CommonMetricData {
            name: "string_metric".into(),
            category: "test".into(),
            send_in_pings: vec!["store1".into()],
            lifetime: Lifetime::Application,
            disabled: false,
            dynamic_label: None,
        });
        // 110 bytes: over the 100-byte limit.
        let input = "0123456789".repeat(11);
        metric.set(&glean, input.clone());
        // The stored value is the truncated input...
        let expected = truncate_string_at_boundary(input, MAX_LENGTH_VALUE);
        assert_eq!(expected, metric.test_get_value(&glean, "store1").unwrap());
        // ...and exactly one overflow error was recorded.
        assert_eq!(
            1,
            test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidOverflow, None)
                .unwrap()
        );
    }
}

Просмотреть файл

@ -1,158 +1,158 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::util::truncate_string_at_boundary_with_error;
use crate::CommonMetricData;
use crate::Glean;
// Maximum number of entries a string list may hold.
const MAX_LIST_LENGTH: usize = 20;
// Maximum byte length of any single string in the list.
const MAX_STRING_LENGTH: usize = 50;
/// A string list metric.
///
/// This allows appending a string value with arbitrary content to a list.
#[derive(Clone, Debug)]
pub struct StringListMetric {
    // Shared metadata (name, category, send_in_pings, lifetime, ...) common to all metric types.
    meta: CommonMetricData,
}
impl MetricType for StringListMetric {
    /// Returns the common metric metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Returns mutable access to the common metric metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl StringListMetric {
    /// Creates a new string list metric.
    pub fn new(meta: CommonMetricData) -> Self {
        Self { meta }
    }
    /// Adds a new string to the list.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    /// * `value` - The string to add.
    ///
    /// ## Notes
    ///
    /// Truncates the value if it is longer than `MAX_STRING_LENGTH` bytes and logs an error.
    /// If the list already holds `MAX_LIST_LENGTH` entries the value is discarded
    /// and an error is recorded.
    pub fn add<S: Into<String>>(&self, glean: &Glean, value: S) {
        if !self.should_record(glean) {
            return;
        }
        let value =
            truncate_string_at_boundary_with_error(glean, &self.meta, value, MAX_STRING_LENGTH);
        // Recording an error needs storage access too, so capture the message
        // here and record it after the storage callback has returned.
        let mut error = None;
        glean
            .storage()
            .record_with(glean, &self.meta, |old_value| match old_value {
                Some(Metric::StringList(mut old_value)) => {
                    // `>=` (rather than `==`) so a persisted list that somehow
                    // exceeds the limit can never grow any further.
                    if old_value.len() >= MAX_LIST_LENGTH {
                        let msg = format!(
                            "String list length of {} exceeds maximum of {}",
                            old_value.len() + 1,
                            MAX_LIST_LENGTH
                        );
                        error = Some(msg);
                    } else {
                        old_value.push(value.clone());
                    }
                    Metric::StringList(old_value)
                }
                _ => Metric::StringList(vec![value.clone()]),
            });
        if let Some(msg) = error {
            record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
        }
    }
    /// Sets to a specific list of strings.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    /// * `value` - The list of string to set the metric to.
    ///
    /// ## Notes
    ///
    /// Truncates the list if it is longer than `MAX_LIST_LENGTH` and logs an error.
    /// Truncates any value in the list if it is longer than `MAX_STRING_LENGTH` and logs an error.
    pub fn set(&self, glean: &Glean, value: Vec<String>) {
        if !self.should_record(glean) {
            return;
        }
        // Cap the list length first, recording an error if entries were dropped.
        let value = if value.len() > MAX_LIST_LENGTH {
            let msg = format!(
                "StringList length {} exceeds maximum of {}",
                value.len(),
                MAX_LIST_LENGTH
            );
            record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
            value[0..MAX_LIST_LENGTH].to_vec()
        } else {
            value
        };
        // Then enforce the per-string byte limit on every surviving entry.
        let value = value
            .into_iter()
            .map(|elem| {
                truncate_string_at_boundary_with_error(glean, &self.meta, elem, MAX_STRING_LENGTH)
            })
            .collect();
        let value = Metric::StringList(value);
        glean.storage().record(glean, &self.meta, &value);
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently-stored values.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<Vec<String>> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        ) {
            Some(Metric::StringList(values)) => Some(values),
            _ => None,
        }
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently-stored values as a JSON String of the format
    /// ["string1", "string2", ...]
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(
        &self,
        glean: &Glean,
        storage_name: &str,
    ) -> Option<String> {
        self.test_get_value(glean, storage_name)
            .map(|values| serde_json::to_string(&values).unwrap())
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::util::truncate_string_at_boundary_with_error;
use crate::CommonMetricData;
use crate::Glean;
// Maximum number of entries a string list may hold.
const MAX_LIST_LENGTH: usize = 20;
// Maximum byte length of any single string in the list.
const MAX_STRING_LENGTH: usize = 50;
/// A string list metric.
///
/// This allows appending a string value with arbitrary content to a list.
#[derive(Clone, Debug)]
pub struct StringListMetric {
    // Shared metadata (name, category, send_in_pings, lifetime, ...) common to all metric types.
    meta: CommonMetricData,
}
impl MetricType for StringListMetric {
    /// Returns the common metric metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Returns mutable access to the common metric metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl StringListMetric {
    /// Creates a new string list metric.
    pub fn new(meta: CommonMetricData) -> Self {
        Self { meta }
    }
    /// Adds a new string to the list.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    /// * `value` - The string to add.
    ///
    /// ## Notes
    ///
    /// Truncates the value if it is longer than `MAX_STRING_LENGTH` bytes and logs an error.
    /// If the list already holds `MAX_LIST_LENGTH` entries the value is discarded
    /// and an error is recorded.
    pub fn add<S: Into<String>>(&self, glean: &Glean, value: S) {
        if !self.should_record(glean) {
            return;
        }
        let value =
            truncate_string_at_boundary_with_error(glean, &self.meta, value, MAX_STRING_LENGTH);
        // Recording an error needs storage access too, so capture the message
        // here and record it after the storage callback has returned.
        let mut error = None;
        glean
            .storage()
            .record_with(glean, &self.meta, |old_value| match old_value {
                Some(Metric::StringList(mut old_value)) => {
                    // `>=` (rather than `==`) so a persisted list that somehow
                    // exceeds the limit can never grow any further.
                    if old_value.len() >= MAX_LIST_LENGTH {
                        let msg = format!(
                            "String list length of {} exceeds maximum of {}",
                            old_value.len() + 1,
                            MAX_LIST_LENGTH
                        );
                        error = Some(msg);
                    } else {
                        old_value.push(value.clone());
                    }
                    Metric::StringList(old_value)
                }
                _ => Metric::StringList(vec![value.clone()]),
            });
        if let Some(msg) = error {
            record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
        }
    }
    /// Sets to a specific list of strings.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    /// * `value` - The list of string to set the metric to.
    ///
    /// ## Notes
    ///
    /// Truncates the list if it is longer than `MAX_LIST_LENGTH` and logs an error.
    /// Truncates any value in the list if it is longer than `MAX_STRING_LENGTH` and logs an error.
    pub fn set(&self, glean: &Glean, value: Vec<String>) {
        if !self.should_record(glean) {
            return;
        }
        // Cap the list length first, recording an error if entries were dropped.
        let value = if value.len() > MAX_LIST_LENGTH {
            let msg = format!(
                "StringList length {} exceeds maximum of {}",
                value.len(),
                MAX_LIST_LENGTH
            );
            record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
            value[0..MAX_LIST_LENGTH].to_vec()
        } else {
            value
        };
        // Then enforce the per-string byte limit on every surviving entry.
        let value = value
            .into_iter()
            .map(|elem| {
                truncate_string_at_boundary_with_error(glean, &self.meta, elem, MAX_STRING_LENGTH)
            })
            .collect();
        let value = Metric::StringList(value);
        glean.storage().record(glean, &self.meta, &value);
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently-stored values.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<Vec<String>> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        ) {
            Some(Metric::StringList(values)) => Some(values),
            _ => None,
        }
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently-stored values as a JSON String of the format
    /// ["string1", "string2", ...]
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(
        &self,
        glean: &Glean,
        storage_name: &str,
    ) -> Option<String> {
        self.test_get_value(glean, storage_name)
            .map(|values| serde_json::to_string(&values).unwrap())
    }
}

Просмотреть файл

@ -1,117 +1,117 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use std::time::Duration;
use serde::{Deserialize, Serialize};
use crate::error::{Error, ErrorKind};
/// Different resolutions supported by the time related
/// metric types (e.g. DatetimeMetric).
#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "lowercase")]
#[repr(i32)] // use i32 to be compatible with our JNA definition
pub enum TimeUnit {
    /// Truncate to nanosecond precision.
    Nanosecond,
    /// Truncate to microsecond precision.
    Microsecond,
    /// Truncate to millisecond precision.
    Millisecond,
    /// Truncate to second precision.
    Second,
    /// Truncate to minute precision.
    Minute,
    /// Truncate to hour precision.
    Hour,
    /// Truncate to day precision.
    Day,
}
impl TimeUnit {
    /// Formats the given time unit, truncating the time if needed.
    ///
    /// Returns a strftime-like date/time pattern whose precision matches this
    /// unit (`%.f`/`%:z` look like chrono's extended specifiers —
    /// NOTE(review): confirm against the formatting crate actually used).
    pub fn format_pattern(self) -> &'static str {
        use TimeUnit::*;
        match self {
            Nanosecond => "%Y-%m-%dT%H:%M:%S%.f%:z",
            Microsecond => "%Y-%m-%dT%H:%M:%S%.6f%:z",
            Millisecond => "%Y-%m-%dT%H:%M:%S%.3f%:z",
            Second => "%Y-%m-%dT%H:%M:%S%:z",
            Minute => "%Y-%m-%dT%H:%M%:z",
            Hour => "%Y-%m-%dT%H%:z",
            Day => "%Y-%m-%d%:z",
        }
    }
    /// Converts a duration to the requested time unit.
    ///
    /// # Arguments
    ///
    /// * `duration` - the duration to convert.
    ///
    /// # Returns
    ///
    /// The integer representation of the converted duration.
    /// Integer division truncates toward zero, matching the "truncate"
    /// semantics documented on the variants.
    pub fn duration_convert(self, duration: Duration) -> u64 {
        use TimeUnit::*;
        match self {
            // `as_nanos`/`as_micros`/`as_millis` return u128; the `as u64`
            // cast truncates for durations beyond u64::MAX of the unit.
            Nanosecond => duration.as_nanos() as u64,
            Microsecond => duration.as_micros() as u64,
            Millisecond => duration.as_millis() as u64,
            Second => duration.as_secs(),
            Minute => duration.as_secs() / 60,
            Hour => duration.as_secs() / 60 / 60,
            Day => duration.as_secs() / 60 / 60 / 24,
        }
    }
    /// Converts a duration in the given unit to nanoseconds.
    ///
    /// # Arguments
    ///
    /// * `duration` - the duration to convert.
    ///
    /// # Returns
    ///
    /// The integer representation of the nanosecond duration.
    /// Note: the `as u64` cast truncates results above u64::MAX nanoseconds,
    /// and the `* 60`/`* 60 * 60` multiplications are plain u64 arithmetic
    /// (they can overflow for extreme inputs).
    pub fn as_nanos(self, duration: u64) -> u64 {
        use TimeUnit::*;
        let duration = match self {
            Nanosecond => Duration::from_nanos(duration),
            Microsecond => Duration::from_micros(duration),
            Millisecond => Duration::from_millis(duration),
            Second => Duration::from_secs(duration),
            Minute => Duration::from_secs(duration * 60),
            Hour => Duration::from_secs(duration * 60 * 60),
            Day => Duration::from_secs(duration * 60 * 60 * 24),
        };
        duration.as_nanos() as u64
    }
}
/// Trait implementation for converting an integer value to a TimeUnit.
///
/// This is used in the FFI code.
///
/// Please note that values should match the ordering of the
/// platform specific side of things (e.g. Kotlin implementation).
impl TryFrom<i32> for TimeUnit {
    type Error = Error;
    // The mapping mirrors the `#[repr(i32)]` declaration order of the enum.
    fn try_from(value: i32) -> Result<TimeUnit, Self::Error> {
        match value {
            0 => Ok(TimeUnit::Nanosecond),
            1 => Ok(TimeUnit::Microsecond),
            2 => Ok(TimeUnit::Millisecond),
            3 => Ok(TimeUnit::Second),
            4 => Ok(TimeUnit::Minute),
            5 => Ok(TimeUnit::Hour),
            6 => Ok(TimeUnit::Day),
            // Any other discriminant is rejected with a TimeUnit error.
            e => Err(ErrorKind::TimeUnit(e).into()),
        }
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use std::time::Duration;
use serde::{Deserialize, Serialize};
use crate::error::{Error, ErrorKind};
/// Different resolutions supported by the time related
/// metric types (e.g. DatetimeMetric).
#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "lowercase")]
#[repr(i32)] // use i32 to be compatible with our JNA definition
pub enum TimeUnit {
    /// Truncate to nanosecond precision.
    Nanosecond,
    /// Truncate to microsecond precision.
    Microsecond,
    /// Truncate to millisecond precision.
    Millisecond,
    /// Truncate to second precision.
    Second,
    /// Truncate to minute precision.
    Minute,
    /// Truncate to hour precision.
    Hour,
    /// Truncate to day precision.
    Day,
}
impl TimeUnit {
    /// Formats the given time unit, truncating the time if needed.
    ///
    /// Returns a strftime-like date/time pattern whose precision matches this
    /// unit (`%.f`/`%:z` look like chrono's extended specifiers —
    /// NOTE(review): confirm against the formatting crate actually used).
    pub fn format_pattern(self) -> &'static str {
        use TimeUnit::*;
        match self {
            Nanosecond => "%Y-%m-%dT%H:%M:%S%.f%:z",
            Microsecond => "%Y-%m-%dT%H:%M:%S%.6f%:z",
            Millisecond => "%Y-%m-%dT%H:%M:%S%.3f%:z",
            Second => "%Y-%m-%dT%H:%M:%S%:z",
            Minute => "%Y-%m-%dT%H:%M%:z",
            Hour => "%Y-%m-%dT%H%:z",
            Day => "%Y-%m-%d%:z",
        }
    }
    /// Converts a duration to the requested time unit.
    ///
    /// # Arguments
    ///
    /// * `duration` - the duration to convert.
    ///
    /// # Returns
    ///
    /// The integer representation of the converted duration.
    /// Integer division truncates toward zero, matching the "truncate"
    /// semantics documented on the variants.
    pub fn duration_convert(self, duration: Duration) -> u64 {
        use TimeUnit::*;
        match self {
            // `as_nanos`/`as_micros`/`as_millis` return u128; the `as u64`
            // cast truncates for durations beyond u64::MAX of the unit.
            Nanosecond => duration.as_nanos() as u64,
            Microsecond => duration.as_micros() as u64,
            Millisecond => duration.as_millis() as u64,
            Second => duration.as_secs(),
            Minute => duration.as_secs() / 60,
            Hour => duration.as_secs() / 60 / 60,
            Day => duration.as_secs() / 60 / 60 / 24,
        }
    }
    /// Converts a duration in the given unit to nanoseconds.
    ///
    /// # Arguments
    ///
    /// * `duration` - the duration to convert.
    ///
    /// # Returns
    ///
    /// The integer representation of the nanosecond duration.
    /// Note: the `as u64` cast truncates results above u64::MAX nanoseconds,
    /// and the `* 60`/`* 60 * 60` multiplications are plain u64 arithmetic
    /// (they can overflow for extreme inputs).
    pub fn as_nanos(self, duration: u64) -> u64 {
        use TimeUnit::*;
        let duration = match self {
            Nanosecond => Duration::from_nanos(duration),
            Microsecond => Duration::from_micros(duration),
            Millisecond => Duration::from_millis(duration),
            Second => Duration::from_secs(duration),
            Minute => Duration::from_secs(duration * 60),
            Hour => Duration::from_secs(duration * 60 * 60),
            Day => Duration::from_secs(duration * 60 * 60 * 24),
        };
        duration.as_nanos() as u64
    }
}
/// Trait implementation for converting an integer value to a TimeUnit.
///
/// This is used in the FFI code.
///
/// Please note that values should match the ordering of the
/// platform specific side of things (e.g. Kotlin implementation).
impl TryFrom<i32> for TimeUnit {
    type Error = Error;
    // The mapping mirrors the `#[repr(i32)]` declaration order of the enum.
    fn try_from(value: i32) -> Result<TimeUnit, Self::Error> {
        match value {
            0 => Ok(TimeUnit::Nanosecond),
            1 => Ok(TimeUnit::Microsecond),
            2 => Ok(TimeUnit::Millisecond),
            3 => Ok(TimeUnit::Second),
            4 => Ok(TimeUnit::Minute),
            5 => Ok(TimeUnit::Hour),
            6 => Ok(TimeUnit::Day),
            // Any other discriminant is rejected with a TimeUnit error.
            e => Err(ErrorKind::TimeUnit(e).into()),
        }
    }
}

Просмотреть файл

@ -1,178 +1,178 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::time::Duration;
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A timespan metric.
///
/// Timespans are used to make a measurement of how much time is spent in a particular task.
#[derive(Debug)]
pub struct TimespanMetric {
    // Shared metadata (name, category, send_in_pings, lifetime, ...) common to all metric types.
    meta: CommonMetricData,
    // The resolution the elapsed time is reported in.
    time_unit: TimeUnit,
    // Timestamp recorded by `set_start`; `None` when no timespan is running.
    start_time: Option<u64>,
}
impl MetricType for TimespanMetric {
    /// Returns the common metric metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Returns mutable access to the common metric metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl TimespanMetric {
    /// Creates a new timespan metric.
    pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
        Self {
            meta,
            time_unit,
            start_time: None,
        }
    }
    /// Starts tracking time for the provided metric.
    ///
    /// This records an error if it's already tracking time (i.e. start was already
    /// called with no corresponding `stop`): in that case the original
    /// start time will be preserved.
    pub fn set_start(&mut self, glean: &Glean, start_time: u64) {
        if !self.should_record(glean) {
            return;
        }
        if self.start_time.is_some() {
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidState,
                "Timespan already started",
                None,
            );
            return;
        }
        self.start_time = Some(start_time);
    }
    /// Stops tracking time for the provided metric. Sets the metric to the elapsed time.
    ///
    /// This will record an error if no `start` was called, or if the stop
    /// timestamp precedes the start timestamp.
    pub fn set_stop(&mut self, glean: &Glean, stop_time: u64) {
        if !self.should_record(glean) {
            // Reset timer when disabled, so that we don't record timespans across
            // disabled/enabled toggling.
            self.start_time = None;
            return;
        }
        let start_time = match self.start_time.take() {
            Some(start_time) => start_time,
            None => {
                record_error(
                    glean,
                    &self.meta,
                    ErrorType::InvalidState,
                    "Timespan not running",
                    None,
                );
                return;
            }
        };
        // A plain `stop_time - start_time` would panic in debug builds (and
        // wrap around in release builds) if the stop timestamp is earlier than
        // the start timestamp; record an error instead of a bogus duration.
        let duration = match stop_time.checked_sub(start_time) {
            Some(duration) => duration,
            None => {
                record_error(
                    glean,
                    &self.meta,
                    ErrorType::InvalidValue,
                    "Timespan stop time is before the start time",
                    None,
                );
                return;
            }
        };
        let duration = Duration::from_nanos(duration);
        self.set_raw(glean, duration, false);
    }
    /// Aborts a previous `start` call. No error is recorded if no `start` was called.
    pub fn cancel(&mut self) {
        self.start_time = None;
    }
    /// Explicitly sets the timespan value.
    ///
    /// This API should only be used if your library or application requires recording
    /// times in a way that can not make use of `start`/`stop`/`cancel`.
    ///
    /// Care should be taken using this if the ping lifetime might contain more than one
    /// timespan measurement. To be safe, `set_raw` should generally be followed by
    /// sending a custom ping containing the timespan.
    ///
    /// # Arguments
    ///
    /// * `elapsed` - The elapsed time to record.
    /// * `overwrite` - Whether or not to overwrite existing data.
    pub fn set_raw(&self, glean: &Glean, elapsed: Duration, overwrite: bool) {
        if !self.should_record(glean) {
            return;
        }
        // Refuse raw values while start/stop tracking is in progress.
        if self.start_time.is_some() {
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidState,
                "Timespan already running. Raw value not recorded.",
                None,
            );
            return;
        }
        let mut report_value_exists: bool = false;
        glean.storage().record_with(glean, &self.meta, |old_value| {
            if overwrite {
                Metric::Timespan(elapsed, self.time_unit)
            } else {
                match old_value {
                    Some(old @ Metric::Timespan(..)) => {
                        // If some value already exists, report an error.
                        // We do this out of the storage since recording an
                        // error accesses the storage as well.
                        report_value_exists = true;
                        old
                    }
                    _ => Metric::Timespan(elapsed, self.time_unit),
                }
            }
        });
        if report_value_exists {
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidState,
                "Timespan value already recorded. New value discarded.",
                None,
            );
        };
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value as an integer.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<u64> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        ) {
            Some(Metric::Timespan(time, time_unit)) => Some(time_unit.duration_convert(time)),
            _ => None,
        }
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::time::Duration;
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A timespan metric.
///
/// Timespans are used to make a measurement of how much time is spent in a particular task.
#[derive(Debug)]
pub struct TimespanMetric {
    // Shared metadata (name, category, send_in_pings, lifetime, ...) common to all metric types.
    meta: CommonMetricData,
    // The resolution the elapsed time is reported in.
    time_unit: TimeUnit,
    // Timestamp recorded by `set_start`; `None` when no timespan is running.
    start_time: Option<u64>,
}
impl MetricType for TimespanMetric {
    /// Returns the common metric metadata.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    /// Returns mutable access to the common metric metadata.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl TimespanMetric {
    /// Creates a new timespan metric.
    pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
        Self {
            meta,
            time_unit,
            start_time: None,
        }
    }

    /// Starts tracking time for the provided metric.
    ///
    /// This records an error if it's already tracking time (i.e. start was already
    /// called with no corresponding `stop`): in that case the original
    /// start time will be preserved.
    pub fn set_start(&mut self, glean: &Glean, start_time: u64) {
        if !self.should_record(glean) {
            return;
        }

        if self.start_time.is_some() {
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidState,
                "Timespan already started",
                None,
            );
            return;
        }

        self.start_time = Some(start_time);
    }

    /// Stops tracking time for the provided metric. Sets the metric to the elapsed time.
    ///
    /// This will record an error if no `start` was called, or if the stop time
    /// precedes the start time.
    pub fn set_stop(&mut self, glean: &Glean, stop_time: u64) {
        if !self.should_record(glean) {
            // Reset timer when disabled, so that we don't record timespans across
            // disabled/enabled toggling.
            self.start_time = None;
            return;
        }

        let start_time = match self.start_time.take() {
            Some(start_time) => start_time,
            None => {
                record_error(
                    glean,
                    &self.meta,
                    ErrorType::InvalidState,
                    "Timespan not running",
                    None,
                );
                return;
            }
        };

        // A plain `stop_time - start_time` would panic in debug builds (and wrap
        // around in release builds) if the clock went backwards between `start`
        // and `stop`. Subtract defensively and report an error instead, mirroring
        // `Timings::set_stop` in the timing distribution implementation.
        let duration = match stop_time.checked_sub(start_time) {
            Some(duration) => duration,
            None => {
                record_error(
                    glean,
                    &self.meta,
                    ErrorType::InvalidValue,
                    "Timespan stopped with negative duration",
                    None,
                );
                return;
            }
        };
        let duration = Duration::from_nanos(duration);
        self.set_raw(glean, duration, false);
    }

    /// Aborts a previous `start` call. No error is recorded if no `start` was called.
    pub fn cancel(&mut self) {
        self.start_time = None;
    }

    /// Explicitly sets the timespan value.
    ///
    /// This API should only be used if your library or application requires recording
    /// times in a way that can not make use of `start`/`stop`/`cancel`.
    ///
    /// Care should be taken using this if the ping lifetime might contain more than one
    /// timespan measurement. To be safe, `set_raw` should generally be followed by
    /// sending a custom ping containing the timespan.
    ///
    /// # Arguments
    ///
    /// * `elapsed` - The elapsed time to record.
    /// * `overwrite` - Whether or not to overwrite existing data.
    pub fn set_raw(&self, glean: &Glean, elapsed: Duration, overwrite: bool) {
        if !self.should_record(glean) {
            return;
        }

        if self.start_time.is_some() {
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidState,
                "Timespan already running. Raw value not recorded.",
                None,
            );
            return;
        }

        let mut report_value_exists: bool = false;
        glean.storage().record_with(glean, &self.meta, |old_value| {
            if overwrite {
                Metric::Timespan(elapsed, self.time_unit)
            } else {
                match old_value {
                    Some(old @ Metric::Timespan(..)) => {
                        // If some value already exists, report an error.
                        // We do this out of the storage since recording an
                        // error accesses the storage as well.
                        report_value_exists = true;
                        old
                    }
                    _ => Metric::Timespan(elapsed, self.time_unit),
                }
            }
        });

        if report_value_exists {
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidState,
                "Timespan value already recorded. New value discarded.",
                None,
            );
        };
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value as an integer,
    /// converted to this metric's time unit.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<u64> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        ) {
            Some(Metric::Timespan(time, time_unit)) => Some(time_unit.duration_convert(time)),
            _ => None,
        }
    }
}

Просмотреть файл

@ -1,408 +1,408 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
// The base of the logarithm used to determine bucketing
const LOG_BASE: f64 = 2.0;
// The buckets per each order of magnitude of the logarithm.
const BUCKETS_PER_MAGNITUDE: f64 = 8.0;
// Maximum time, which means we retain a maximum of 316 buckets.
// It is automatically adjusted based on the `time_unit` parameter
// so that:
//
// - `nanosecond` - 10 minutes
// - `microsecond` - ~6.94 days
// - `millisecond` - ~19 years
//
// (The raw value is 10 minutes expressed in nanoseconds.)
const MAX_SAMPLE_TIME: u64 = 1000 * 1000 * 1000 * 60 * 10;
/// Identifier for a running timer.
pub type TimerId = u64;
#[derive(Debug, Clone)]
struct Timings {
    // The id to hand out on the next `set_start` call; incremented monotonically.
    next_id: TimerId,
    // Maps a running timer's id to its start timestamp (nanoseconds).
    start_times: HashMap<TimerId, u64>,
}
/// Track different running timers, identified by a `TimerId`.
impl Timings {
    /// Create a new timing manager with no running timers.
    fn new() -> Self {
        Self {
            next_id: 0,
            start_times: HashMap::new(),
        }
    }

    /// Start a new timer and set it to the `start_time`.
    ///
    /// Returns a new `TimerId` identifying the timer.
    fn set_start(&mut self, start_time: u64) -> TimerId {
        let timer_id = self.next_id;
        self.next_id += 1;
        self.start_times.insert(timer_id, start_time);
        timer_id
    }

    /// Stop the timer and return the elapsed time.
    ///
    /// Returns an error if the `id` does not correspond to a running timer.
    /// Returns an error if the stop time is before the start time.
    ///
    /// ## Note
    ///
    /// This API exists to satisfy the FFI requirements, where the clock is handled on the
    /// application side and passed in as a timestamp.
    fn set_stop(&mut self, id: TimerId, stop_time: u64) -> Result<u64, (ErrorType, &str)> {
        let start_time = self
            .start_times
            .remove(&id)
            .ok_or((ErrorType::InvalidState, "Timing not running"))?;

        // `checked_sub` catches a stop timestamp earlier than the start timestamp.
        stop_time.checked_sub(start_time).ok_or((
            ErrorType::InvalidValue,
            "Timer stopped with negative duration",
        ))
    }

    /// Cancel and remove the timer.
    fn cancel(&mut self, id: TimerId) {
        self.start_times.remove(&id);
    }
}
/// A timing distribution metric.
///
/// Timing distributions are used to accumulate and store time measurement, for analyzing distributions of the timing data.
#[derive(Debug)]
pub struct TimingDistributionMetric {
    // Common metric data (name, category, send_in_pings, lifetime, ...).
    meta: CommonMetricData,
    // Unit incoming samples are expressed in; samples are converted to
    // nanoseconds before being accumulated.
    time_unit: TimeUnit,
    // Bookkeeping for concurrently running timers.
    timings: Timings,
}
/// Create a snapshot of the histogram with a time unit.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
    DistributionData {
        // **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
        // specialized snapshot function.
        values: hist.snapshot(),
        // Total of all accumulated samples.
        sum: hist.sum(),
    }
}
impl MetricType for TimingDistributionMetric {
    // Read-only access to the common metric data.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    // Mutable access to the common metric data.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl TimingDistributionMetric {
    /// Creates a new timing distribution metric.
    pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
        Self {
            meta,
            time_unit,
            timings: Timings::new(),
        }
    }
    /// Starts tracking time for the provided metric.
    ///
    /// This records an error if its already tracking time (i.e. start was already
    /// called with no corresponding [stop]): in that case the original
    /// start time will be preserved.
    ///
    /// # Arguments
    ///
    /// * `start_time` - Timestamp in nanoseconds.
    ///
    /// # Returns
    ///
    /// A unique `TimerId` for the new timer.
    pub fn set_start(&mut self, start_time: u64) -> TimerId {
        self.timings.set_start(start_time)
    }
    /// Stops tracking time for the provided metric and associated timer id.
    ///
    /// Adds a count to the corresponding bucket in the timing distribution.
    /// This will record an error if no `start` was called.
    ///
    /// # Arguments
    ///
    /// * `id` - The `TimerId` to associate with this timing. This allows
    /// for concurrent timing of events associated with different ids to the
    /// same timespan metric.
    /// * `stop_time` - Timestamp in nanoseconds.
    pub fn set_stop_and_accumulate(&mut self, glean: &Glean, id: TimerId, stop_time: u64) {
        // Duration is in nanoseconds.
        let mut duration = match self.timings.set_stop(id, stop_time) {
            Err((err_type, err_msg)) => {
                record_error(glean, &self.meta, err_type, err_msg, None);
                return;
            }
            Ok(duration) => duration,
        };
        // Clamp the duration to [1 time_unit, MAX_SAMPLE_TIME time_units],
        // expressed in nanoseconds.
        let min_sample_time = self.time_unit.as_nanos(1);
        let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);
        duration = if duration < min_sample_time {
            // If measurement is less than the minimum, just truncate. This is
            // not recorded as an error.
            min_sample_time
        } else if duration > max_sample_time {
            let msg = format!(
                "Sample is longer than the max for a time_unit of {:?} ({} ns)",
                self.time_unit, max_sample_time
            );
            record_error(glean, &self.meta, ErrorType::InvalidOverflow, msg, None);
            max_sample_time
        } else {
            duration
        };
        // Note: the timer is consumed and any clamping error is recorded above
        // even when the metric is disabled; only the accumulation is skipped.
        if !self.should_record(glean) {
            return;
        }
        glean
            .storage()
            .record_with(glean, &self.meta, |old_value| match old_value {
                Some(Metric::TimingDistribution(mut hist)) => {
                    hist.accumulate(duration);
                    Metric::TimingDistribution(hist)
                }
                _ => {
                    // First sample in this storage: start a fresh functional histogram.
                    let mut hist = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
                    hist.accumulate(duration);
                    Metric::TimingDistribution(hist)
                }
            });
    }
    /// Aborts a previous `set_start` call. No error is recorded if no `set_start`
    /// was called.
    ///
    /// # Arguments
    ///
    /// * `id` - The `TimerId` to associate with this timing. This allows
    /// for concurrent timing of events associated with different ids to the
    /// same timing distribution metric.
    pub fn cancel(&mut self, id: TimerId) {
        self.timings.cancel(id);
    }
    /// Accumulates the provided signed samples in the metric.
    ///
    /// This is required so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// Please note that this assumes that the provided samples are already in the
    /// "unit" declared by the instance of the implementing metric type (e.g. if the
    /// implementing class is a [TimingDistributionMetricType] and the instance this
    /// method was called on is using [TimeUnit.Second], then `samples` are assumed
    /// to be in that unit).
    ///
    /// # Arguments
    ///
    /// * `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and report an `ErrorType::InvalidValue`
    /// for each of them. Reports an `ErrorType::InvalidOverflow` error for samples that
    /// are longer than `MAX_SAMPLE_TIME`.
    pub fn accumulate_samples_signed(&mut self, glean: &Glean, samples: Vec<i64>) {
        if !self.should_record(glean) {
            return;
        }
        // Invalid samples are only counted inside the storage closure; the
        // errors are reported after it returns, since recording an error
        // accesses the storage as well.
        let mut num_negative_samples = 0;
        let mut num_too_long_samples = 0;
        let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);
        glean.storage().record_with(glean, &self.meta, |old_value| {
            let mut hist = match old_value {
                Some(Metric::TimingDistribution(hist)) => hist,
                _ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
            };
            for &sample in samples.iter() {
                if sample < 0 {
                    num_negative_samples += 1;
                } else {
                    let mut sample = sample as u64;
                    // Check the range prior to converting the incoming unit to
                    // nanoseconds, so we can compare against the constant
                    // MAX_SAMPLE_TIME.
                    if sample == 0 {
                        // Zero samples are bumped to the minimum; not an error.
                        sample = 1;
                    } else if sample > MAX_SAMPLE_TIME {
                        num_too_long_samples += 1;
                        sample = MAX_SAMPLE_TIME;
                    }
                    sample = self.time_unit.as_nanos(sample);
                    hist.accumulate(sample);
                }
            }
            Metric::TimingDistribution(hist)
        });
        if num_negative_samples > 0 {
            let msg = format!("Accumulated {} negative samples", num_negative_samples);
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidValue,
                msg,
                num_negative_samples,
            );
        }
        if num_too_long_samples > 0 {
            let msg = format!(
                "{} samples are longer than the maximum of {}",
                num_too_long_samples, max_sample_time
            );
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidOverflow,
                msg,
                num_too_long_samples,
            );
        }
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value as an integer.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        ) {
            Some(Metric::TimingDistribution(hist)) => Some(snapshot(&hist)),
            _ => None,
        }
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently-stored histogram as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(
        &self,
        glean: &Glean,
        storage_name: &str,
    ) -> Option<String> {
        self.test_get_value(glean, storage_name)
            .map(|snapshot| serde_json::to_string(&snapshot).unwrap())
    }
}
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn can_snapshot() {
        use serde_json::json;
        let mut hist = Histogram::functional(2.0, 8.0);
        // Samples 1..=10 each land in their own bucket at this resolution,
        // so every recorded value maps to a count of 1.
        for i in 1..=10 {
            hist.accumulate(i);
        }
        let snap = snapshot(&hist);
        let expected_json = json!({
            "sum": 55,
            "values": {
                "1": 1,
                "2": 1,
                "3": 1,
                "4": 1,
                "5": 1,
                "6": 1,
                "7": 1,
                "8": 1,
                "9": 1,
                "10": 1,
                "11": 0,
            },
        });
        assert_eq!(expected_json, json!(snap));
    }
    #[test]
    fn can_snapshot_sparse() {
        use serde_json::json;
        let mut hist = Histogram::functional(2.0, 8.0);
        hist.accumulate(1024);
        hist.accumulate(1024);
        hist.accumulate(1116);
        hist.accumulate(1448);
        let snap = snapshot(&hist);
        // Only buckets in the neighborhood of recorded samples appear in the
        // snapshot; larger gaps between them are omitted entirely.
        let expected_json = json!({
            "sum": 4612,
            "values": {
                "1024": 2,
                "1116": 1,
                "1217": 0,
                "1327": 0,
                "1448": 1,
                "1579": 0,
            },
        });
        assert_eq!(expected_json, json!(snap));
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
// The base of the logarithm used to determine bucketing
const LOG_BASE: f64 = 2.0;
// The buckets per each order of magnitude of the logarithm.
const BUCKETS_PER_MAGNITUDE: f64 = 8.0;
// Maximum time, which means we retain a maximum of 316 buckets.
// It is automatically adjusted based on the `time_unit` parameter
// so that:
//
// - `nanosecond` - 10 minutes
// - `microsecond` - ~6.94 days
// - `millisecond` - ~19 years
//
// (The raw value is 10 minutes expressed in nanoseconds.)
const MAX_SAMPLE_TIME: u64 = 1000 * 1000 * 1000 * 60 * 10;
/// Identifier for a running timer.
pub type TimerId = u64;
#[derive(Debug, Clone)]
struct Timings {
    // The id to hand out on the next `set_start` call; incremented monotonically.
    next_id: TimerId,
    // Maps a running timer's id to its start timestamp (nanoseconds).
    start_times: HashMap<TimerId, u64>,
}
/// Track different running timers, identified by a `TimerId`.
impl Timings {
    /// Create a new timing manager with no running timers.
    fn new() -> Self {
        Self {
            next_id: 0,
            start_times: HashMap::new(),
        }
    }

    /// Start a new timer and set it to the `start_time`.
    ///
    /// Returns a new `TimerId` identifying the timer.
    fn set_start(&mut self, start_time: u64) -> TimerId {
        let timer_id = self.next_id;
        self.next_id += 1;
        self.start_times.insert(timer_id, start_time);
        timer_id
    }

    /// Stop the timer and return the elapsed time.
    ///
    /// Returns an error if the `id` does not correspond to a running timer.
    /// Returns an error if the stop time is before the start time.
    ///
    /// ## Note
    ///
    /// This API exists to satisfy the FFI requirements, where the clock is handled on the
    /// application side and passed in as a timestamp.
    fn set_stop(&mut self, id: TimerId, stop_time: u64) -> Result<u64, (ErrorType, &str)> {
        let start_time = self
            .start_times
            .remove(&id)
            .ok_or((ErrorType::InvalidState, "Timing not running"))?;

        // `checked_sub` catches a stop timestamp earlier than the start timestamp.
        stop_time.checked_sub(start_time).ok_or((
            ErrorType::InvalidValue,
            "Timer stopped with negative duration",
        ))
    }

    /// Cancel and remove the timer.
    fn cancel(&mut self, id: TimerId) {
        self.start_times.remove(&id);
    }
}
/// A timing distribution metric.
///
/// Timing distributions are used to accumulate and store time measurement, for analyzing distributions of the timing data.
#[derive(Debug)]
pub struct TimingDistributionMetric {
    // Common metric data (name, category, send_in_pings, lifetime, ...).
    meta: CommonMetricData,
    // Unit incoming samples are expressed in; samples are converted to
    // nanoseconds before being accumulated.
    time_unit: TimeUnit,
    // Bookkeeping for concurrently running timers.
    timings: Timings,
}
/// Create a snapshot of the histogram with a time unit.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
    DistributionData {
        // **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
        // specialized snapshot function.
        values: hist.snapshot(),
        // Total of all accumulated samples.
        sum: hist.sum(),
    }
}
impl MetricType for TimingDistributionMetric {
    // Read-only access to the common metric data.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    // Mutable access to the common metric data.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl TimingDistributionMetric {
    /// Creates a new timing distribution metric.
    pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
        Self {
            meta,
            time_unit,
            timings: Timings::new(),
        }
    }
    /// Starts tracking time for the provided metric.
    ///
    /// This records an error if its already tracking time (i.e. start was already
    /// called with no corresponding [stop]): in that case the original
    /// start time will be preserved.
    ///
    /// # Arguments
    ///
    /// * `start_time` - Timestamp in nanoseconds.
    ///
    /// # Returns
    ///
    /// A unique `TimerId` for the new timer.
    pub fn set_start(&mut self, start_time: u64) -> TimerId {
        self.timings.set_start(start_time)
    }
    /// Stops tracking time for the provided metric and associated timer id.
    ///
    /// Adds a count to the corresponding bucket in the timing distribution.
    /// This will record an error if no `start` was called.
    ///
    /// # Arguments
    ///
    /// * `id` - The `TimerId` to associate with this timing. This allows
    /// for concurrent timing of events associated with different ids to the
    /// same timespan metric.
    /// * `stop_time` - Timestamp in nanoseconds.
    pub fn set_stop_and_accumulate(&mut self, glean: &Glean, id: TimerId, stop_time: u64) {
        // Duration is in nanoseconds.
        let mut duration = match self.timings.set_stop(id, stop_time) {
            Err((err_type, err_msg)) => {
                record_error(glean, &self.meta, err_type, err_msg, None);
                return;
            }
            Ok(duration) => duration,
        };
        // Clamp the duration to [1 time_unit, MAX_SAMPLE_TIME time_units],
        // expressed in nanoseconds.
        let min_sample_time = self.time_unit.as_nanos(1);
        let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);
        duration = if duration < min_sample_time {
            // If measurement is less than the minimum, just truncate. This is
            // not recorded as an error.
            min_sample_time
        } else if duration > max_sample_time {
            let msg = format!(
                "Sample is longer than the max for a time_unit of {:?} ({} ns)",
                self.time_unit, max_sample_time
            );
            record_error(glean, &self.meta, ErrorType::InvalidOverflow, msg, None);
            max_sample_time
        } else {
            duration
        };
        // Note: the timer is consumed and any clamping error is recorded above
        // even when the metric is disabled; only the accumulation is skipped.
        if !self.should_record(glean) {
            return;
        }
        glean
            .storage()
            .record_with(glean, &self.meta, |old_value| match old_value {
                Some(Metric::TimingDistribution(mut hist)) => {
                    hist.accumulate(duration);
                    Metric::TimingDistribution(hist)
                }
                _ => {
                    // First sample in this storage: start a fresh functional histogram.
                    let mut hist = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
                    hist.accumulate(duration);
                    Metric::TimingDistribution(hist)
                }
            });
    }
    /// Aborts a previous `set_start` call. No error is recorded if no `set_start`
    /// was called.
    ///
    /// # Arguments
    ///
    /// * `id` - The `TimerId` to associate with this timing. This allows
    /// for concurrent timing of events associated with different ids to the
    /// same timing distribution metric.
    pub fn cancel(&mut self, id: TimerId) {
        self.timings.cancel(id);
    }
    /// Accumulates the provided signed samples in the metric.
    ///
    /// This is required so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// Please note that this assumes that the provided samples are already in the
    /// "unit" declared by the instance of the implementing metric type (e.g. if the
    /// implementing class is a [TimingDistributionMetricType] and the instance this
    /// method was called on is using [TimeUnit.Second], then `samples` are assumed
    /// to be in that unit).
    ///
    /// # Arguments
    ///
    /// * `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and report an `ErrorType::InvalidValue`
    /// for each of them. Reports an `ErrorType::InvalidOverflow` error for samples that
    /// are longer than `MAX_SAMPLE_TIME`.
    pub fn accumulate_samples_signed(&mut self, glean: &Glean, samples: Vec<i64>) {
        if !self.should_record(glean) {
            return;
        }
        // Invalid samples are only counted inside the storage closure; the
        // errors are reported after it returns, since recording an error
        // accesses the storage as well.
        let mut num_negative_samples = 0;
        let mut num_too_long_samples = 0;
        let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);
        glean.storage().record_with(glean, &self.meta, |old_value| {
            let mut hist = match old_value {
                Some(Metric::TimingDistribution(hist)) => hist,
                _ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
            };
            for &sample in samples.iter() {
                if sample < 0 {
                    num_negative_samples += 1;
                } else {
                    let mut sample = sample as u64;
                    // Check the range prior to converting the incoming unit to
                    // nanoseconds, so we can compare against the constant
                    // MAX_SAMPLE_TIME.
                    if sample == 0 {
                        // Zero samples are bumped to the minimum; not an error.
                        sample = 1;
                    } else if sample > MAX_SAMPLE_TIME {
                        num_too_long_samples += 1;
                        sample = MAX_SAMPLE_TIME;
                    }
                    sample = self.time_unit.as_nanos(sample);
                    hist.accumulate(sample);
                }
            }
            Metric::TimingDistribution(hist)
        });
        if num_negative_samples > 0 {
            let msg = format!("Accumulated {} negative samples", num_negative_samples);
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidValue,
                msg,
                num_negative_samples,
            );
        }
        if num_too_long_samples > 0 {
            let msg = format!(
                "{} samples are longer than the maximum of {}",
                num_too_long_samples, max_sample_time
            );
            record_error(
                glean,
                &self.meta,
                ErrorType::InvalidOverflow,
                msg,
                num_too_long_samples,
            );
        }
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value as an integer.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        ) {
            Some(Metric::TimingDistribution(hist)) => Some(snapshot(&hist)),
            _ => None,
        }
    }
    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently-stored histogram as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value_as_json_string(
        &self,
        glean: &Glean,
        storage_name: &str,
    ) -> Option<String> {
        self.test_get_value(glean, storage_name)
            .map(|snapshot| serde_json::to_string(&snapshot).unwrap())
    }
}
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn can_snapshot() {
        use serde_json::json;
        let mut hist = Histogram::functional(2.0, 8.0);
        // Samples 1..=10 each land in their own bucket at this resolution,
        // so every recorded value maps to a count of 1.
        for i in 1..=10 {
            hist.accumulate(i);
        }
        let snap = snapshot(&hist);
        let expected_json = json!({
            "sum": 55,
            "values": {
                "1": 1,
                "2": 1,
                "3": 1,
                "4": 1,
                "5": 1,
                "6": 1,
                "7": 1,
                "8": 1,
                "9": 1,
                "10": 1,
                "11": 0,
            },
        });
        assert_eq!(expected_json, json!(snap));
    }
    #[test]
    fn can_snapshot_sparse() {
        use serde_json::json;
        let mut hist = Histogram::functional(2.0, 8.0);
        hist.accumulate(1024);
        hist.accumulate(1024);
        hist.accumulate(1116);
        hist.accumulate(1448);
        let snap = snapshot(&hist);
        // Only buckets in the neighborhood of recorded samples appear in the
        // snapshot; larger gaps between them are omitted entirely.
        let expected_json = json!({
            "sum": 4612,
            "values": {
                "1024": 2,
                "1116": 1,
                "1217": 0,
                "1327": 0,
                "1448": 1,
                "1579": 0,
            },
        });
        assert_eq!(expected_json, json!(snap));
    }
}

Просмотреть файл

@ -1,120 +1,104 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use uuid::Uuid;
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// An UUID metric.
///
/// Stores UUID v4 (randomly generated) values.
#[derive(Clone, Debug)]
pub struct UuidMetric {
    // Common metric data (name, category, send_in_pings, lifetime, ...).
    meta: CommonMetricData,
}
impl MetricType for UuidMetric {
    // Read-only access to the common metric data.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    // Mutable access to the common metric data.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl UuidMetric {
    /// Creates a new UUID metric
    pub fn new(meta: CommonMetricData) -> Self {
        Self { meta }
    }

    /// Sets to the specified value.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    /// * `value` - The UUID to set the metric to.
    pub fn set(&self, glean: &Glean, value: Uuid) {
        if !self.should_record(glean) {
            return;
        }

        let s = value.to_string();
        let value = Metric::Uuid(s);
        glean.storage().record(glean, &self.meta, &value)
    }

    /// Sets to the specified value, from a string.
    ///
    /// This should only be used from FFI. When calling directly from Rust, it
    /// is better to use `set`.
    ///
    /// Records an `ErrorType::InvalidValue` error if `value` is not a valid UUID.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    /// * `value` - The UUID to set the metric to.
    pub fn set_from_str(&self, glean: &Glean, value: &str) {
        if !self.should_record(glean) {
            return;
        }

        // `Uuid` is already in scope; `value` is already a `&str`, so no
        // extra borrow is needed (clippy::needless_borrow).
        if let Ok(uuid) = Uuid::parse_str(value) {
            self.set(glean, uuid);
        } else {
            let msg = format!("Unexpected UUID value '{}'", value);
            record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
        }
    }

    /// Generates a new random UUID and set the metric to it.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    pub fn generate_and_set(&self, storage: &Glean) -> Uuid {
        let uuid = Uuid::new_v4();
        self.set(storage, uuid);
        uuid
    }

    /// Gets the stored Uuid value.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance this metric belongs to.
    /// * `storage_name` - the storage name to look into.
    ///
    /// # Returns
    ///
    /// The stored value or `None` if nothing stored.
    pub(crate) fn get_value(&self, glean: &Glean, storage_name: &str) -> Option<Uuid> {
        match StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta().identifier(glean),
        ) {
            Some(Metric::Uuid(uuid)) => Uuid::parse_str(&uuid).ok(),
            _ => None,
        }
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value as a string.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<Uuid> {
        self.get_value(glean, storage_name)
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use uuid::Uuid;
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// An UUID metric.
///
/// Stores UUID v4 (randomly generated) values.
#[derive(Clone, Debug)]
pub struct UuidMetric {
    // Common metric data (name, category, send_in_pings, lifetime, ...).
    meta: CommonMetricData,
}
impl MetricType for UuidMetric {
    // Read-only access to the common metric data.
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }
    // Mutable access to the common metric data.
    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl UuidMetric {
    /// Creates a new UUID metric
    pub fn new(meta: CommonMetricData) -> Self {
        Self { meta }
    }

    /// Sets to the specified value.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    /// * `value` - The UUID to set the metric to.
    pub fn set(&self, glean: &Glean, value: Uuid) {
        if !self.should_record(glean) {
            return;
        }

        let serialized = value.to_string();
        glean
            .storage()
            .record(glean, &self.meta, &Metric::Uuid(serialized))
    }

    /// Generates a new random UUID and set the metric to it.
    ///
    /// # Arguments
    ///
    /// * `glean` - The Glean instance this metric belongs to.
    pub fn generate_and_set(&self, storage: &Glean) -> Uuid {
        let fresh = Uuid::new_v4();
        self.set(storage, fresh);
        fresh
    }

    /// Gets the stored Uuid value.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance this metric belongs to.
    /// * `storage_name` - the storage name to look into.
    ///
    /// # Returns
    ///
    /// The stored value or `None` if nothing stored.
    pub(crate) fn get_value(&self, glean: &Glean, storage_name: &str) -> Option<Uuid> {
        let stored = StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta().identifier(glean),
        );
        if let Some(Metric::Uuid(uuid)) = stored {
            Uuid::parse_str(&uuid).ok()
        } else {
            None
        }
    }

    /// **Test-only API (exported for FFI purposes).**
    ///
    /// Gets the currently stored value as a string.
    ///
    /// This doesn't clear the stored value.
    pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<String> {
        let stored = StorageManager.snapshot_metric(
            glean.storage(),
            storage_name,
            &self.meta.identifier(glean),
        );
        if let Some(Metric::Uuid(s)) = stored {
            Some(s)
        } else {
            None
        }
    }
}

782
third_party/rust/glean-core/src/ping/mod.rs поставляемый
Просмотреть файл

@ -1,391 +1,391 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Ping collection, assembly & submission.
use std::fs::{create_dir_all, File};
use std::io::Write;
use std::path::{Path, PathBuf};
use log::info;
use serde_json::{json, Value as JsonValue};
use crate::common_metric_data::{CommonMetricData, Lifetime};
use crate::metrics::{CounterMetric, DatetimeMetric, Metric, MetricType, PingType, TimeUnit};
use crate::storage::StorageManager;
use crate::util::{get_iso_time_string, local_now_with_offset};
use crate::{
Glean, Result, DELETION_REQUEST_PINGS_DIRECTORY, INTERNAL_STORAGE, PENDING_PINGS_DIRECTORY,
};
/// Collect a ping's data, assemble it into its full payload and store it on disk.
///
/// Stateless unit struct: all data comes from the `Glean` instance passed to each method.
pub struct PingMaker;
/// Recursively merge the JSON value `b` into `a`.
///
/// Two objects are merged key by key; any other combination
/// overwrites `a` with a clone of `b`.
fn merge(a: &mut JsonValue, b: &JsonValue) {
    match (a, b) {
        (JsonValue::Object(dst), JsonValue::Object(src)) => {
            for (key, value) in src {
                merge(dst.entry(key.clone()).or_insert(JsonValue::Null), value);
            }
        }
        (dst, src) => *dst = src.clone(),
    }
}
impl Default for PingMaker {
    // Delegates to `new`, so `PingMaker::default()` and `PingMaker::new()` are equivalent.
    fn default() -> Self {
        Self::new()
    }
}
impl PingMaker {
    /// Creates a new PingMaker.
    //
    // `PingMaker` is a unit struct, so construction is trivial.
    pub fn new() -> Self {
        Self
    }
    /// Gets, and then increments, the sequence number for a given ping.
    ///
    /// This is crate-internal exclusively for enabling the migration tests.
    pub(super) fn get_ping_seq(&self, glean: &Glean, storage_name: &str) -> usize {
        // Sequence numbers are stored as a counter under a name that includes the storage name
        let seq = CounterMetric::new(CommonMetricData {
            name: format!("{}#sequence", storage_name),
            // We don't need a category, the name is already unique
            category: "".into(),
            send_in_pings: vec![INTERNAL_STORAGE.into()],
            lifetime: Lifetime::User,
            ..Default::default()
        });
        // Missing or non-counter data counts as sequence number 0.
        let current_seq = match StorageManager.snapshot_metric(
            glean.storage(),
            INTERNAL_STORAGE,
            &seq.meta().identifier(glean),
        ) {
            Some(Metric::Counter(i)) => i,
            _ => 0,
        };
        // Increase to next sequence id
        seq.add(glean, 1);
        current_seq as usize
    }
    /// Gets the formatted start and end times for this ping and update for the next ping.
    ///
    /// The current end time is persisted so it becomes the next ping's start time.
    fn get_start_end_times(&self, glean: &Glean, storage_name: &str) -> (String, String) {
        // Ping times are truncated to minute resolution.
        let time_unit = TimeUnit::Minute;
        let start_time = DatetimeMetric::new(
            CommonMetricData {
                name: format!("{}#start", storage_name),
                category: "".into(),
                send_in_pings: vec![INTERNAL_STORAGE.into()],
                lifetime: Lifetime::User,
                ..Default::default()
            },
            time_unit,
        );
        // "start_time" is the time the ping was generated the last time.
        // If not available, we use the date the Glean object was initialized.
        let start_time_data = start_time
            .get_value(glean, INTERNAL_STORAGE)
            .unwrap_or_else(|| glean.start_time());
        let end_time_data = local_now_with_offset();
        // Update the start time with the current time.
        start_time.set(glean, Some(end_time_data));
        // Format the times.
        let start_time_data = get_iso_time_string(start_time_data, time_unit);
        let end_time_data = get_iso_time_string(end_time_data, time_unit);
        (start_time_data, end_time_data)
    }
fn get_ping_info(&self, glean: &Glean, storage_name: &str, reason: Option<&str>) -> JsonValue {
let (start_time, end_time) = self.get_start_end_times(glean, storage_name);
let mut map = json!({
"seq": self.get_ping_seq(glean, storage_name),
"start_time": start_time,
"end_time": end_time,
});
if let Some(reason) = reason {
map.as_object_mut()
.unwrap() // safe unwrap, we created the object above
.insert("reason".to_string(), JsonValue::String(reason.to_string()));
};
// Get the experiment data, if available.
if let Some(experiment_data) =
StorageManager.snapshot_experiments_as_json(glean.storage(), INTERNAL_STORAGE)
{
map.as_object_mut()
.unwrap() // safe unwrap, we created the object above
.insert("experiments".to_string(), experiment_data);
};
map
}
fn get_client_info(&self, glean: &Glean, include_client_id: bool) -> JsonValue {
// Add the "telemetry_sdk_build", which is the glean-core version.
let mut map = json!({
"telemetry_sdk_build": crate::GLEAN_VERSION,
});
// Flatten the whole thing.
if let Some(client_info) =
StorageManager.snapshot_as_json(glean.storage(), "glean_client_info", true)
{
let client_info_obj = client_info.as_object().unwrap(); // safe unwrap, snapshot always returns an object.
for (_key, value) in client_info_obj {
merge(&mut map, value);
}
} else {
log::warn!("Empty client info data.");
}
if !include_client_id {
// safe unwrap, we created the object above
map.as_object_mut().unwrap().remove("client_id");
}
json!(map)
}
/// Build the metadata JSON to be persisted with a ping.
///
/// Currently the only type of metadata we need to persist is the value of the `X-Debug-ID` header.
///
/// # Arguments
///
/// * `glean` - the Glean instance to collect metadata from.
///
/// # Returns
///
/// A JSON object representing the metadata that needs to be persisted with this ping.
///
/// The structure of the metadata json is:
///
/// ```json
/// {
/// "headers": {
/// "X-Debug-ID": "test-tag"
/// }
/// }
/// ```
fn get_metadata(&self, glean: &Glean) -> Option<JsonValue> {
let mut headers_map = json!({});
if let Some(debug_view_tag) = glean.debug_view_tag() {
headers_map
.as_object_mut()
.unwrap() // safe unwrap, we created the object above
.insert(
"X-Debug-ID".to_string(),
JsonValue::String(debug_view_tag.to_string()),
);
}
if let Some(source_tags) = glean.source_tags() {
headers_map
.as_object_mut()
.unwrap() // safe unwrap, we created the object above
.insert(
"X-Source-Tags".to_string(),
JsonValue::String(source_tags.join(",")),
);
}
// safe unwrap, we created the object above
if !headers_map.as_object().unwrap().is_empty() {
Some(json!({
"headers": headers_map,
}))
} else {
None
}
}
/// Collects a snapshot for the given ping from storage and attach required meta information.
///
/// # Arguments
///
/// * `glean` - the Glean instance to collect data from.
/// * `ping` - the ping to collect for.
/// * `reason` - an optional reason code to include in the ping.
///
/// # Returns
///
/// A fully assembled JSON representation of the ping payload.
/// If there is no data stored for the ping, `None` is returned.
pub fn collect(
&self,
glean: &Glean,
ping: &PingType,
reason: Option<&str>,
) -> Option<JsonValue> {
info!("Collecting {}", ping.name);
let metrics_data = StorageManager.snapshot_as_json(glean.storage(), &ping.name, true);
let events_data = glean.event_storage().snapshot_as_json(&ping.name, true);
let is_empty = metrics_data.is_none() && events_data.is_none();
if !ping.send_if_empty && is_empty {
info!("Storage for {} empty. Bailing out.", ping.name);
return None;
} else if is_empty {
info!("Storage for {} empty. Ping will still be sent.", ping.name);
}
let ping_info = self.get_ping_info(glean, &ping.name, reason);
let client_info = self.get_client_info(glean, ping.include_client_id);
let mut json = json!({
"ping_info": ping_info,
"client_info": client_info
});
let json_obj = json.as_object_mut()?;
if let Some(metrics_data) = metrics_data {
json_obj.insert("metrics".to_string(), metrics_data);
}
if let Some(events_data) = events_data {
json_obj.insert("events".to_string(), events_data);
}
Some(json)
}
/// Collects a snapshot for the given ping from storage and attach required meta information.
///
/// # Arguments
///
/// * `glean` - the Glean instance to collect data from.
/// * `ping` - the ping to collect for.
/// * `reason` - an optional reason code to include in the ping.
///
/// # Returns
///
/// A fully assembled ping payload in a string encoded as JSON.
/// If there is no data stored for the ping, `None` is returned.
pub fn collect_string(
&self,
glean: &Glean,
ping: &PingType,
reason: Option<&str>,
) -> Option<String> {
self.collect(glean, ping, reason)
.map(|ping| ::serde_json::to_string_pretty(&ping).unwrap())
}
/// Gets the path to a directory for ping storage.
///
/// The directory will be created inside the `data_path`.
/// The `pings` directory (and its parents) is created if it does not exist.
fn get_pings_dir(&self, data_path: &Path, ping_type: Option<&str>) -> std::io::Result<PathBuf> {
// Use a special directory for deletion-request pings
let pings_dir = match ping_type {
Some(ping_type) if ping_type == "deletion-request" => {
data_path.join(DELETION_REQUEST_PINGS_DIRECTORY)
}
_ => data_path.join(PENDING_PINGS_DIRECTORY),
};
create_dir_all(&pings_dir)?;
Ok(pings_dir)
}
/// Gets path to a directory for temporary storage.
///
/// The directory will be created inside the `data_path`.
/// The `tmp` directory (and its parents) is created if it does not exist.
fn get_tmp_dir(&self, data_path: &Path) -> std::io::Result<PathBuf> {
let pings_dir = data_path.join("tmp");
create_dir_all(&pings_dir)?;
Ok(pings_dir)
}
/// Stores a ping to disk in the pings directory.
pub fn store_ping(
&self,
glean: &Glean,
doc_id: &str,
ping_name: &str,
data_path: &Path,
url_path: &str,
ping_content: &JsonValue,
) -> std::io::Result<()> {
let pings_dir = self.get_pings_dir(data_path, Some(ping_name))?;
let temp_dir = self.get_tmp_dir(data_path)?;
// Write to a temporary location and then move when done,
// for transactional writes.
let temp_ping_path = temp_dir.join(doc_id);
let ping_path = pings_dir.join(doc_id);
log::debug!("Storing ping '{}' at '{}'", doc_id, ping_path.display());
{
let mut file = File::create(&temp_ping_path)?;
file.write_all(url_path.as_bytes())?;
file.write_all(b"\n")?;
file.write_all(::serde_json::to_string(ping_content)?.as_bytes())?;
if let Some(metadata) = self.get_metadata(glean) {
file.write_all(b"\n")?;
file.write_all(::serde_json::to_string(&metadata)?.as_bytes())?;
}
}
if let Err(e) = std::fs::rename(&temp_ping_path, &ping_path) {
log::warn!(
"Unable to move '{}' to '{}",
temp_ping_path.display(),
ping_path.display()
);
return Err(e);
}
Ok(())
}
/// Clears any pending pings in the queue.
pub fn clear_pending_pings(&self, data_path: &Path) -> Result<()> {
let pings_dir = self.get_pings_dir(data_path, None)?;
std::fs::remove_dir_all(&pings_dir)?;
create_dir_all(&pings_dir)?;
log::debug!("All pending pings deleted");
Ok(())
}
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::tests::new_glean;
    #[test]
    fn sequence_numbers_should_be_reset_when_toggling_uploading() {
        let (mut glean, _) = new_glean(None);
        let ping_maker = PingMaker::new();
        // Fresh storage: the sequence starts at 0 and advances on each read.
        assert_eq!(0, ping_maker.get_ping_seq(&glean, "custom"));
        assert_eq!(1, ping_maker.get_ping_seq(&glean, "custom"));
        // Disabling upload resets the counter, and reads while disabled
        // are expected not to advance it (it stays at 0).
        glean.set_upload_enabled(false);
        assert_eq!(0, ping_maker.get_ping_seq(&glean, "custom"));
        assert_eq!(0, ping_maker.get_ping_seq(&glean, "custom"));
        // Re-enabling upload starts a fresh sequence from 0 again.
        glean.set_upload_enabled(true);
        assert_eq!(0, ping_maker.get_ping_seq(&glean, "custom"));
        assert_eq!(1, ping_maker.get_ping_seq(&glean, "custom"));
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Ping collection, assembly & submission.
use std::fs::{create_dir_all, File};
use std::io::Write;
use std::path::{Path, PathBuf};
use log::info;
use serde_json::{json, Value as JsonValue};
use crate::common_metric_data::{CommonMetricData, Lifetime};
use crate::metrics::{CounterMetric, DatetimeMetric, Metric, MetricType, PingType, TimeUnit};
use crate::storage::StorageManager;
use crate::util::{get_iso_time_string, local_now_with_offset};
use crate::{
Glean, Result, DELETION_REQUEST_PINGS_DIRECTORY, INTERNAL_STORAGE, PENDING_PINGS_DIRECTORY,
};
/// Collect a ping's data, assemble it into its full payload and store it on disk.
///
/// This is a stateless unit struct; all context (the `Glean` instance,
/// paths, ping types) is passed into its methods.
pub struct PingMaker;
/// Recursively folds `b` into `a`.
///
/// Objects are merged key by key (recursing into shared keys); any
/// non-object value in `b` overwrites the corresponding slot in `a`.
fn merge(a: &mut JsonValue, b: &JsonValue) {
    match (a, b) {
        (JsonValue::Object(target), JsonValue::Object(source)) => {
            for (key, value) in source {
                let slot = target.entry(key.clone()).or_insert(JsonValue::Null);
                merge(slot, value);
            }
        }
        (slot, other) => *slot = other.clone(),
    }
}
impl Default for PingMaker {
fn default() -> Self {
Self::new()
}
}
impl PingMaker {
    /// Creates a new PingMaker.
    pub fn new() -> Self {
        Self
    }
    /// Gets, and then increments, the sequence number for a given ping.
    ///
    /// This is crate-internal exclusively for enabling the migration tests.
    pub(super) fn get_ping_seq(&self, glean: &Glean, storage_name: &str) -> usize {
        // Sequence numbers are stored as a counter under a name that includes the storage name
        let seq = CounterMetric::new(CommonMetricData {
            name: format!("{}#sequence", storage_name),
            // We don't need a category, the name is already unique
            category: "".into(),
            send_in_pings: vec![INTERNAL_STORAGE.into()],
            lifetime: Lifetime::User,
            ..Default::default()
        });
        let current_seq = match StorageManager.snapshot_metric(
            glean.storage(),
            INTERNAL_STORAGE,
            &seq.meta().identifier(glean),
        ) {
            Some(Metric::Counter(i)) => i,
            _ => 0,
        };
        // Increase to next sequence id
        seq.add(glean, 1);
        current_seq as usize
    }
    /// Gets the formatted start and end times for this ping and update for the next ping.
    fn get_start_end_times(&self, glean: &Glean, storage_name: &str) -> (String, String) {
        let time_unit = TimeUnit::Minute;
        let start_time = DatetimeMetric::new(
            CommonMetricData {
                name: format!("{}#start", storage_name),
                category: "".into(),
                send_in_pings: vec![INTERNAL_STORAGE.into()],
                lifetime: Lifetime::User,
                ..Default::default()
            },
            time_unit,
        );
        // "start_time" is the time the ping was generated the last time.
        // If not available, we use the date the Glean object was initialized.
        let start_time_data = start_time
            .get_value(glean, INTERNAL_STORAGE)
            .unwrap_or_else(|| glean.start_time());
        let end_time_data = local_now_with_offset();
        // Update the start time with the current time.
        start_time.set(glean, Some(end_time_data));
        // Format the times.
        let start_time_data = get_iso_time_string(start_time_data, time_unit);
        let end_time_data = get_iso_time_string(end_time_data, time_unit);
        (start_time_data, end_time_data)
    }
    /// Assembles the `ping_info` section for a ping: its sequence number,
    /// start/end times, an optional reason code and any active experiments.
    fn get_ping_info(&self, glean: &Glean, storage_name: &str, reason: Option<&str>) -> JsonValue {
        let (start_time, end_time) = self.get_start_end_times(glean, storage_name);
        let mut map = json!({
            "seq": self.get_ping_seq(glean, storage_name),
            "start_time": start_time,
            "end_time": end_time,
        });
        if let Some(reason) = reason {
            map.as_object_mut()
                .unwrap() // safe unwrap, we created the object above
                .insert("reason".to_string(), JsonValue::String(reason.to_string()));
        };
        // Get the experiment data, if available.
        if let Some(experiment_data) =
            StorageManager.snapshot_experiments_as_json(glean.storage(), INTERNAL_STORAGE)
        {
            map.as_object_mut()
                .unwrap() // safe unwrap, we created the object above
                .insert("experiments".to_string(), experiment_data);
        };
        map
    }
    /// Assembles the `client_info` section, flattening the stored client info
    /// snapshot into one object and optionally stripping the `client_id`.
    fn get_client_info(&self, glean: &Glean, include_client_id: bool) -> JsonValue {
        // Add the "telemetry_sdk_build", which is the glean-core version.
        let mut map = json!({
            "telemetry_sdk_build": crate::GLEAN_VERSION,
        });
        // Flatten the whole thing.
        if let Some(client_info) =
            StorageManager.snapshot_as_json(glean.storage(), "glean_client_info", true)
        {
            let client_info_obj = client_info.as_object().unwrap(); // safe unwrap, snapshot always returns an object.
            for (_key, value) in client_info_obj {
                merge(&mut map, value);
            }
        } else {
            log::warn!("Empty client info data.");
        }
        if !include_client_id {
            // safe unwrap, we created the object above
            map.as_object_mut().unwrap().remove("client_id");
        }
        // `map` is already a `JsonValue`; re-wrapping it in `json!` would only
        // re-serialize it for no benefit.
        map
    }
    /// Build the metadata JSON to be persisted with a ping.
    ///
    /// Currently the only type of metadata we need to persist is the value of the `X-Debug-ID` header.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance to collect metadata from.
    ///
    /// # Returns
    ///
    /// A JSON object representing the metadata that needs to be persisted with this ping.
    ///
    /// The structure of the metadata json is:
    ///
    /// ```json
    /// {
    ///     "headers": {
    ///         "X-Debug-ID": "test-tag"
    ///     }
    /// }
    /// ```
    fn get_metadata(&self, glean: &Glean) -> Option<JsonValue> {
        let mut headers_map = json!({});
        if let Some(debug_view_tag) = glean.debug_view_tag() {
            headers_map
                .as_object_mut()
                .unwrap() // safe unwrap, we created the object above
                .insert(
                    "X-Debug-ID".to_string(),
                    JsonValue::String(debug_view_tag.to_string()),
                );
        }
        if let Some(source_tags) = glean.source_tags() {
            headers_map
                .as_object_mut()
                .unwrap() // safe unwrap, we created the object above
                .insert(
                    "X-Source-Tags".to_string(),
                    JsonValue::String(source_tags.join(",")),
                );
        }
        // safe unwrap, we created the object above
        if !headers_map.as_object().unwrap().is_empty() {
            Some(json!({
                "headers": headers_map,
            }))
        } else {
            None
        }
    }
    /// Collects a snapshot for the given ping from storage and attach required meta information.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance to collect data from.
    /// * `ping` - the ping to collect for.
    /// * `reason` - an optional reason code to include in the ping.
    ///
    /// # Returns
    ///
    /// A fully assembled JSON representation of the ping payload.
    /// If there is no data stored for the ping, `None` is returned.
    pub fn collect(
        &self,
        glean: &Glean,
        ping: &PingType,
        reason: Option<&str>,
    ) -> Option<JsonValue> {
        info!("Collecting {}", ping.name);
        let metrics_data = StorageManager.snapshot_as_json(glean.storage(), &ping.name, true);
        let events_data = glean.event_storage().snapshot_as_json(&ping.name, true);
        let is_empty = metrics_data.is_none() && events_data.is_none();
        if !ping.send_if_empty && is_empty {
            info!("Storage for {} empty. Bailing out.", ping.name);
            return None;
        } else if is_empty {
            info!("Storage for {} empty. Ping will still be sent.", ping.name);
        }
        let ping_info = self.get_ping_info(glean, &ping.name, reason);
        let client_info = self.get_client_info(glean, ping.include_client_id);
        let mut json = json!({
            "ping_info": ping_info,
            "client_info": client_info
        });
        let json_obj = json.as_object_mut()?;
        if let Some(metrics_data) = metrics_data {
            json_obj.insert("metrics".to_string(), metrics_data);
        }
        if let Some(events_data) = events_data {
            json_obj.insert("events".to_string(), events_data);
        }
        Some(json)
    }
    /// Collects a snapshot for the given ping from storage and attach required meta information.
    ///
    /// # Arguments
    ///
    /// * `glean` - the Glean instance to collect data from.
    /// * `ping` - the ping to collect for.
    /// * `reason` - an optional reason code to include in the ping.
    ///
    /// # Returns
    ///
    /// A fully assembled ping payload in a string encoded as JSON.
    /// If there is no data stored for the ping, `None` is returned.
    pub fn collect_string(
        &self,
        glean: &Glean,
        ping: &PingType,
        reason: Option<&str>,
    ) -> Option<String> {
        self.collect(glean, ping, reason)
            .map(|ping| ::serde_json::to_string_pretty(&ping).unwrap())
    }
    /// Gets the path to a directory for ping storage.
    ///
    /// The directory will be created inside the `data_path`.
    /// The `pings` directory (and its parents) is created if it does not exist.
    fn get_pings_dir(&self, data_path: &Path, ping_type: Option<&str>) -> std::io::Result<PathBuf> {
        // Use a special directory for deletion-request pings
        let pings_dir = match ping_type {
            Some(ping_type) if ping_type == "deletion-request" => {
                data_path.join(DELETION_REQUEST_PINGS_DIRECTORY)
            }
            _ => data_path.join(PENDING_PINGS_DIRECTORY),
        };
        create_dir_all(&pings_dir)?;
        Ok(pings_dir)
    }
    /// Gets path to a directory for temporary storage.
    ///
    /// The directory will be created inside the `data_path`.
    /// The `tmp` directory (and its parents) is created if it does not exist.
    fn get_tmp_dir(&self, data_path: &Path) -> std::io::Result<PathBuf> {
        let pings_dir = data_path.join("tmp");
        create_dir_all(&pings_dir)?;
        Ok(pings_dir)
    }
    /// Stores a ping to disk in the pings directory.
    pub fn store_ping(
        &self,
        glean: &Glean,
        doc_id: &str,
        ping_name: &str,
        data_path: &Path,
        url_path: &str,
        ping_content: &JsonValue,
    ) -> std::io::Result<()> {
        let pings_dir = self.get_pings_dir(data_path, Some(ping_name))?;
        let temp_dir = self.get_tmp_dir(data_path)?;
        // Write to a temporary location and then move when done,
        // for transactional writes.
        let temp_ping_path = temp_dir.join(doc_id);
        let ping_path = pings_dir.join(doc_id);
        log::debug!("Storing ping '{}' at '{}'", doc_id, ping_path.display());
        {
            let mut file = File::create(&temp_ping_path)?;
            file.write_all(url_path.as_bytes())?;
            file.write_all(b"\n")?;
            file.write_all(::serde_json::to_string(ping_content)?.as_bytes())?;
            if let Some(metadata) = self.get_metadata(glean) {
                file.write_all(b"\n")?;
                file.write_all(::serde_json::to_string(&metadata)?.as_bytes())?;
            }
        }
        if let Err(e) = std::fs::rename(&temp_ping_path, &ping_path) {
            // Fixed the unbalanced quote in the message and include the
            // underlying error so failures can actually be diagnosed.
            log::warn!(
                "Unable to move '{}' to '{}': {}",
                temp_ping_path.display(),
                ping_path.display(),
                e
            );
            return Err(e);
        }
        Ok(())
    }
    /// Clears any pending pings in the queue.
    pub fn clear_pending_pings(&self, data_path: &Path) -> Result<()> {
        let pings_dir = self.get_pings_dir(data_path, None)?;
        std::fs::remove_dir_all(&pings_dir)?;
        create_dir_all(&pings_dir)?;
        log::debug!("All pending pings deleted");
        Ok(())
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::tests::new_glean;
    #[test]
    fn sequence_numbers_should_be_reset_when_toggling_uploading() {
        let (mut glean, _) = new_glean(None);
        let ping_maker = PingMaker::new();
        // Fresh storage: the sequence starts at 0 and advances on each read.
        assert_eq!(0, ping_maker.get_ping_seq(&glean, "custom"));
        assert_eq!(1, ping_maker.get_ping_seq(&glean, "custom"));
        // Disabling upload resets the counter, and reads while disabled
        // are expected not to advance it (it stays at 0).
        glean.set_upload_enabled(false);
        assert_eq!(0, ping_maker.get_ping_seq(&glean, "custom"));
        assert_eq!(0, ping_maker.get_ping_seq(&glean, "custom"));
        // Re-enabling upload starts a fresh sequence from 0 again.
        glean.set_upload_enabled(true);
        assert_eq!(0, ping_maker.get_ping_seq(&glean, "custom"));
        assert_eq!(1, ping_maker.get_ping_seq(&glean, "custom"));
    }
}

516
third_party/rust/glean-core/src/storage/mod.rs поставляемый
Просмотреть файл

@ -1,258 +1,258 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
#![allow(non_upper_case_globals)]
//! Storage snapshotting.
use std::collections::HashMap;
use serde_json::{json, Value as JsonValue};
use crate::database::Database;
use crate::metrics::Metric;
use crate::Lifetime;
/// Snapshot metrics from the underlying database.
///
/// A stateless unit struct; the database to read from is passed into
/// each method.
pub struct StorageManager;
/// Labeled metrics are stored as `<metric id>/<label>`.
/// They need to go into a nested object in the final snapshot.
///
/// We therefore extract the metric id and the label from the key and construct the new object or
/// add to it.
fn snapshot_labeled_metrics(
    snapshot: &mut HashMap<String, HashMap<String, JsonValue>>,
    metric_id: &str,
    metric: &Metric,
) {
    let section_key = format!("labeled_{}", metric.ping_section());
    let section = snapshot.entry(section_key).or_insert_with(HashMap::new);
    // The caller only invokes this when the id contains a '/', so both
    // halves of the split are guaranteed to exist.
    let mut parts = metric_id.splitn(2, '/');
    let base_id = parts.next().unwrap();
    let label = parts.next().unwrap();
    let entry = section.entry(base_id.into()).or_insert_with(|| json!({}));
    entry
        .as_object_mut()
        .unwrap() // safe unwrap, we constructed the object above
        .insert(label.into(), metric.as_json());
}
impl StorageManager {
    /// Snapshots the given store and optionally clear it.
    ///
    /// # Arguments
    ///
    /// * `storage` - the database to read from.
    /// * `store_name` - the store to snapshot.
    /// * `clear_store` - whether to clear the data after snapshotting.
    ///
    /// # Returns
    ///
    /// The stored data in a string encoded as JSON.
    /// If no data for the store exists, `None` is returned.
    pub fn snapshot(
        &self,
        storage: &Database,
        store_name: &str,
        clear_store: bool,
    ) -> Option<String> {
        self.snapshot_as_json(storage, store_name, clear_store)
            .map(|data| ::serde_json::to_string_pretty(&data).unwrap())
    }
    /// Snapshots the given store and optionally clear it.
    ///
    /// # Arguments
    ///
    /// * `storage` - the database to read from.
    /// * `store_name` - the store to snapshot.
    /// * `clear_store` - whether to clear the data after snapshotting.
    ///
    /// # Returns
    ///
    /// A JSON representation of the stored data.
    /// If no data for the store exists, `None` is returned.
    pub fn snapshot_as_json(
        &self,
        storage: &Database,
        store_name: &str,
        clear_store: bool,
    ) -> Option<JsonValue> {
        let mut snapshot: HashMap<String, HashMap<String, JsonValue>> = HashMap::new();
        let mut snapshotter = |metric_id: &[u8], metric: &Metric| {
            let metric_id = String::from_utf8_lossy(metric_id).into_owned();
            if metric_id.contains('/') {
                // Labeled metrics are unpacked into a nested object.
                snapshot_labeled_metrics(&mut snapshot, &metric_id, metric);
            } else {
                let map = snapshot
                    .entry(metric.ping_section().into())
                    .or_insert_with(HashMap::new);
                map.insert(metric_id, metric.as_json());
            }
        };
        // `store_name` is already a `&str`; the extra borrow (`&store_name`)
        // was a needless `&&str`, and inconsistent with
        // `snapshot_experiments_as_json` below.
        storage.iter_store_from(Lifetime::Ping, store_name, None, &mut snapshotter);
        storage.iter_store_from(Lifetime::Application, store_name, None, &mut snapshotter);
        storage.iter_store_from(Lifetime::User, store_name, None, &mut snapshotter);
        if clear_store {
            if let Err(e) = storage.clear_ping_lifetime_storage(store_name) {
                log::error!("Failed to clear lifetime storage: {:?}", e);
            }
        }
        if snapshot.is_empty() {
            None
        } else {
            Some(json!(snapshot))
        }
    }
    /// Gets the current value of a single metric identified by name.
    ///
    /// This look for a value in stores for all lifetimes.
    ///
    /// # Arguments
    ///
    /// * `storage` - The database to get data from.
    /// * `store_name` - The store name to look into.
    /// * `metric_id` - The full metric identifier.
    ///
    /// # Returns
    ///
    /// The decoded metric or `None` if no data is found.
    pub fn snapshot_metric(
        &self,
        storage: &Database,
        store_name: &str,
        metric_id: &str,
    ) -> Option<Metric> {
        let mut snapshot: Option<Metric> = None;
        let mut snapshotter = |id: &[u8], metric: &Metric| {
            let id = String::from_utf8_lossy(id).into_owned();
            if id == metric_id {
                snapshot = Some(metric.clone())
            }
        };
        storage.iter_store_from(Lifetime::Ping, store_name, None, &mut snapshotter);
        storage.iter_store_from(Lifetime::Application, store_name, None, &mut snapshotter);
        storage.iter_store_from(Lifetime::User, store_name, None, &mut snapshotter);
        snapshot
    }
    /// Snapshots the experiments.
    ///
    /// # Arguments
    ///
    /// * `storage` - The database to get data from.
    /// * `store_name` - The store name to look into.
    ///
    /// # Returns
    ///
    /// A JSON representation of the experiment data, in the following format:
    ///
    /// ```json
    /// {
    ///     "experiment-id": {
    ///         "branch": "branch-id",
    ///         "extra": {
    ///             "additional": "property",
    ///             // ...
    ///         }
    ///     }
    /// }
    /// ```
    ///
    /// If no data for the store exists, `None` is returned.
    pub fn snapshot_experiments_as_json(
        &self,
        storage: &Database,
        store_name: &str,
    ) -> Option<JsonValue> {
        let mut snapshot: HashMap<String, JsonValue> = HashMap::new();
        let mut snapshotter = |metric_id: &[u8], metric: &Metric| {
            let metric_id = String::from_utf8_lossy(metric_id).into_owned();
            if metric_id.ends_with("#experiment") {
                let name = metric_id.splitn(2, '#').next().unwrap(); // safe unwrap, first field of a split always valid
                snapshot.insert(name.to_string(), metric.as_json());
            }
        };
        storage.iter_store_from(Lifetime::Application, store_name, None, &mut snapshotter);
        if snapshot.is_empty() {
            None
        } else {
            Some(json!(snapshot))
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::metrics::ExperimentMetric;
    use crate::Glean;
    // Experiment's API tests: the next test comes from glean-ac's
    // ExperimentsStorageEngineTest.kt.
    #[test]
    fn test_experiments_json_serialization() {
        // A fresh Glean instance backed by a temporary data directory.
        let t = tempfile::tempdir().unwrap();
        let name = t.path().display().to_string();
        let glean = Glean::with_options(&name, "org.mozilla.glean", true);
        let extra: HashMap<String, String> = [("test-key".into(), "test-value".into())]
            .iter()
            .cloned()
            .collect();
        let metric = ExperimentMetric::new(&glean, "some-experiment".to_string());
        metric.set_active(&glean, "test-branch".to_string(), Some(extra));
        // An active experiment with extras serializes branch and extra fields.
        let snapshot = StorageManager
            .snapshot_experiments_as_json(glean.storage(), "glean_internal_info")
            .unwrap();
        assert_eq!(
            json!({"some-experiment": {"branch": "test-branch", "extra": {"test-key": "test-value"}}}),
            snapshot
        );
        // Deactivating the experiment leaves no data, so the snapshot is None.
        metric.set_inactive(&glean);
        let empty_snapshot =
            StorageManager.snapshot_experiments_as_json(glean.storage(), "glean_internal_info");
        assert!(empty_snapshot.is_none());
    }
    #[test]
    fn test_experiments_json_serialization_empty() {
        let t = tempfile::tempdir().unwrap();
        let name = t.path().display().to_string();
        let glean = Glean::with_options(&name, "org.mozilla.glean", true);
        let metric = ExperimentMetric::new(&glean, "some-experiment".to_string());
        metric.set_active(&glean, "test-branch".to_string(), None);
        // Without extras only the branch is serialized (no "extra" key).
        let snapshot = StorageManager
            .snapshot_experiments_as_json(glean.storage(), "glean_internal_info")
            .unwrap();
        assert_eq!(
            json!({"some-experiment": {"branch": "test-branch"}}),
            snapshot
        );
        metric.set_inactive(&glean);
        let empty_snapshot =
            StorageManager.snapshot_experiments_as_json(glean.storage(), "glean_internal_info");
        assert!(empty_snapshot.is_none());
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
#![allow(non_upper_case_globals)]
//! Storage snapshotting.
use std::collections::HashMap;
use serde_json::{json, Value as JsonValue};
use crate::database::Database;
use crate::metrics::Metric;
use crate::Lifetime;
/// Snapshot metrics from the underlying database.
///
/// A stateless unit struct; the database to read from is passed into
/// each method.
pub struct StorageManager;
/// Labeled metrics are stored as `<metric id>/<label>`.
/// They need to go into a nested object in the final snapshot.
///
/// We therefore extract the metric id and the label from the key and construct the new object or
/// add to it.
fn snapshot_labeled_metrics(
    snapshot: &mut HashMap<String, HashMap<String, JsonValue>>,
    metric_id: &str,
    metric: &Metric,
) {
    let section_key = format!("labeled_{}", metric.ping_section());
    let section = snapshot.entry(section_key).or_insert_with(HashMap::new);
    // The caller only invokes this when the id contains a '/', so both
    // halves of the split are guaranteed to exist.
    let mut parts = metric_id.splitn(2, '/');
    let base_id = parts.next().unwrap();
    let label = parts.next().unwrap();
    let entry = section.entry(base_id.into()).or_insert_with(|| json!({}));
    entry
        .as_object_mut()
        .unwrap() // safe unwrap, we constructed the object above
        .insert(label.into(), metric.as_json());
}
impl StorageManager {
    /// Snapshots the given store and optionally clear it.
    ///
    /// # Arguments
    ///
    /// * `storage` - the database to read from.
    /// * `store_name` - the store to snapshot.
    /// * `clear_store` - whether to clear the data after snapshotting.
    ///
    /// # Returns
    ///
    /// The stored data in a string encoded as JSON.
    /// If no data for the store exists, `None` is returned.
    pub fn snapshot(
        &self,
        storage: &Database,
        store_name: &str,
        clear_store: bool,
    ) -> Option<String> {
        self.snapshot_as_json(storage, store_name, clear_store)
            .map(|data| ::serde_json::to_string_pretty(&data).unwrap())
    }
    /// Snapshots the given store and optionally clear it.
    ///
    /// # Arguments
    ///
    /// * `storage` - the database to read from.
    /// * `store_name` - the store to snapshot.
    /// * `clear_store` - whether to clear the data after snapshotting.
    ///
    /// # Returns
    ///
    /// A JSON representation of the stored data.
    /// If no data for the store exists, `None` is returned.
    pub fn snapshot_as_json(
        &self,
        storage: &Database,
        store_name: &str,
        clear_store: bool,
    ) -> Option<JsonValue> {
        let mut snapshot: HashMap<String, HashMap<String, JsonValue>> = HashMap::new();
        let mut snapshotter = |metric_id: &[u8], metric: &Metric| {
            let metric_id = String::from_utf8_lossy(metric_id).into_owned();
            if metric_id.contains('/') {
                // Labeled metrics are unpacked into a nested object.
                snapshot_labeled_metrics(&mut snapshot, &metric_id, metric);
            } else {
                let map = snapshot
                    .entry(metric.ping_section().into())
                    .or_insert_with(HashMap::new);
                map.insert(metric_id, metric.as_json());
            }
        };
        // `store_name` is already a `&str`; the extra borrow (`&store_name`)
        // was a needless `&&str`, and inconsistent with
        // `snapshot_experiments_as_json` below.
        storage.iter_store_from(Lifetime::Ping, store_name, None, &mut snapshotter);
        storage.iter_store_from(Lifetime::Application, store_name, None, &mut snapshotter);
        storage.iter_store_from(Lifetime::User, store_name, None, &mut snapshotter);
        if clear_store {
            if let Err(e) = storage.clear_ping_lifetime_storage(store_name) {
                log::error!("Failed to clear lifetime storage: {:?}", e);
            }
        }
        if snapshot.is_empty() {
            None
        } else {
            Some(json!(snapshot))
        }
    }
    /// Gets the current value of a single metric identified by name.
    ///
    /// This look for a value in stores for all lifetimes.
    ///
    /// # Arguments
    ///
    /// * `storage` - The database to get data from.
    /// * `store_name` - The store name to look into.
    /// * `metric_id` - The full metric identifier.
    ///
    /// # Returns
    ///
    /// The decoded metric or `None` if no data is found.
    pub fn snapshot_metric(
        &self,
        storage: &Database,
        store_name: &str,
        metric_id: &str,
    ) -> Option<Metric> {
        let mut snapshot: Option<Metric> = None;
        let mut snapshotter = |id: &[u8], metric: &Metric| {
            let id = String::from_utf8_lossy(id).into_owned();
            if id == metric_id {
                snapshot = Some(metric.clone())
            }
        };
        storage.iter_store_from(Lifetime::Ping, store_name, None, &mut snapshotter);
        storage.iter_store_from(Lifetime::Application, store_name, None, &mut snapshotter);
        storage.iter_store_from(Lifetime::User, store_name, None, &mut snapshotter);
        snapshot
    }
    /// Snapshots the experiments.
    ///
    /// # Arguments
    ///
    /// * `storage` - The database to get data from.
    /// * `store_name` - The store name to look into.
    ///
    /// # Returns
    ///
    /// A JSON representation of the experiment data, in the following format:
    ///
    /// ```json
    /// {
    ///     "experiment-id": {
    ///         "branch": "branch-id",
    ///         "extra": {
    ///             "additional": "property",
    ///             // ...
    ///         }
    ///     }
    /// }
    /// ```
    ///
    /// If no data for the store exists, `None` is returned.
    pub fn snapshot_experiments_as_json(
        &self,
        storage: &Database,
        store_name: &str,
    ) -> Option<JsonValue> {
        let mut snapshot: HashMap<String, JsonValue> = HashMap::new();
        let mut snapshotter = |metric_id: &[u8], metric: &Metric| {
            let metric_id = String::from_utf8_lossy(metric_id).into_owned();
            if metric_id.ends_with("#experiment") {
                let name = metric_id.splitn(2, '#').next().unwrap(); // safe unwrap, first field of a split always valid
                snapshot.insert(name.to_string(), metric.as_json());
            }
        };
        storage.iter_store_from(Lifetime::Application, store_name, None, &mut snapshotter);
        if snapshot.is_empty() {
            None
        } else {
            Some(json!(snapshot))
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::metrics::ExperimentMetric;
    use crate::Glean;
    // Experiment's API tests: the next test comes from glean-ac's
    // ExperimentsStorageEngineTest.kt.
    #[test]
    fn test_experiments_json_serialization() {
        // A fresh Glean instance backed by a temporary data directory.
        let t = tempfile::tempdir().unwrap();
        let name = t.path().display().to_string();
        let glean = Glean::with_options(&name, "org.mozilla.glean", true);
        let extra: HashMap<String, String> = [("test-key".into(), "test-value".into())]
            .iter()
            .cloned()
            .collect();
        let metric = ExperimentMetric::new(&glean, "some-experiment".to_string());
        metric.set_active(&glean, "test-branch".to_string(), Some(extra));
        // An active experiment with extras serializes branch and extra fields.
        let snapshot = StorageManager
            .snapshot_experiments_as_json(glean.storage(), "glean_internal_info")
            .unwrap();
        assert_eq!(
            json!({"some-experiment": {"branch": "test-branch", "extra": {"test-key": "test-value"}}}),
            snapshot
        );
        // Deactivating the experiment leaves no data, so the snapshot is None.
        metric.set_inactive(&glean);
        let empty_snapshot =
            StorageManager.snapshot_experiments_as_json(glean.storage(), "glean_internal_info");
        assert!(empty_snapshot.is_none());
    }
    #[test]
    fn test_experiments_json_serialization_empty() {
        let t = tempfile::tempdir().unwrap();
        let name = t.path().display().to_string();
        let glean = Glean::with_options(&name, "org.mozilla.glean", true);
        let metric = ExperimentMetric::new(&glean, "some-experiment".to_string());
        metric.set_active(&glean, "test-branch".to_string(), None);
        // Without extras only the branch is serialized (no "extra" key).
        let snapshot = StorageManager
            .snapshot_experiments_as_json(glean.storage(), "glean_internal_info")
            .unwrap();
        assert_eq!(
            json!({"some-experiment": {"branch": "test-branch"}}),
            snapshot
        );
        metric.set_inactive(&glean);
        let empty_snapshot =
            StorageManager.snapshot_experiments_as_json(glean.storage(), "glean_internal_info");
        assert!(empty_snapshot.is_none());
    }
}

162
third_party/rust/glean-core/src/system.rs поставляемый
Просмотреть файл

@ -1,81 +1,81 @@
// Copyright (c) 2017 The Rust Project Developers
// Licensed under the MIT License.
// Original license:
// https://github.com/RustSec/platforms-crate/blob/ebbd3403243067ba3096f31684557285e352b639/LICENSE-MIT
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Detect and expose `target_os` as a constant.
//!
//! Code adopted from the "platforms" crate: <https://github.com/RustSec/platforms-crate>.
#[cfg(target_os = "android")]
/// `target_os` when building this crate: `android`
pub const OS: &str = "Android";
#[cfg(target_os = "ios")]
/// `target_os` when building this crate: `ios`
pub const OS: &str = "iOS";
#[cfg(target_os = "linux")]
/// `target_os` when building this crate: `linux`
pub const OS: &str = "Linux";
#[cfg(target_os = "macos")]
/// `target_os` when building this crate: `macos`
pub const OS: &str = "Darwin";
#[cfg(target_os = "windows")]
/// `target_os` when building this crate: `windows`
pub const OS: &str = "Windows";
#[cfg(target_os = "freebsd")]
/// `target_os` when building this crate: `freebsd`
pub const OS: &str = "FreeBSD";
#[cfg(target_os = "netbsd")]
/// `target_os` when building this crate: `netbsd`
pub const OS: &str = "NetBSD";
#[cfg(target_os = "openbsd")]
/// `target_os` when building this crate: `openbsd`
pub const OS: &str = "OpenBSD";
#[cfg(target_os = "solaris")]
/// `target_os` when building this crate: `solaris`
pub const OS: &str = "Solaris";
#[cfg(not(any(
    target_os = "android",
    target_os = "ios",
    target_os = "linux",
    target_os = "macos",
    target_os = "windows",
    target_os = "freebsd",
    target_os = "netbsd",
    target_os = "openbsd",
    target_os = "solaris",
)))]
/// Fallback `target_os` name when building for an OS not listed above.
pub const OS: &str = "unknown";
// Copyright (c) 2017 The Rust Project Developers
// Licensed under the MIT License.
// Original license:
// https://github.com/RustSec/platforms-crate/blob/ebbd3403243067ba3096f31684557285e352b639/LICENSE-MIT
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Detect and expose `target_os` as a constant.
//!
//! Code adopted from the "platforms" crate: <https://github.com/RustSec/platforms-crate>.
#[cfg(target_os = "android")]
/// `target_os` when building this crate: `android`
pub const OS: &str = "Android";
#[cfg(target_os = "ios")]
/// `target_os` when building this crate: `ios`
pub const OS: &str = "iOS";
#[cfg(target_os = "linux")]
/// `target_os` when building this crate: `linux`
pub const OS: &str = "Linux";
#[cfg(target_os = "macos")]
/// `target_os` when building this crate: `macos`
pub const OS: &str = "Darwin";
#[cfg(target_os = "windows")]
/// `target_os` when building this crate: `windows`
pub const OS: &str = "Windows";
#[cfg(target_os = "freebsd")]
/// `target_os` when building this crate: `freebsd`
pub const OS: &str = "FreeBSD";
#[cfg(target_os = "netbsd")]
/// `target_os` when building this crate: `netbsd`
pub const OS: &str = "NetBSD";
#[cfg(target_os = "openbsd")]
/// `target_os` when building this crate: `openbsd`
pub const OS: &str = "OpenBSD";
#[cfg(target_os = "solaris")]
/// `target_os` when building this crate: `solaris`
pub const OS: &str = "Solaris";
#[cfg(not(any(
    target_os = "android",
    target_os = "ios",
    target_os = "linux",
    target_os = "macos",
    target_os = "windows",
    target_os = "freebsd",
    target_os = "netbsd",
    target_os = "openbsd",
    target_os = "solaris",
)))]
/// Fallback `target_os` name when building for an OS not listed above.
pub const OS: &str = "unknown";

Просмотреть файл

@ -1,28 +1,28 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// A description for the `BooleanMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Boolean {
    /// Sets to the specified boolean value.
    ///
    /// # Arguments
    ///
    /// * `value` - the value to set.
    fn set(&self, value: bool);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a boolean.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<bool>;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// A description for the `BooleanMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Boolean {
    /// Sets to the specified boolean value.
    ///
    /// # Arguments
    ///
    /// * `value` - the value to set.
    fn set(&self, value: bool);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a boolean.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<bool>;
}

Просмотреть файл

@ -1,53 +1,53 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::ErrorType;
/// A description for the `CounterMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Counter {
    /// Increases the counter by `amount`.
    ///
    /// # Arguments
    ///
    /// * `amount` - The amount to increase by. Should be positive.
    ///
    /// ## Notes
    ///
    /// Logs an error if the `amount` is 0 or negative.
    fn add(&self, amount: i32);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as an integer.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<i32>;

    /// **Exported for test purposes.**
    ///
    /// Gets the number of recorded errors for the given metric and error type.
    ///
    /// # Arguments
    ///
    /// * `error` - The type of error
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    ///
    /// # Returns
    ///
    /// The number of errors reported.
    fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
        &self,
        error: ErrorType,
        ping_name: S,
    ) -> i32;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::ErrorType;
/// A description for the `CounterMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Counter {
    /// Increases the counter by `amount`.
    ///
    /// # Arguments
    ///
    /// * `amount` - The amount to increase by. Should be positive.
    ///
    /// ## Notes
    ///
    /// Logs an error if the `amount` is 0 or negative.
    fn add(&self, amount: i32);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as an integer.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<i32>;

    /// **Exported for test purposes.**
    ///
    /// Gets the number of recorded errors for the given metric and error type.
    ///
    /// # Arguments
    ///
    /// * `error` - The type of error
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    ///
    /// # Returns
    ///
    /// The number of errors reported.
    fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
        &self,
        error: ErrorType,
        ping_name: S,
    ) -> i32;
}

Просмотреть файл

@ -1,56 +1,56 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// A description for the `CustomDistributionMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait CustomDistribution {
    /// Accumulates the provided signed samples in the metric.
    ///
    /// This is required so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// # Arguments
    ///
    /// - `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and report an `ErrorType::InvalidValue`
    /// for each of them.
    fn accumulate_samples_signed(&self, samples: Vec<i64>);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored histogram.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<crate::metrics::DistributionData>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored histogram as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_json_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// A description for the `CustomDistributionMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait CustomDistribution {
    /// Accumulates the provided signed samples in the metric.
    ///
    /// This is required so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// # Arguments
    ///
    /// - `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and report an `ErrorType::InvalidValue`
    /// for each of them.
    fn accumulate_samples_signed(&self, samples: Vec<i64>);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored histogram.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<crate::metrics::DistributionData>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored histogram as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_json_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}

Просмотреть файл

@ -1,78 +1,78 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
#![allow(clippy::too_many_arguments)]
/// A description for the `DatetimeMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Datetime {
    /// Sets the metric to a date/time including the timezone offset.
    ///
    /// # Arguments
    ///
    /// * `year` - the year to set the metric to.
    /// * `month` - the month to set the metric to (1-12).
    /// * `day` - the day to set the metric to (1-based).
    /// * `hour` - the hour to set the metric to.
    /// * `minute` - the minute to set the metric to.
    /// * `second` - the second to set the metric to.
    /// * `nano` - the nanosecond fraction to the last whole second.
    /// * `offset_seconds` - the timezone difference, in seconds, for the Eastern
    /// Hemisphere. Negative seconds mean Western Hemisphere.
    fn set_with_details(
        &self,
        year: i32,
        month: u32,
        day: u32,
        hour: u32,
        minute: u32,
        second: u32,
        nano: u32,
        offset_seconds: i32,
    );

    /// Sets the metric to a date/time which including the timezone offset.
    ///
    /// # Arguments
    ///
    /// * `value` - Some date/time value, with offset, to set the metric to.
    /// If none, the current local time is used.
    fn set(&self, value: Option<crate::metrics::Datetime>);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a Datetime.
    ///
    /// The precision of this value is truncated to the `time_unit` precision.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<crate::metrics::Datetime>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a String.
    ///
    /// The precision of this value is truncated to the `time_unit` precision.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
#![allow(clippy::too_many_arguments)]
/// A description for the `DatetimeMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Datetime {
    /// Sets the metric to a date/time including the timezone offset.
    ///
    /// # Arguments
    ///
    /// * `year` - the year to set the metric to.
    /// * `month` - the month to set the metric to (1-12).
    /// * `day` - the day to set the metric to (1-based).
    /// * `hour` - the hour to set the metric to.
    /// * `minute` - the minute to set the metric to.
    /// * `second` - the second to set the metric to.
    /// * `nano` - the nanosecond fraction to the last whole second.
    /// * `offset_seconds` - the timezone difference, in seconds, for the Eastern
    /// Hemisphere. Negative seconds mean Western Hemisphere.
    fn set_with_details(
        &self,
        year: i32,
        month: u32,
        day: u32,
        hour: u32,
        minute: u32,
        second: u32,
        nano: u32,
        offset_seconds: i32,
    );

    /// Sets the metric to a date/time which including the timezone offset.
    ///
    /// # Arguments
    ///
    /// * `value` - Some date/time value, with offset, to set the metric to.
    /// If none, the current local time is used.
    fn set(&self, value: Option<crate::metrics::Datetime>);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a Datetime.
    ///
    /// The precision of this value is truncated to the `time_unit` precision.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<crate::metrics::Datetime>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a String.
    ///
    /// The precision of this value is truncated to the `time_unit` precision.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}

Просмотреть файл

@ -1,53 +1,53 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use crate::event_database::RecordedEvent;
/// A description for the `EventMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Event {
    /// Records an event.
    ///
    /// # Arguments
    ///
    /// * `extra` - A HashMap of (key, value) pairs. The key is an index into
    /// the metric's `allowed_extra_keys` vector where the key's string is
    /// looked up. If any key index is out of range, an error is reported and
    /// no event is recorded.
    fn record<M: Into<Option<HashMap<i32, String>>>>(&self, extra: M);

    /// **Exported for test purposes.**
    ///
    /// Get the vector of currently stored events for this event metric.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<Vec<RecordedEvent>>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored events for this event metric as a JSON-encoded string.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_json_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use crate::event_database::RecordedEvent;
/// A description for the `EventMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Event {
    /// Records an event.
    ///
    /// # Arguments
    ///
    /// * `extra` - A HashMap of (key, value) pairs. The key is an index into
    /// the metric's `allowed_extra_keys` vector where the key's string is
    /// looked up. If any key index is out of range, an error is reported and
    /// no event is recorded.
    fn record<M: Into<Option<HashMap<i32, String>>>>(&self, extra: M);

    /// **Exported for test purposes.**
    ///
    /// Get the vector of currently stored events for this event metric.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<Vec<RecordedEvent>>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored events for this event metric as a JSON-encoded string.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_json_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}

108
third_party/rust/glean-core/src/traits/jwe.rs поставляемый
Просмотреть файл

@ -1,54 +1,54 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// A description for the `JweMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Jwe {
    /// Sets to the specified JWE value.
    ///
    /// # Arguments
    ///
    /// * `value` - the [`compact representation`](https://tools.ietf.org/html/rfc7516#appendix-A.2.7) of a JWE value.
    fn set_with_compact_representation<S: Into<String>>(&self, value: S);

    /// Builds a JWE value from its elements and set to it.
    ///
    /// # Arguments
    ///
    /// * `header` - the JWE Protected Header element.
    /// * `key` - the JWE Encrypted Key element.
    /// * `init_vector` - the JWE Initialization Vector element.
    /// * `cipher_text` - the JWE Ciphertext element.
    /// * `auth_tag` - the JWE Authentication Tag element.
    fn set<S: Into<String>>(&self, header: S, key: S, init_vector: S, cipher_text: S, auth_tag: S);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a string.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<String>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored JWE as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_json_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// A description for the `JweMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Jwe {
    /// Sets to the specified JWE value.
    ///
    /// # Arguments
    ///
    /// * `value` - the [`compact representation`](https://tools.ietf.org/html/rfc7516#appendix-A.2.7) of a JWE value.
    fn set_with_compact_representation<S: Into<String>>(&self, value: S);

    /// Builds a JWE value from its elements and set to it.
    ///
    /// # Arguments
    ///
    /// * `header` - the JWE Protected Header element.
    /// * `key` - the JWE Encrypted Key element.
    /// * `init_vector` - the JWE Initialization Vector element.
    /// * `cipher_text` - the JWE Ciphertext element.
    /// * `auth_tag` - the JWE Authentication Tag element.
    fn set<S: Into<String>>(&self, header: S, key: S, init_vector: S, cipher_text: S, auth_tag: S);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a string.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<String>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored JWE as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_json_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}

Просмотреть файл

@ -1,46 +1,33 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::ErrorType;
/// A description for the `LabeledMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Labeled<T>
where
    T: Clone,
{
    /// Gets a specific metric for a given label.
    ///
    /// If a set of acceptable labels were specified in the `metrics.yaml` file,
    /// and the given label is not in the set, it will be recorded under the special `OTHER_LABEL` label.
    ///
    /// If a set of acceptable labels was not specified in the `metrics.yaml` file,
    /// only the first 16 unique labels will be used.
    /// After that, any additional labels will be recorded under the special `OTHER_LABEL` label.
    ///
    /// Labels must be `snake_case` and less than 30 characters.
    /// If an invalid label is used, the metric will be recorded in the special `OTHER_LABEL` label.
    fn get(&self, label: &str) -> T;

    /// **Exported for test purposes.**
    ///
    /// Gets the number of recorded errors for the given metric and error type.
    ///
    /// # Arguments
    ///
    /// * `error` - The type of error
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    ///
    /// # Returns
    ///
    /// The number of errors reported.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
        &self,
        error: ErrorType,
        ping_name: S,
    ) -> i32;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::metrics::MetricType;
/// A description for the `LabeledMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Labeled<T>
where
    T: MetricType + Clone,
{
    /// Gets a specific metric for a given label.
    ///
    /// If a set of acceptable labels were specified in the `metrics.yaml` file,
    /// and the given label is not in the set, it will be recorded under the special `OTHER_LABEL` label.
    ///
    /// If a set of acceptable labels was not specified in the `metrics.yaml` file,
    /// only the first 16 unique labels will be used.
    /// After that, any additional labels will be recorded under the special `OTHER_LABEL` label.
    ///
    /// Labels must be `snake_case` and less than 30 characters.
    /// If an invalid label is used, the metric will be recorded in the special `OTHER_LABEL` label.
    fn get(&self, label: &str) -> T;

    /// Gets the template submetric.
    ///
    /// The template submetric is the actual metric that is cloned and modified
    /// to record for a specific label.
    fn get_submetric(&self) -> &T;
}

Просмотреть файл

@ -1,79 +1,79 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::metrics::DistributionData;
/// A description for the `MemoryDistributionMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait MemoryDistribution {
    /// Accumulates the provided sample in the metric.
    ///
    /// # Arguments
    ///
    /// * `sample` - The sample to be recorded by the metric. The sample is assumed to be in the
    /// configured memory unit of the metric.
    ///
    /// ## Notes
    ///
    /// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
    /// and an `ErrorType::InvalidValue` error is recorded.
    fn accumulate(&self, sample: u64);

    /// Accumulates the provided signed samples in the metric.
    ///
    /// This is required so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// Please note that this assumes that the provided samples are already in the
    /// "unit" declared by the instance of the implementing metric type (e.g. if the
    /// implementing class is a [MemoryDistributionMetricType] and the instance this
    /// method was called on is using [MemoryUnit.Kilobyte], then `samples` are assumed
    /// to be in that unit).
    ///
    /// # Arguments
    ///
    /// * `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and report an `ErrorType::InvalidValue`
    /// for each of them.
    /// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
    /// and an `ErrorType::InvalidValue` error is recorded.
    fn accumulate_samples_signed(&self, samples: Vec<i64>);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as an integer.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    // The `S: Into<Option<&str>>` bound lets callers pass a ping name or `None`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<DistributionData>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently-stored histogram as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_json_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::metrics::DistributionData;
/// A description for the `MemoryDistributionMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait MemoryDistribution {
    /// Accumulates the provided sample in the metric.
    ///
    /// # Arguments
    ///
    /// * `sample` - The sample to be recorded by the metric. The sample is assumed to be in the
    ///   configured memory unit of the metric.
    ///
    /// ## Notes
    ///
    /// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
    /// and an `ErrorType::InvalidValue` error is recorded.
    fn accumulate(&self, sample: u64);

    /// Accumulates the provided signed samples in the metric.
    ///
    /// This is required so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// Please note that this assumes that the provided samples are already in the
    /// "unit" declared by the instance of the implementing metric type (e.g. if the
    /// implementing class is a [MemoryDistributionMetricType] and the instance this
    /// method was called on is using [MemoryUnit.Kilobyte], then `samples` are assumed
    /// to be in that unit).
    ///
    /// # Arguments
    ///
    /// * `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and reports an `ErrorType::InvalidValue`
    /// for each of them.
    /// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
    /// and an `ErrorType::InvalidValue` error is recorded.
    fn accumulate_samples_signed(&self, samples: Vec<i64>);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value of the distribution.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<DistributionData>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently-stored histogram as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_json_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}

84
third_party/rust/glean-core/src/traits/mod.rs поставляемый
Просмотреть файл

@@ -1,42 +1,42 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Important: consider this module unstable / experimental.
//!
//! The different metric types supported by the Glean SDK to handle data.
mod boolean;
mod counter;
mod custom_distribution;
mod datetime;
mod event;
mod jwe;
mod labeled;
mod memory_distribution;
mod ping;
mod quantity;
mod string;
mod string_list;
mod timespan;
mod timing_distribution;
mod uuid;
pub use crate::event_database::RecordedEvent;
pub use self::boolean::Boolean;
pub use self::counter::Counter;
pub use self::custom_distribution::CustomDistribution;
pub use self::datetime::Datetime;
pub use self::event::Event;
pub use self::jwe::Jwe;
pub use self::labeled::Labeled;
pub use self::memory_distribution::MemoryDistribution;
pub use self::ping::Ping;
pub use self::quantity::Quantity;
pub use self::string::String;
pub use self::string_list::StringList;
pub use self::timespan::Timespan;
pub use self::timing_distribution::TimingDistribution;
pub use self::uuid::Uuid;
pub use crate::histogram::HistogramType;
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Important: consider this module unstable / experimental.
//!
//! The different metric types supported by the Glean SDK to handle data.
mod boolean;
mod counter;
mod custom_distribution;
mod datetime;
mod event;
mod jwe;
mod labeled;
mod memory_distribution;
mod ping;
mod quantity;
mod string;
mod string_list;
mod timespan;
mod timing_distribution;
mod uuid;
pub use crate::event_database::RecordedEvent;
pub use self::boolean::Boolean;
pub use self::counter::Counter;
pub use self::custom_distribution::CustomDistribution;
pub use self::datetime::Datetime;
pub use self::event::Event;
pub use self::jwe::Jwe;
pub use self::labeled::Labeled;
pub use self::memory_distribution::MemoryDistribution;
pub use self::ping::Ping;
pub use self::quantity::Quantity;
pub use self::string::String;
pub use self::string_list::StringList;
pub use self::timespan::Timespan;
pub use self::timing_distribution::TimingDistribution;
pub use self::uuid::Uuid;
pub use crate::histogram::HistogramType;

Просмотреть файл

@@ -1,17 +1,23 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// A description for the `PingType` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Ping {
    /// Submits the ping for eventual uploading.
    ///
    /// # Arguments
    ///
    /// * `reason` - the reason the ping was triggered. Included in the
    ///   `ping_info.reason` part of the payload.
    fn submit(&self, reason: Option<&str>);
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error::Result;
/// A description for the `PingType` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Ping {
    /// Submits the ping for eventual uploading.
    ///
    /// # Arguments
    ///
    /// * `reason` - the reason the ping was triggered. Included in the
    ///   `ping_info.reason` part of the payload.
    ///
    /// # Returns
    ///
    /// See [`Glean#submit_ping`](../struct.Glean.html#method.submit_ping) for details.
    fn submit(&self, reason: Option<&str>) -> Result<bool>;
}

Просмотреть файл

@@ -1,53 +1,32 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::ErrorType;
/// A description for the `QuantityMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Quantity {
    /// Sets the value. Must be non-negative.
    ///
    /// # Arguments
    ///
    /// * `value` - The value. Must be non-negative.
    ///
    /// ## Notes
    ///
    /// Logs an error if the `value` is negative.
    fn set(&self, value: i64);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as an integer.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<i64>;

    /// **Exported for test purposes.**
    ///
    /// Gets the number of recorded errors for the given metric and error type.
    ///
    /// # Arguments
    ///
    /// * `error` - The type of error.
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    ///
    /// # Returns
    ///
    /// The number of errors reported.
    fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
        &self,
        error: ErrorType,
        ping_name: S,
    ) -> i32;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// A description for the `QuantityMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Quantity {
    /// Sets the value. Must be non-negative.
    ///
    /// # Arguments
    ///
    /// * `value` - The value. Must be non-negative.
    ///
    /// ## Notes
    ///
    /// Logs an error if the `value` is negative.
    fn set(&self, value: i64);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as an integer.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<i64>;
}

Просмотреть файл

@@ -1,56 +1,56 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::ErrorType;
/// A description for the `StringMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait String {
    /// Sets to the specified value.
    ///
    /// # Arguments
    ///
    /// * `value` - The string to set the metric to.
    ///
    /// ## Notes
    ///
    /// Truncates the value if it is longer than `MAX_STRING_LENGTH` bytes and logs an error.
    fn set<S: Into<std::string::String>>(&self, value: S);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a string.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<std::string::String>;

    /// **Exported for test purposes.**
    ///
    /// Gets the number of recorded errors for the given metric and error type.
    ///
    /// # Arguments
    ///
    /// * `error` - The type of error.
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    ///
    /// # Returns
    ///
    /// The number of errors reported.
    fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
        &self,
        error: ErrorType,
        ping_name: S,
    ) -> i32;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::ErrorType;
/// A description for the `StringMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait String {
    /// Sets to the specified value.
    ///
    /// # Arguments
    ///
    /// * `value` - The string to set the metric to.
    ///
    /// ## Notes
    ///
    /// Truncates the value if it is longer than `MAX_STRING_LENGTH` bytes and logs an error.
    fn set<S: Into<std::string::String>>(&self, value: S);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a string.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<std::string::String>;

    /// **Exported for test purposes.**
    ///
    /// Gets the number of recorded errors for the given metric and error type.
    ///
    /// # Arguments
    ///
    /// * `error` - The type of error.
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    ///
    /// # Returns
    ///
    /// The number of errors reported.
    fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
        &self,
        error: ErrorType,
        ping_name: S,
    ) -> i32;
}

Просмотреть файл

@@ -1,61 +1,61 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// A description for the `StringListMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait StringList {
    /// Adds a new string to the list.
    ///
    /// # Arguments
    ///
    /// * `value` - The string to add.
    ///
    /// ## Notes
    ///
    /// Truncates the value if it is longer than `MAX_STRING_LENGTH` bytes and logs an error.
    fn add<S: Into<String>>(&self, value: S);

    /// Sets to a specific list of strings.
    ///
    /// # Arguments
    ///
    /// * `value` - The list of strings to set the metric to.
    ///
    /// ## Notes
    ///
    /// If passed an empty list, records an error and returns.
    /// Truncates the list if it is longer than `MAX_LIST_LENGTH` and logs an error.
    /// Truncates any value in the list if it is longer than `MAX_STRING_LENGTH` and logs an error.
    fn set(&self, value: Vec<String>);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently-stored values.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<Vec<String>>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently-stored values as a JSON String of the format
    /// `["string1", "string2", ...]`.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_json_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// A description for the `StringListMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait StringList {
    /// Adds a new string to the list.
    ///
    /// # Arguments
    ///
    /// * `value` - The string to add.
    ///
    /// ## Notes
    ///
    /// Truncates the value if it is longer than `MAX_STRING_LENGTH` bytes and logs an error.
    fn add<S: Into<String>>(&self, value: S);

    /// Sets to a specific list of strings.
    ///
    /// # Arguments
    ///
    /// * `value` - The list of strings to set the metric to.
    ///
    /// ## Notes
    ///
    /// If passed an empty list, records an error and returns.
    /// Truncates the list if it is longer than `MAX_LIST_LENGTH` and logs an error.
    /// Truncates any value in the list if it is longer than `MAX_STRING_LENGTH` and logs an error.
    fn set(&self, value: Vec<String>);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently-stored values.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<Vec<String>>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently-stored values as a JSON String of the format
    /// `["string1", "string2", ...]`.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_json_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}

Просмотреть файл

@@ -1,53 +1,53 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::time::Duration;
/// A description for the `TimespanMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Timespan {
    /// Starts tracking time for the provided metric.
    ///
    /// This records an error if it's already tracking time (i.e. start was already
    /// called with no corresponding `stop`): in that case the original
    /// start time will be preserved.
    // NOTE(review): the unit of `start_time` (presumably nanoseconds, matching
    // `TimingDistribution::set_start`) is not documented here — confirm against
    // the implementing type.
    fn set_start(&mut self, start_time: u64);

    /// Stops tracking time for the provided metric. Sets the metric to the elapsed time.
    ///
    /// This will record an error if no `start` was called.
    fn set_stop(&mut self, stop_time: u64);

    /// Aborts a previous `start` call. No error is recorded if no `start` was called.
    fn cancel(&mut self);

    /// Explicitly sets the timespan value.
    ///
    /// This API should only be used if your library or application requires recording
    /// times in a way that can not make use of `start`/`stop`/`cancel`.
    ///
    /// Care should be taken using this if the ping lifetime might contain more than one
    /// timespan measurement. To be safe, `set_raw` should generally be followed by
    /// sending a custom ping containing the timespan.
    ///
    /// # Arguments
    ///
    /// * `elapsed` - The elapsed time to record.
    /// * `overwrite` - Whether or not to overwrite existing data.
    fn set_raw(&self, elapsed: Duration, overwrite: bool);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as an integer.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<u64>;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::time::Duration;
/// A description for the `TimespanMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Timespan {
    /// Starts tracking time for the provided metric.
    ///
    /// This records an error if it's already tracking time (i.e. start was already
    /// called with no corresponding `stop`): in that case the original
    /// start time will be preserved.
    // NOTE(review): the unit of `start_time` (presumably nanoseconds, matching
    // `TimingDistribution::set_start`) is not documented here — confirm against
    // the implementing type.
    fn set_start(&mut self, start_time: u64);

    /// Stops tracking time for the provided metric. Sets the metric to the elapsed time.
    ///
    /// This will record an error if no `start` was called.
    fn set_stop(&mut self, stop_time: u64);

    /// Aborts a previous `start` call. No error is recorded if no `start` was called.
    fn cancel(&mut self);

    /// Explicitly sets the timespan value.
    ///
    /// This API should only be used if your library or application requires recording
    /// times in a way that can not make use of `start`/`stop`/`cancel`.
    ///
    /// Care should be taken using this if the ping lifetime might contain more than one
    /// timespan measurement. To be safe, `set_raw` should generally be followed by
    /// sending a custom ping containing the timespan.
    ///
    /// # Arguments
    ///
    /// * `elapsed` - The elapsed time to record.
    /// * `overwrite` - Whether or not to overwrite existing data.
    fn set_raw(&self, elapsed: Duration, overwrite: bool);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as an integer.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<u64>;
}

Просмотреть файл

@@ -1,104 +1,104 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::metrics::DistributionData;
use crate::metrics::TimerId;
/// A description for the `TimingDistributionMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait TimingDistribution {
    /// Starts tracking time for the provided metric.
    ///
    /// This records an error if it's already tracking time (i.e. start was already
    /// called with no corresponding [stop]): in that case the original
    /// start time will be preserved.
    ///
    /// # Arguments
    ///
    /// * `start_time` - Timestamp in nanoseconds.
    // NOTE(review): earlier docs claimed "# Returns: a unique `TimerId` for the
    // new timer", but this signature returns nothing — the timer id must be
    // tracked elsewhere; confirm against the implementing type.
    fn set_start(&mut self, start_time: u64);

    /// Stops tracking time for the provided metric and associated timer id.
    ///
    /// Adds a count to the corresponding bucket in the timing distribution.
    /// This will record an error if no `start` was called.
    ///
    /// # Arguments
    ///
    /// * `id` - The `TimerId` to associate with this timing. This allows
    ///   for concurrent timing of events associated with different ids to the
    ///   same timespan metric.
    /// * `stop_time` - Timestamp in nanoseconds.
    fn set_stop_and_accumulate(&mut self, id: TimerId, stop_time: u64);

    /// Aborts a previous `set_start` call. No error is recorded if no `set_start`
    /// was called.
    ///
    /// # Arguments
    ///
    /// * `id` - The `TimerId` to associate with this timing. This allows
    ///   for concurrent timing of events associated with different ids to the
    ///   same timing distribution metric.
    fn cancel(&mut self, id: TimerId);

    /// Accumulates the provided signed samples in the metric.
    ///
    /// This is required so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// Please note that this assumes that the provided samples are already in the
    /// "unit" declared by the instance of the implementing metric type (e.g. if the
    /// implementing class is a [TimingDistributionMetricType] and the instance this
    /// method was called on is using [TimeUnit.Second], then `samples` are assumed
    /// to be in that unit).
    ///
    /// # Arguments
    ///
    /// * `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and reports an `ErrorType::InvalidValue`
    /// for each of them. Reports an `ErrorType::InvalidOverflow` error for samples that
    /// are longer than `MAX_SAMPLE_TIME`.
    fn accumulate_samples_signed(&mut self, samples: Vec<i64>);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value of the distribution.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<DistributionData>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently-stored histogram as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_json_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::metrics::DistributionData;
use crate::metrics::TimerId;
/// A description for the `TimingDistributionMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait TimingDistribution {
    /// Starts tracking time for the provided metric.
    ///
    /// This records an error if it's already tracking time (i.e. start was already
    /// called with no corresponding [stop]): in that case the original
    /// start time will be preserved.
    ///
    /// # Arguments
    ///
    /// * `start_time` - Timestamp in nanoseconds.
    // NOTE(review): earlier docs claimed "# Returns: a unique `TimerId` for the
    // new timer", but this signature returns nothing — the timer id must be
    // tracked elsewhere; confirm against the implementing type.
    fn set_start(&mut self, start_time: u64);

    /// Stops tracking time for the provided metric and associated timer id.
    ///
    /// Adds a count to the corresponding bucket in the timing distribution.
    /// This will record an error if no `start` was called.
    ///
    /// # Arguments
    ///
    /// * `id` - The `TimerId` to associate with this timing. This allows
    ///   for concurrent timing of events associated with different ids to the
    ///   same timespan metric.
    /// * `stop_time` - Timestamp in nanoseconds.
    fn set_stop_and_accumulate(&mut self, id: TimerId, stop_time: u64);

    /// Aborts a previous `set_start` call. No error is recorded if no `set_start`
    /// was called.
    ///
    /// # Arguments
    ///
    /// * `id` - The `TimerId` to associate with this timing. This allows
    ///   for concurrent timing of events associated with different ids to the
    ///   same timing distribution metric.
    fn cancel(&mut self, id: TimerId);

    /// Accumulates the provided signed samples in the metric.
    ///
    /// This is required so that the platform-specific code can provide us with
    /// 64 bit signed integers if no `u64` comparable type is available. This
    /// will take care of filtering and reporting errors for any provided negative
    /// sample.
    ///
    /// Please note that this assumes that the provided samples are already in the
    /// "unit" declared by the instance of the implementing metric type (e.g. if the
    /// implementing class is a [TimingDistributionMetricType] and the instance this
    /// method was called on is using [TimeUnit.Second], then `samples` are assumed
    /// to be in that unit).
    ///
    /// # Arguments
    ///
    /// * `samples` - The vector holding the samples to be recorded by the metric.
    ///
    /// ## Notes
    ///
    /// Discards any negative value in `samples` and reports an `ErrorType::InvalidValue`
    /// for each of them. Reports an `ErrorType::InvalidOverflow` error for samples that
    /// are longer than `MAX_SAMPLE_TIME`.
    fn accumulate_samples_signed(&mut self, samples: Vec<i64>);

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value of the distribution.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<DistributionData>;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently-stored histogram as a JSON String of the serialized value.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value_as_json_string<'a, S: Into<Option<&'a str>>>(
        &self,
        ping_name: S,
    ) -> Option<String>;
}

Просмотреть файл

@@ -1,52 +1,31 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::ErrorType;
/// A description for the `UuidMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Uuid {
    /// Sets to the specified value.
    ///
    /// # Arguments
    ///
    /// * `value` - The UUID to set the metric to.
    fn set(&self, value: uuid::Uuid);

    /// Generates a new random UUID and sets the metric to it.
    fn generate_and_set(&self) -> uuid::Uuid;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a UUID.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<uuid::Uuid>;

    /// **Exported for test purposes.**
    ///
    /// Gets the number of recorded errors for the given metric and error type.
    ///
    /// # Arguments
    ///
    /// * `error` - The type of error.
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    ///
    /// # Returns
    ///
    /// The number of errors reported.
    fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
        &self,
        error: ErrorType,
        ping_name: S,
    ) -> i32;
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// A description for the `UuidMetric` type.
///
/// When changing this trait, make sure all the operations are
/// implemented in the related type in `../metrics/`.
pub trait Uuid {
    /// Sets to the specified value.
    ///
    /// # Arguments
    ///
    /// * `value` - The UUID to set the metric to.
    fn set(&self, value: uuid::Uuid);

    /// Generates a new random UUID and sets the metric to it.
    fn generate_and_set(&self) -> uuid::Uuid;

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a string.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<String>;
}

Просмотреть файл

@@ -1,421 +1,421 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Pings directory processing utilities.
use std::cmp::Ordering;
use std::fs::{self, File};
use std::io::{BufRead, BufReader};
use std::path::{Path, PathBuf};
use serde::Deserialize;
use uuid::Uuid;
use super::request::HeaderMap;
use crate::{DELETION_REQUEST_PINGS_DIRECTORY, PENDING_PINGS_DIRECTORY};
/// A representation of the data extracted from a ping file.
///
/// The tuple fields are, in order: the document id, the ping's path,
/// the JSON-encoded body of the ping, and the optional persisted headers.
pub type PingPayload = (String, String, String, Option<HeaderMap>);
/// A struct to hold the result of scanning all pings directories.
#[derive(Clone, Debug, Default)]
pub struct PingPayloadsByDirectory {
    /// Regular pending pings, each paired with a `u64` ordering key.
    // NOTE(review): the meaning of the `u64` (likely a file timestamp used
    // for ordering) is not visible from this chunk — confirm against the
    // directory-scanning code.
    pub pending_pings: Vec<(u64, PingPayload)>,
    /// Pending deletion-request pings, paired with the same kind of key.
    pub deletion_request_pings: Vec<(u64, PingPayload)>,
}
impl PingPayloadsByDirectory {
    /// Extends the data of this instance of PingPayloadsByDirectory
    /// with the data from another instance of PingPayloadsByDirectory.
    pub fn extend(&mut self, other: PingPayloadsByDirectory) {
        self.pending_pings.extend(other.pending_pings);
        self.deletion_request_pings
            .extend(other.deletion_request_pings);
    }

    /// Gets the sum of the number of deletion request and regular pending pings.
    pub fn len(&self) -> usize {
        self.pending_pings.len() + self.deletion_request_pings.len()
    }

    /// Whether there are no pending pings of either kind.
    ///
    /// Provided as the conventional companion to `len`
    /// (see Clippy's `len_without_is_empty` lint).
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
/// Gets the file name from a path as a `&str`.
///
/// Logs a warning and returns `None` when the path has no file name
/// or the file name is not valid Unicode; it never panics.
fn get_file_name_as_str(path: &Path) -> Option<&str> {
    match path.file_name() {
        None => {
            log::warn!("Error getting file name from path: {}", path.display());
            None
        }
        Some(file_name) => {
            let file_name = file_name.to_str();
            if file_name.is_none() {
                log::warn!("File name is not valid unicode: {}", path.display());
            }
            file_name
        }
    }
}
/// Processes a ping's metadata.
///
/// The metadata is an optional third line in the ping file,
/// currently it contains only additional headers to be added to each ping request.
/// Therefore, we will process the contents of this line
/// and return a HeaderMap of the persisted headers.
///
/// Returns `None` (after logging a warning) when the line cannot be
/// parsed as JSON with a `headers` object.
fn process_metadata(path: &str, metadata: &str) -> Option<HeaderMap> {
    // Private shape of the metadata line; only the headers are kept.
    #[derive(Deserialize)]
    struct PingMetadata {
        pub headers: HeaderMap,
    }
    match serde_json::from_str::<PingMetadata>(metadata) {
        Ok(metadata) => Some(metadata.headers),
        Err(_) => {
            log::warn!("Error while parsing ping metadata: {}", path);
            None
        }
    }
}
/// Manages the pings directories.
#[derive(Debug, Clone)]
pub struct PingDirectoryManager {
    /// Path to the pending pings directory.
    /// Pings here are awaiting upload.
    pending_pings_dir: PathBuf,
    /// Path to the deletion-request pings directory.
    /// Kept separate so deletion-request pings can be scanned on their own.
    deletion_request_pings_dir: PathBuf,
}
impl PingDirectoryManager {
    /// Creates a new directory manager.
    ///
    /// # Arguments
    ///
    /// * `data_path` - Path to the Glean data directory; the pending and
    ///   deletion-request ping directories are resolved as subdirectories of it.
    pub fn new<P: Into<PathBuf>>(data_path: P) -> Self {
        let data_path = data_path.into();
        Self {
            pending_pings_dir: data_path.join(PENDING_PINGS_DIRECTORY),
            deletion_request_pings_dir: data_path.join(DELETION_REQUEST_PINGS_DIRECTORY),
        }
    }

    /// Attempts to delete a ping file.
    ///
    /// Failures are logged and reported through the return value;
    /// this function never panics.
    ///
    /// # Arguments
    ///
    /// * `uuid` - The UUID of the ping file to be deleted
    ///
    /// # Returns
    ///
    /// Whether the file was successfully deleted.
    pub fn delete_file(&self, uuid: &str) -> bool {
        let path = match self.get_file_path(uuid) {
            Some(path) => path,
            None => {
                log::error!("Cannot find ping file to delete {}", uuid);
                return false;
            }
        };
        match fs::remove_file(&path) {
            Err(e) => {
                log::error!("Error deleting file {}. {}", path.display(), e);
                return false;
            }
            _ => log::info!("File was deleted {}", path.display()),
        };
        true
    }

    /// Reads a ping file and returns the data from it.
    ///
    /// If the file is not properly formatted, it will be deleted and `None` will be returned.
    ///
    /// # Arguments
    ///
    /// * `document_id` - The UUID of the ping file to be processed
    pub fn process_file(&self, document_id: &str) -> Option<PingPayload> {
        let path = match self.get_file_path(document_id) {
            Some(path) => path,
            None => {
                log::error!("Cannot find ping file to process {}", document_id);
                return None;
            }
        };
        let file = match File::open(&path) {
            Ok(file) => file,
            Err(e) => {
                log::error!("Error reading ping file {}. {}", path.display(), e);
                return None;
            }
        };
        log::info!("Processing ping at: {}", path.display());
        // The way the ping file is structured:
        // first line should always have the path,
        // second line should have the body with the ping contents in JSON format
        // and third line might contain ping metadata e.g. additional headers.
        let mut lines = BufReader::new(file).lines();
        if let (Some(Ok(path)), Some(Ok(body)), Ok(metadata)) =
            (lines.next(), lines.next(), lines.next().transpose())
        {
            // `and_then` collapses the optional metadata line and the
            // fallible metadata parsing into a single `Option`.
            let headers = metadata.and_then(|m| process_metadata(&path, &m));
            return Some((document_id.into(), path, body, headers));
        } else {
            log::warn!(
                "Error processing ping file: {}. Ping file is not formatted as expected.",
                document_id
            );
        }
        // Only reached when the file was malformed: clean it up.
        self.delete_file(document_id);
        None
    }

    /// Processes both ping directories.
    ///
    /// # Returns
    ///
    /// The pings found in each directory, each paired with its file size.
    pub fn process_dirs(&self) -> PingPayloadsByDirectory {
        PingPayloadsByDirectory {
            pending_pings: self.process_dir(&self.pending_pings_dir),
            deletion_request_pings: self.process_dir(&self.deletion_request_pings_dir),
        }
    }

    /// Processes one of the pings directory and return a vector with the ping data
    /// corresponding to each valid ping file in the directory.
    /// This vector will be ordered by file `modified_date`.
    ///
    /// Any files whose names are not valid UUIDs will be deleted
    /// to prevent files from polluting the pings directory.
    ///
    /// # Returns
    ///
    /// A vector of tuples with the file size and payload of each ping file in the directory.
    fn process_dir(&self, dir: &Path) -> Vec<(u64, PingPayload)> {
        log::info!("Processing persisted pings.");
        let entries = match dir.read_dir() {
            Ok(entries) => entries,
            Err(_) => {
                // This may error simply because the directory doesn't exist,
                // which is expected if no pings were stored yet.
                return Vec::new();
            }
        };
        let mut pending_pings: Vec<_> = entries
            .filter_map(|entry| entry.ok())
            .filter_map(|entry| {
                let path = entry.path();
                if let Some(file_name) = get_file_name_as_str(&path) {
                    // Delete file if its name is not a valid UUID.
                    if Uuid::parse_str(file_name).is_err() {
                        log::warn!("Pattern mismatch. Deleting {}", path.display());
                        self.delete_file(file_name);
                        return None;
                    }
                    if let Some(data) = self.process_file(file_name) {
                        let metadata = match fs::metadata(&path) {
                            Ok(metadata) => metadata,
                            Err(e) => {
                                // There's a rare case where this races against a parallel deletion
                                // of all pending ping files.
                                // This could therefore fail, in which case we don't care about the
                                // result and can ignore the ping, it's already been deleted.
                                log::warn!(
                                    "Unable to read metadata for file: {}, error: {:?}",
                                    path.display(),
                                    e
                                );
                                return None;
                            }
                        };
                        return Some((metadata, data));
                    }
                };
                None
            })
            .collect();
        // This will sort the pings by date in ascending order (oldest -> newest).
        pending_pings.sort_by(|(a, _), (b, _)| {
            // We might not be able to get the modified date for a given file,
            // in which case we fall back to `Ordering::Less` and make no
            // guarantee about where that entry ends up.
            if let (Ok(a), Ok(b)) = (a.modified(), b.modified()) {
                a.cmp(&b)
            } else {
                Ordering::Less
            }
        });
        pending_pings
            .into_iter()
            .map(|(metadata, data)| (metadata.len(), data))
            .collect()
    }

    /// Gets the path for a ping file based on its document_id.
    ///
    /// Will look for files in each ping directory until something is found.
    /// If nothing is found, returns `None`.
    fn get_file_path(&self, document_id: &str) -> Option<PathBuf> {
        for dir in [&self.pending_pings_dir, &self.deletion_request_pings_dir].iter() {
            let path = dir.join(document_id);
            if path.exists() {
                return Some(path);
            }
        }
        None
    }
}
#[cfg(test)]
mod test {
    use std::fs::File;
    use super::*;
    use crate::metrics::PingType;
    use crate::tests::new_glean;

    /// Scanning a data directory without ping subdirectories
    /// must yield empty results instead of panicking.
    #[test]
    fn doesnt_panic_if_no_pending_pings_directory() {
        let dir = tempfile::tempdir().unwrap();
        let directory_manager = PingDirectoryManager::new(dir.path());
        // Verify that processing the directory didn't panic
        let data = directory_manager.process_dirs();
        assert_eq!(data.pending_pings.len(), 0);
        assert_eq!(data.deletion_request_pings.len(), 0);
    }

    /// A submitted ping must be found again when scanning
    /// the pending pings directory.
    #[test]
    fn gets_correct_data_from_valid_ping_file() {
        let (mut glean, dir) = new_glean(None);
        // Register a ping for testing
        let ping_type = PingType::new("test", true, true, vec![]);
        glean.register_ping_type(&ping_type);
        // Submit the ping to populate the pending_pings directory
        glean.submit_ping(&ping_type, None).unwrap();
        let directory_manager = PingDirectoryManager::new(dir.path());
        // Try and process the pings directories
        let data = directory_manager.process_dirs();
        // Verify there is just the one request
        assert_eq!(data.pending_pings.len(), 1);
        assert_eq!(data.deletion_request_pings.len(), 0);
        // Verify request was returned for the "test" ping
        // (the ping type is the 4th segment of the submission path).
        let ping = &data.pending_pings[0].1;
        let request_ping_type = ping.1.split('/').nth(3).unwrap();
        assert_eq!(request_ping_type, "test");
    }

    /// Files whose names are not valid UUIDs must be deleted
    /// and must not show up in the results.
    #[test]
    fn non_uuid_files_are_deleted_and_ignored() {
        let (mut glean, dir) = new_glean(None);
        // Register a ping for testing
        let ping_type = PingType::new("test", true, true, vec![]);
        glean.register_ping_type(&ping_type);
        // Submit the ping to populate the pending_pings directory
        glean.submit_ping(&ping_type, None).unwrap();
        let directory_manager = PingDirectoryManager::new(&dir.path());
        let not_uuid_path = dir
            .path()
            .join(PENDING_PINGS_DIRECTORY)
            .join("not-uuid-file-name.txt");
        File::create(&not_uuid_path).unwrap();
        // Try and process the pings directories
        let data = directory_manager.process_dirs();
        // Verify there is just the one request
        assert_eq!(data.pending_pings.len(), 1);
        assert_eq!(data.deletion_request_pings.len(), 0);
        // Verify request was returned for the "test" ping
        let ping = &data.pending_pings[0].1;
        let request_ping_type = ping.1.split('/').nth(3).unwrap();
        assert_eq!(request_ping_type, "test");
        // Verify that file was indeed deleted
        assert!(!not_uuid_path.exists());
    }

    /// Ping files with malformed contents must be deleted
    /// and must not show up in the results.
    #[test]
    fn wrongly_formatted_files_are_deleted_and_ignored() {
        let (mut glean, dir) = new_glean(None);
        // Register a ping for testing
        let ping_type = PingType::new("test", true, true, vec![]);
        glean.register_ping_type(&ping_type);
        // Submit the ping to populate the pending_pings directory
        glean.submit_ping(&ping_type, None).unwrap();
        let directory_manager = PingDirectoryManager::new(&dir.path());
        let wrong_contents_file_path = dir
            .path()
            .join(PENDING_PINGS_DIRECTORY)
            .join(Uuid::new_v4().to_string());
        File::create(&wrong_contents_file_path).unwrap();
        // Try and process the pings directories
        let data = directory_manager.process_dirs();
        // Verify there is just the one request
        assert_eq!(data.pending_pings.len(), 1);
        assert_eq!(data.deletion_request_pings.len(), 0);
        // Verify request was returned for the "test" ping
        let ping = &data.pending_pings[0].1;
        let request_ping_type = ping.1.split('/').nth(3).unwrap();
        assert_eq!(request_ping_type, "test");
        // Verify that file was indeed deleted
        assert!(!wrong_contents_file_path.exists());
    }

    /// Deletion-request pings live in their own directory and must be
    /// reported separately from regular pending pings.
    #[test]
    fn takes_deletion_request_pings_into_account_while_processing() {
        let (glean, dir) = new_glean(None);
        // Submit a deletion request ping to populate deletion request folder.
        glean
            .internal_pings
            .deletion_request
            .submit(&glean, None)
            .unwrap();
        let directory_manager = PingDirectoryManager::new(dir.path());
        // Try and process the pings directories
        let data = directory_manager.process_dirs();
        assert_eq!(data.pending_pings.len(), 0);
        assert_eq!(data.deletion_request_pings.len(), 1);
        // Verify request was returned for the "deletion-request" ping
        let ping = &data.deletion_request_pings[0].1;
        let request_ping_type = ping.1.split('/').nth(3).unwrap();
        assert_eq!(request_ping_type, "deletion-request");
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Pings directory processing utilities.
use std::cmp::Ordering;
use std::fs::{self, File};
use std::io::{BufRead, BufReader};
use std::path::{Path, PathBuf};
use serde::Deserialize;
use uuid::Uuid;
use super::request::HeaderMap;
use crate::{DELETION_REQUEST_PINGS_DIRECTORY, PENDING_PINGS_DIRECTORY};
/// A representation of the data extracted from a ping file.
///
/// In order, the tuple holds: the document_id, the upload path,
/// the JSON encoded body of the ping and the persisted headers (if any).
pub type PingPayload = (String, String, String, Option<HeaderMap>);
/// A struct to hold the result of scanning all pings directories.
#[derive(Clone, Debug, Default)]
pub struct PingPayloadsByDirectory {
    /// Pings found in the pending pings directory,
    /// each paired with its on-disk file size in bytes.
    pub pending_pings: Vec<(u64, PingPayload)>,
    /// Pings found in the deletion-request pings directory,
    /// each paired with its on-disk file size in bytes.
    pub deletion_request_pings: Vec<(u64, PingPayload)>,
}
impl PingPayloadsByDirectory {
    /// Extends the data of this instance of PingPayloadsByDirectory
    /// with the data from another instance of PingPayloadsByDirectory.
    pub fn extend(&mut self, other: PingPayloadsByDirectory) {
        self.pending_pings.extend(other.pending_pings);
        self.deletion_request_pings
            .extend(other.deletion_request_pings);
    }

    /// Gets the sum of the number of deletion request and regular pending pings.
    pub fn len(&self) -> usize {
        self.pending_pings.len() + self.deletion_request_pings.len()
    }

    /// Whether there are no pending pings of either kind.
    ///
    /// Provided as the conventional companion to `len`
    /// (see Clippy's `len_without_is_empty` lint).
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
/// Gets the file name from a path as a `&str`.
///
/// Logs a warning and returns `None` when the path has no file name
/// or the file name is not valid Unicode; it never panics.
fn get_file_name_as_str(path: &Path) -> Option<&str> {
    match path.file_name() {
        None => {
            log::warn!("Error getting file name from path: {}", path.display());
            None
        }
        Some(file_name) => {
            let file_name = file_name.to_str();
            if file_name.is_none() {
                log::warn!("File name is not valid unicode: {}", path.display());
            }
            file_name
        }
    }
}
/// Processes a ping's metadata.
///
/// The metadata is an optional third line in the ping file,
/// currently it contains only additional headers to be added to each ping request.
/// Therefore, we will process the contents of this line
/// and return a HeaderMap of the persisted headers.
///
/// Returns `None` (after logging a warning) when the line cannot be
/// parsed as JSON with a `headers` object.
fn process_metadata(path: &str, metadata: &str) -> Option<HeaderMap> {
    // Private shape of the metadata line; only the headers are kept.
    #[derive(Deserialize)]
    struct PingMetadata {
        pub headers: HeaderMap,
    }
    match serde_json::from_str::<PingMetadata>(metadata) {
        Ok(metadata) => Some(metadata.headers),
        Err(_) => {
            log::warn!("Error while parsing ping metadata: {}", path);
            None
        }
    }
}
/// Manages the pings directories.
#[derive(Debug, Clone)]
pub struct PingDirectoryManager {
    /// Path to the pending pings directory.
    /// Pings here are awaiting upload.
    pending_pings_dir: PathBuf,
    /// Path to the deletion-request pings directory.
    /// Kept separate so deletion-request pings can be scanned on their own.
    deletion_request_pings_dir: PathBuf,
}
impl PingDirectoryManager {
    /// Creates a new directory manager.
    ///
    /// # Arguments
    ///
    /// * `data_path` - Path to the Glean data directory; the pending and
    ///   deletion-request ping directories are resolved as subdirectories of it.
    pub fn new<P: Into<PathBuf>>(data_path: P) -> Self {
        let data_path = data_path.into();
        Self {
            pending_pings_dir: data_path.join(PENDING_PINGS_DIRECTORY),
            deletion_request_pings_dir: data_path.join(DELETION_REQUEST_PINGS_DIRECTORY),
        }
    }

    /// Attempts to delete a ping file.
    ///
    /// Failures are logged and reported through the return value;
    /// this function never panics.
    ///
    /// # Arguments
    ///
    /// * `uuid` - The UUID of the ping file to be deleted
    ///
    /// # Returns
    ///
    /// Whether the file was successfully deleted.
    pub fn delete_file(&self, uuid: &str) -> bool {
        let path = match self.get_file_path(uuid) {
            Some(path) => path,
            None => {
                log::error!("Cannot find ping file to delete {}", uuid);
                return false;
            }
        };
        match fs::remove_file(&path) {
            Err(e) => {
                log::error!("Error deleting file {}. {}", path.display(), e);
                return false;
            }
            _ => log::info!("File was deleted {}", path.display()),
        };
        true
    }

    /// Reads a ping file and returns the data from it.
    ///
    /// If the file is not properly formatted, it will be deleted and `None` will be returned.
    ///
    /// # Arguments
    ///
    /// * `document_id` - The UUID of the ping file to be processed
    pub fn process_file(&self, document_id: &str) -> Option<PingPayload> {
        let path = match self.get_file_path(document_id) {
            Some(path) => path,
            None => {
                log::error!("Cannot find ping file to process {}", document_id);
                return None;
            }
        };
        let file = match File::open(&path) {
            Ok(file) => file,
            Err(e) => {
                log::error!("Error reading ping file {}. {}", path.display(), e);
                return None;
            }
        };
        log::info!("Processing ping at: {}", path.display());
        // The way the ping file is structured:
        // first line should always have the path,
        // second line should have the body with the ping contents in JSON format
        // and third line might contain ping metadata e.g. additional headers.
        let mut lines = BufReader::new(file).lines();
        if let (Some(Ok(path)), Some(Ok(body)), Ok(metadata)) =
            (lines.next(), lines.next(), lines.next().transpose())
        {
            // `and_then` collapses the optional metadata line and the
            // fallible metadata parsing into a single `Option`.
            let headers = metadata.and_then(|m| process_metadata(&path, &m));
            return Some((document_id.into(), path, body, headers));
        } else {
            log::warn!(
                "Error processing ping file: {}. Ping file is not formatted as expected.",
                document_id
            );
        }
        // Only reached when the file was malformed: clean it up.
        self.delete_file(document_id);
        None
    }

    /// Processes both ping directories.
    ///
    /// # Returns
    ///
    /// The pings found in each directory, each paired with its file size.
    pub fn process_dirs(&self) -> PingPayloadsByDirectory {
        PingPayloadsByDirectory {
            pending_pings: self.process_dir(&self.pending_pings_dir),
            deletion_request_pings: self.process_dir(&self.deletion_request_pings_dir),
        }
    }

    /// Processes one of the pings directory and return a vector with the ping data
    /// corresponding to each valid ping file in the directory.
    /// This vector will be ordered by file `modified_date`.
    ///
    /// Any files whose names are not valid UUIDs will be deleted
    /// to prevent files from polluting the pings directory.
    ///
    /// # Returns
    ///
    /// A vector of tuples with the file size and payload of each ping file in the directory.
    fn process_dir(&self, dir: &Path) -> Vec<(u64, PingPayload)> {
        log::info!("Processing persisted pings.");
        let entries = match dir.read_dir() {
            Ok(entries) => entries,
            Err(_) => {
                // This may error simply because the directory doesn't exist,
                // which is expected if no pings were stored yet.
                return Vec::new();
            }
        };
        let mut pending_pings: Vec<_> = entries
            .filter_map(|entry| entry.ok())
            .filter_map(|entry| {
                let path = entry.path();
                if let Some(file_name) = get_file_name_as_str(&path) {
                    // Delete file if its name is not a valid UUID.
                    if Uuid::parse_str(file_name).is_err() {
                        log::warn!("Pattern mismatch. Deleting {}", path.display());
                        self.delete_file(file_name);
                        return None;
                    }
                    if let Some(data) = self.process_file(file_name) {
                        let metadata = match fs::metadata(&path) {
                            Ok(metadata) => metadata,
                            Err(e) => {
                                // There's a rare case where this races against a parallel deletion
                                // of all pending ping files.
                                // This could therefore fail, in which case we don't care about the
                                // result and can ignore the ping, it's already been deleted.
                                log::warn!(
                                    "Unable to read metadata for file: {}, error: {:?}",
                                    path.display(),
                                    e
                                );
                                return None;
                            }
                        };
                        return Some((metadata, data));
                    }
                };
                None
            })
            .collect();
        // This will sort the pings by date in ascending order (oldest -> newest).
        pending_pings.sort_by(|(a, _), (b, _)| {
            // We might not be able to get the modified date for a given file,
            // in which case we fall back to `Ordering::Less` and make no
            // guarantee about where that entry ends up.
            if let (Ok(a), Ok(b)) = (a.modified(), b.modified()) {
                a.cmp(&b)
            } else {
                Ordering::Less
            }
        });
        pending_pings
            .into_iter()
            .map(|(metadata, data)| (metadata.len(), data))
            .collect()
    }

    /// Gets the path for a ping file based on its document_id.
    ///
    /// Will look for files in each ping directory until something is found.
    /// If nothing is found, returns `None`.
    fn get_file_path(&self, document_id: &str) -> Option<PathBuf> {
        for dir in [&self.pending_pings_dir, &self.deletion_request_pings_dir].iter() {
            let path = dir.join(document_id);
            if path.exists() {
                return Some(path);
            }
        }
        None
    }
}
#[cfg(test)]
mod test {
    use std::fs::File;
    use super::*;
    use crate::metrics::PingType;
    use crate::tests::new_glean;

    /// Scanning a data directory without ping subdirectories
    /// must yield empty results instead of panicking.
    #[test]
    fn doesnt_panic_if_no_pending_pings_directory() {
        let dir = tempfile::tempdir().unwrap();
        let directory_manager = PingDirectoryManager::new(dir.path());
        // Verify that processing the directory didn't panic
        let data = directory_manager.process_dirs();
        assert_eq!(data.pending_pings.len(), 0);
        assert_eq!(data.deletion_request_pings.len(), 0);
    }

    /// A submitted ping must be found again when scanning
    /// the pending pings directory.
    #[test]
    fn gets_correct_data_from_valid_ping_file() {
        let (mut glean, dir) = new_glean(None);
        // Register a ping for testing
        let ping_type = PingType::new("test", true, true, vec![]);
        glean.register_ping_type(&ping_type);
        // Submit the ping to populate the pending_pings directory
        glean.submit_ping(&ping_type, None).unwrap();
        let directory_manager = PingDirectoryManager::new(dir.path());
        // Try and process the pings directories
        let data = directory_manager.process_dirs();
        // Verify there is just the one request
        assert_eq!(data.pending_pings.len(), 1);
        assert_eq!(data.deletion_request_pings.len(), 0);
        // Verify request was returned for the "test" ping
        // (the ping type is the 4th segment of the submission path).
        let ping = &data.pending_pings[0].1;
        let request_ping_type = ping.1.split('/').nth(3).unwrap();
        assert_eq!(request_ping_type, "test");
    }

    /// Files whose names are not valid UUIDs must be deleted
    /// and must not show up in the results.
    #[test]
    fn non_uuid_files_are_deleted_and_ignored() {
        let (mut glean, dir) = new_glean(None);
        // Register a ping for testing
        let ping_type = PingType::new("test", true, true, vec![]);
        glean.register_ping_type(&ping_type);
        // Submit the ping to populate the pending_pings directory
        glean.submit_ping(&ping_type, None).unwrap();
        let directory_manager = PingDirectoryManager::new(&dir.path());
        let not_uuid_path = dir
            .path()
            .join(PENDING_PINGS_DIRECTORY)
            .join("not-uuid-file-name.txt");
        File::create(&not_uuid_path).unwrap();
        // Try and process the pings directories
        let data = directory_manager.process_dirs();
        // Verify there is just the one request
        assert_eq!(data.pending_pings.len(), 1);
        assert_eq!(data.deletion_request_pings.len(), 0);
        // Verify request was returned for the "test" ping
        let ping = &data.pending_pings[0].1;
        let request_ping_type = ping.1.split('/').nth(3).unwrap();
        assert_eq!(request_ping_type, "test");
        // Verify that file was indeed deleted
        assert!(!not_uuid_path.exists());
    }

    /// Ping files with malformed contents must be deleted
    /// and must not show up in the results.
    #[test]
    fn wrongly_formatted_files_are_deleted_and_ignored() {
        let (mut glean, dir) = new_glean(None);
        // Register a ping for testing
        let ping_type = PingType::new("test", true, true, vec![]);
        glean.register_ping_type(&ping_type);
        // Submit the ping to populate the pending_pings directory
        glean.submit_ping(&ping_type, None).unwrap();
        let directory_manager = PingDirectoryManager::new(&dir.path());
        let wrong_contents_file_path = dir
            .path()
            .join(PENDING_PINGS_DIRECTORY)
            .join(Uuid::new_v4().to_string());
        File::create(&wrong_contents_file_path).unwrap();
        // Try and process the pings directories
        let data = directory_manager.process_dirs();
        // Verify there is just the one request
        assert_eq!(data.pending_pings.len(), 1);
        assert_eq!(data.deletion_request_pings.len(), 0);
        // Verify request was returned for the "test" ping
        let ping = &data.pending_pings[0].1;
        let request_ping_type = ping.1.split('/').nth(3).unwrap();
        assert_eq!(request_ping_type, "test");
        // Verify that file was indeed deleted
        assert!(!wrong_contents_file_path.exists());
    }

    /// Deletion-request pings live in their own directory and must be
    /// reported separately from regular pending pings.
    #[test]
    fn takes_deletion_request_pings_into_account_while_processing() {
        let (glean, dir) = new_glean(None);
        // Submit a deletion request ping to populate deletion request folder.
        glean
            .internal_pings
            .deletion_request
            .submit(&glean, None)
            .unwrap();
        let directory_manager = PingDirectoryManager::new(dir.path());
        // Try and process the pings directories
        let data = directory_manager.process_dirs();
        assert_eq!(data.pending_pings.len(), 0);
        assert_eq!(data.deletion_request_pings.len(), 1);
        // Verify request was returned for the "deletion-request" ping
        let ping = &data.deletion_request_pings[0].1;
        let request_ping_type = ping.1.split('/').nth(3).unwrap();
        assert_eq!(request_ping_type, "deletion-request");
    }
}

3050
third_party/rust/glean-core/src/upload/mod.rs поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,112 +1,112 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Policies for ping storage, uploading and requests.
const MAX_RECOVERABLE_FAILURES: u32 = 3;
const MAX_WAIT_ATTEMPTS: u32 = 3;
const MAX_PING_BODY_SIZE: usize = 1024 * 1024; // 1 MB
const MAX_PENDING_PINGS_DIRECTORY_SIZE: u64 = 10 * 1024 * 1024; // 10MB
// The average number of baseline pings per client (on Fenix) is at 15 pings a day.
// The P99 value is ~110.
// With a maximum of (a nice round) 250 we can store about 2 days worth of pings.
// A baseline ping file averages about 600 bytes, so that's a total of just 144 kB we store.
// With the default rate limit of 15 pings per 60s it would take roughly 16 minutes to send out all pending
// pings.
const MAX_PENDING_PINGS_COUNT: u64 = 250;

/// A struct holding the values for all the policies related to ping storage, uploading and requests.
///
/// Each policy is an `Option`: `None` means "unlimited" and the
/// corresponding getter returns the integer type's maximum value.
#[derive(Debug)]
pub struct Policy {
    /// The maximum recoverable failures allowed per uploading window.
    ///
    /// Limiting this is necessary to avoid infinite loops on requesting upload tasks.
    max_recoverable_failures: Option<u32>,
    /// The maximum of [`PingUploadTask::Wait`] responses a user may get in a row
    /// when calling [`get_upload_task`].
    ///
    /// Limiting this is necessary to avoid infinite loops on requesting upload tasks.
    max_wait_attempts: Option<u32>,
    /// The maximum size in bytes a ping body may have to be eligible for upload.
    max_ping_body_size: Option<usize>,
    /// The maximum size in byte the pending pings directory may have on disk.
    max_pending_pings_directory_size: Option<u64>,
    /// The maximum number of pending pings on disk.
    max_pending_pings_count: Option<u64>,
}

impl Default for Policy {
    fn default() -> Self {
        Policy {
            max_recoverable_failures: Some(MAX_RECOVERABLE_FAILURES),
            max_wait_attempts: Some(MAX_WAIT_ATTEMPTS),
            max_ping_body_size: Some(MAX_PING_BODY_SIZE),
            max_pending_pings_directory_size: Some(MAX_PENDING_PINGS_DIRECTORY_SIZE),
            max_pending_pings_count: Some(MAX_PENDING_PINGS_COUNT),
        }
    }
}

impl Policy {
    /// The maximum recoverable failures allowed per uploading window,
    /// or `u32::MAX` when unlimited.
    pub fn max_recoverable_failures(&self) -> u32 {
        self.max_recoverable_failures.unwrap_or(u32::MAX)
    }

    #[cfg(test)]
    pub fn set_max_recoverable_failures(&mut self, v: Option<u32>) {
        self.max_recoverable_failures = v;
    }

    /// The maximum of `Wait` responses in a row when requesting upload tasks,
    /// or `u32::MAX` when unlimited.
    pub fn max_wait_attempts(&self) -> u32 {
        self.max_wait_attempts.unwrap_or(u32::MAX)
    }

    #[cfg(test)]
    pub fn set_max_wait_attempts(&mut self, v: Option<u32>) {
        self.max_wait_attempts = v;
    }

    /// The maximum ping body size in bytes eligible for upload,
    /// or `usize::MAX` when unlimited.
    pub fn max_ping_body_size(&self) -> usize {
        self.max_ping_body_size.unwrap_or(usize::MAX)
    }

    #[cfg(test)]
    pub fn set_max_ping_body_size(&mut self, v: Option<usize>) {
        self.max_ping_body_size = v;
    }

    /// The maximum on-disk size of the pending pings directory in bytes,
    /// or `u64::MAX` when unlimited.
    pub fn max_pending_pings_directory_size(&self) -> u64 {
        self.max_pending_pings_directory_size.unwrap_or(u64::MAX)
    }

    /// The maximum number of pending pings on disk,
    /// or `u64::MAX` when unlimited.
    pub fn max_pending_pings_count(&self) -> u64 {
        self.max_pending_pings_count.unwrap_or(u64::MAX)
    }

    #[cfg(test)]
    pub fn set_max_pending_pings_directory_size(&mut self, v: Option<u64>) {
        self.max_pending_pings_directory_size = v;
    }

    #[cfg(test)]
    pub fn set_max_pending_pings_count(&mut self, v: Option<u64>) {
        self.max_pending_pings_count = v;
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Policies for ping storage, uploading and requests.
const MAX_RECOVERABLE_FAILURES: u32 = 3;
const MAX_WAIT_ATTEMPTS: u32 = 3;
const MAX_PING_BODY_SIZE: usize = 1024 * 1024; // 1 MB
const MAX_PENDING_PINGS_DIRECTORY_SIZE: u64 = 10 * 1024 * 1024; // 10MB
// The average number of baseline pings per client (on Fenix) is at 15 pings a day.
// The P99 value is ~110.
// With a maximum of (a nice round) 250 we can store about 2 days worth of pings.
// A baseline ping file averages about 600 bytes, so that's a total of just 144 kB we store.
// With the default rate limit of 15 pings per 60s it would take roughly 16 minutes to send out all pending
// pings.
const MAX_PENDING_PINGS_COUNT: u64 = 250;

/// A struct holding the values for all the policies related to ping storage, uploading and requests.
///
/// Each policy is an `Option`: `None` means "unlimited" and the
/// corresponding getter returns the integer type's maximum value.
#[derive(Debug)]
pub struct Policy {
    /// The maximum recoverable failures allowed per uploading window.
    ///
    /// Limiting this is necessary to avoid infinite loops on requesting upload tasks.
    max_recoverable_failures: Option<u32>,
    /// The maximum of [`PingUploadTask::Wait`] responses a user may get in a row
    /// when calling [`get_upload_task`].
    ///
    /// Limiting this is necessary to avoid infinite loops on requesting upload tasks.
    max_wait_attempts: Option<u32>,
    /// The maximum size in bytes a ping body may have to be eligible for upload.
    max_ping_body_size: Option<usize>,
    /// The maximum size in byte the pending pings directory may have on disk.
    max_pending_pings_directory_size: Option<u64>,
    /// The maximum number of pending pings on disk.
    max_pending_pings_count: Option<u64>,
}

impl Default for Policy {
    fn default() -> Self {
        Policy {
            max_recoverable_failures: Some(MAX_RECOVERABLE_FAILURES),
            max_wait_attempts: Some(MAX_WAIT_ATTEMPTS),
            max_ping_body_size: Some(MAX_PING_BODY_SIZE),
            max_pending_pings_directory_size: Some(MAX_PENDING_PINGS_DIRECTORY_SIZE),
            max_pending_pings_count: Some(MAX_PENDING_PINGS_COUNT),
        }
    }
}

impl Policy {
    /// The maximum recoverable failures allowed per uploading window,
    /// or `u32::MAX` when unlimited.
    pub fn max_recoverable_failures(&self) -> u32 {
        self.max_recoverable_failures.unwrap_or(u32::MAX)
    }

    #[cfg(test)]
    pub fn set_max_recoverable_failures(&mut self, v: Option<u32>) {
        self.max_recoverable_failures = v;
    }

    /// The maximum of `Wait` responses in a row when requesting upload tasks,
    /// or `u32::MAX` when unlimited.
    pub fn max_wait_attempts(&self) -> u32 {
        self.max_wait_attempts.unwrap_or(u32::MAX)
    }

    #[cfg(test)]
    pub fn set_max_wait_attempts(&mut self, v: Option<u32>) {
        self.max_wait_attempts = v;
    }

    /// The maximum ping body size in bytes eligible for upload,
    /// or `usize::MAX` when unlimited.
    pub fn max_ping_body_size(&self) -> usize {
        self.max_ping_body_size.unwrap_or(usize::MAX)
    }

    #[cfg(test)]
    pub fn set_max_ping_body_size(&mut self, v: Option<usize>) {
        self.max_ping_body_size = v;
    }

    /// The maximum on-disk size of the pending pings directory in bytes,
    /// or `u64::MAX` when unlimited.
    pub fn max_pending_pings_directory_size(&self) -> u64 {
        self.max_pending_pings_directory_size.unwrap_or(u64::MAX)
    }

    /// The maximum number of pending pings on disk,
    /// or `u64::MAX` when unlimited.
    pub fn max_pending_pings_count(&self) -> u64 {
        self.max_pending_pings_count.unwrap_or(u64::MAX)
    }

    #[cfg(test)]
    pub fn set_max_pending_pings_directory_size(&mut self, v: Option<u64>) {
        self.max_pending_pings_directory_size = v;
    }

    #[cfg(test)]
    pub fn set_max_pending_pings_count(&mut self, v: Option<u64>) {
        self.max_pending_pings_count = v;
    }
}

Просмотреть файл

@ -1,291 +1,291 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Ping request representation.
use std::collections::HashMap;
use chrono::prelude::{DateTime, Utc};
use flate2::{read::GzDecoder, write::GzEncoder, Compression};
use serde_json::{self, Value as JsonValue};
use std::io::prelude::*;
use crate::error::{ErrorKind, Result};
use crate::system;
/// A representation for request headers.
///
/// Maps a header name to its value,
/// e.g. `"Content-Type" -> "application/json; charset=utf-8"`.
pub type HeaderMap = HashMap<String, String>;
/// Creates a formatted date string that can be used with Date headers.
///
/// # Arguments
///
/// * `current_time` - the timestamp to format.
///
/// # Returns
///
/// The timestamp formatted like `Mon, 22 Jun 2020 10:40:34 GMT`.
fn create_date_header_value(current_time: DateTime<Utc>) -> String {
    // Date headers are required to be in the following format:
    //
    // <day-name>, <day> <month> <year> <hour>:<minute>:<second> GMT
    //
    // as documented here:
    // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Date
    // Unfortunately we can't use `current_time.to_rfc2822()` as it
    // formats as "Mon, 22 Jun 2020 10:40:34 +0000", with an ending
    // "+0000" instead of "GMT". That's why we need to go with manual
    // formatting.
    current_time.format("%a, %d %b %Y %T GMT").to_string()
}
/// Builds the value for the "User-Agent" header.
///
/// The result has the shape `Glean/<version> (<binding> on <system>)`.
fn create_user_agent_header_value(
    version: &str,
    language_binding_name: &str,
    system: &str,
) -> String {
    // Assemble the pieces directly; this is equivalent to
    // `format!("Glean/{} ({} on {})", ...)`.
    [
        "Glean/",
        version,
        " (",
        language_binding_name,
        " on ",
        system,
        ")",
    ]
    .concat()
}
/// Attempt to gzip the contents of a ping.
///
/// Returns `None` when writing to or finishing the encoder fails;
/// the failure is logged together with the ping's path.
fn gzip_content(path: &str, content: &[u8]) -> Option<Vec<u8>> {
    let mut gzipper = GzEncoder::new(Vec::new(), Compression::default());
    // Attempt to add the content to the gzipper.
    match gzipper.write_all(content) {
        Ok(()) => gzipper.finish().ok(),
        Err(e) => {
            log::error!("Failed to write to the gzipper: {} - {:?}", path, e);
            None
        }
    }
}
/// Builder for ping upload requests
/// (per `Builder::new` below: "Creates a new builder for a PingRequest").
pub struct Builder {
    /// The ping's document ID, once set.
    document_id: Option<String>,
    /// The upload path, once set; must be set before the body.
    path: Option<String>,
    /// The request body, once set; gzipped when compression succeeded.
    body: Option<Vec<u8>>,
    /// Headers to send along with the request.
    headers: HeaderMap,
    /// Maximum allowed body size in bytes.
    /// NOTE(review): the enforcement of this limit is outside this view — confirm where it's checked.
    body_max_size: usize,
}
impl Builder {
    /// Creates a new builder for a PingRequest.
    ///
    /// The builder starts out with the default headers sent on every
    /// request: `Date`, `User-Agent`, `Content-Type`, `X-Client-Type`
    /// and `X-Client-Version`.
    ///
    /// # Arguments
    ///
    /// * `language_binding_name` - The name of the language binding, used in the User-Agent header.
    /// * `body_max_size` - The maximum size in bytes the (compressed) body may have.
    pub fn new(language_binding_name: &str, body_max_size: usize) -> Self {
        let mut headers = HashMap::new();
        headers.insert("Date".to_string(), create_date_header_value(Utc::now()));
        headers.insert(
            "User-Agent".to_string(),
            create_user_agent_header_value(crate::GLEAN_VERSION, language_binding_name, system::OS),
        );
        headers.insert(
            "Content-Type".to_string(),
            "application/json; charset=utf-8".to_string(),
        );
        headers.insert("X-Client-Type".to_string(), "Glean".to_string());
        headers.insert(
            "X-Client-Version".to_string(),
            crate::GLEAN_VERSION.to_string(),
        );

        Self {
            document_id: None,
            path: None,
            body: None,
            headers,
            body_max_size,
        }
    }

    /// Sets the document_id for this request.
    pub fn document_id<S: Into<String>>(mut self, value: S) -> Self {
        self.document_id = Some(value.into());
        self
    }

    /// Sets the path for this request.
    pub fn path<S: Into<String>>(mut self, value: S) -> Self {
        self.path = Some(value.into());
        self
    }

    /// Sets the body for this request.
    ///
    /// This method will also attempt to gzip the body contents
    /// and add headers related to the body that was just added.
    ///
    /// Namely these headers are the "Content-Length" with the length of the body
    /// and in case we are successful on gzipping the contents, the "Content-Encoding"="gzip".
    ///
    /// **Important**
    /// If we are unable to gzip we don't panic and instead just set the uncompressed body.
    ///
    /// # Panics
    ///
    /// This method will panic in case we try to set the body before setting the path.
    pub fn body<S: Into<String>>(mut self, value: S) -> Self {
        // Attempt to gzip the body contents; fall back to the raw bytes
        // if compression fails for any reason.
        let original_as_string = value.into();
        let gzipped_content = gzip_content(
            self.path
                .as_ref()
                .expect("Path must be set before attempting to set the body"),
            original_as_string.as_bytes(),
        );
        let add_gzip_header = gzipped_content.is_some();
        let body = gzipped_content.unwrap_or_else(|| original_as_string.into_bytes());

        // Include headers related to body.
        // Note: Content-Length reflects the final (possibly compressed) size.
        self = self.header("Content-Length", body.len().to_string());
        if add_gzip_header {
            self = self.header("Content-Encoding", "gzip");
        }

        self.body = Some(body);
        self
    }

    /// Sets a header for this request.
    ///
    /// Key and value may be different string-like types
    /// (e.g. a `&str` key and an owned `String` value).
    pub fn header<K: Into<String>, V: Into<String>>(mut self, key: K, value: V) -> Self {
        self.headers.insert(key.into(), value.into());
        self
    }

    /// Sets multiple headers for this request at once.
    pub fn headers(mut self, values: HeaderMap) -> Self {
        self.headers.extend(values);
        self
    }

    /// Consumes the builder and creates a PingRequest.
    ///
    /// # Errors
    ///
    /// Returns `ErrorKind::PingBodyOverflow` when the body is larger than
    /// the configured `body_max_size`.
    ///
    /// # Panics
    ///
    /// This method will panic if any of the required fields are missing:
    /// `document_id`, `path` and `body`.
    pub fn build(self) -> Result<PingRequest> {
        let body = self
            .body
            .expect("body must be set before attempting to build PingRequest");

        if body.len() > self.body_max_size {
            return Err(ErrorKind::PingBodyOverflow(body.len()).into());
        }

        Ok(PingRequest {
            document_id: self
                .document_id
                .expect("document_id must be set before attempting to build PingRequest"),
            path: self
                .path
                .expect("path must be set before attempting to build PingRequest"),
            body,
            headers: self.headers,
        })
    }
}
/// Represents a request to upload a ping.
///
/// Instances are created through [`PingRequest::builder`].
#[derive(PartialEq, Debug, Clone)]
pub struct PingRequest {
    /// The Job ID to identify this request,
    /// this is the same as the ping UUID.
    pub document_id: String,
    /// The path for the server to upload the ping to.
    pub path: String,
    /// The body of the request, as a byte array. If gzip encoded, then
    /// the `headers` list will contain a `Content-Encoding` header with
    /// the value `gzip`.
    pub body: Vec<u8>,
    /// A map with all the headers to be sent with the request.
    pub headers: HeaderMap,
}
impl PingRequest {
    /// Creates a new builder-style structure to help build a PingRequest.
    ///
    /// # Arguments
    ///
    /// * `language_binding_name` - The name of the language used by the binding that instantiated this Glean instance.
    ///                             This is used to build the User-Agent header value.
    /// * `body_max_size` - The maximum size in bytes the compressed ping body may have to be eligible for upload.
    pub fn builder(language_binding_name: &str, body_max_size: usize) -> Builder {
        Builder::new(language_binding_name, body_max_size)
    }

    /// Verifies if current request is for a deletion-request ping.
    pub fn is_deletion_request(&self) -> bool {
        // The path format should be `/submit/<app_id>/<ping_name>/<schema_version/<doc_id>`,
        // so the ping name is the fourth `/`-separated segment.
        match self.path.split('/').nth(3) {
            Some(segment) => segment == "deletion-request",
            None => false,
        }
    }

    /// Decompresses and pretty-format the ping payload
    ///
    /// Should be used for logging when required.
    /// This decompresses the payload in memory.
    pub fn pretty_body(&self) -> Option<String> {
        // Try to gunzip first; if the body isn't gzip data, fall back to
        // interpreting the raw bytes as UTF-8 JSON.
        let mut decoder = GzDecoder::new(&self.body[..]);
        let mut decompressed = String::with_capacity(self.body.len());
        let payload = if decoder.read_to_string(&mut decompressed).is_ok() {
            decompressed
        } else {
            std::str::from_utf8(&self.body).ok()?.to_string()
        };

        // Round-trip through serde_json to pretty-print the payload.
        let json: JsonValue = serde_json::from_str(&payload).ok()?;
        serde_json::to_string_pretty(&json).ok()
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use chrono::offset::TimeZone;

    #[test]
    fn date_header_resolution() {
        // A known timestamp must render in the HTTP `Date` header format,
        // ending in a literal "GMT" rather than a numeric offset.
        let date: DateTime<Utc> = Utc.ymd(2018, 2, 25).and_hms(11, 10, 37);
        let test_value = create_date_header_value(date);
        assert_eq!("Sun, 25 Feb 2018 11:10:37 GMT", test_value);
    }

    #[test]
    fn user_agent_header_resolution() {
        let test_value = create_user_agent_header_value("0.0.0", "Rust", "Windows");
        assert_eq!("Glean/0.0.0 (Rust on Windows)", test_value);
    }

    #[test]
    fn correctly_builds_ping_request() {
        let request = PingRequest::builder(/* language_binding_name */ "Rust", 1024 * 1024)
            .document_id("woop")
            .path("/random/path/doesnt/matter")
            .body("{}")
            .build()
            .unwrap();
        assert_eq!(request.document_id, "woop");
        assert_eq!(request.path, "/random/path/doesnt/matter");
        // Make sure all the expected headers were added.
        assert!(request.headers.contains_key("Date"));
        assert!(request.headers.contains_key("User-Agent"));
        assert!(request.headers.contains_key("Content-Type"));
        assert!(request.headers.contains_key("X-Client-Type"));
        assert!(request.headers.contains_key("X-Client-Version"));
        assert!(request.headers.contains_key("Content-Length"));
    }

    #[test]
    fn errors_when_request_body_exceeds_max_size() {
        // Create a new builder with an arbitrarily small value,
        // so we can test that the builder errors when the body exceeds the maximum size.
        let request = Builder::new(
            /* language_binding_name */ "Rust", /* body_max_size */ 1,
        )
        .document_id("woop")
        .path("/random/path/doesnt/matter")
        .body("{}")
        .build();
        assert!(request.is_err());
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Ping request representation.
use std::collections::HashMap;
use chrono::prelude::{DateTime, Utc};
use flate2::{read::GzDecoder, write::GzEncoder, Compression};
use serde_json::{self, Value as JsonValue};
use std::io::prelude::*;
use crate::error::{ErrorKind, Result};
use crate::system;
/// A representation for request headers.
pub type HeaderMap = HashMap<String, String>;
/// Creates a formatted date string that can be used with Date headers.
///
/// Date headers must look like
/// `<day-name>, <day> <month> <year> <hour>:<minute>:<second> GMT`
/// as documented at
/// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Date.
///
/// `current_time.to_rfc2822()` is not usable here: it renders the offset
/// as a trailing "+0000" (e.g. "Mon, 22 Jun 2020 10:40:34 +0000") where
/// the header requires a literal "GMT", so the value is built manually.
fn create_date_header_value(current_time: DateTime<Utc>) -> String {
    let formatted = current_time.format("%a, %d %b %Y %T GMT");
    formatted.to_string()
}
/// Builds the value for the `User-Agent` header.
///
/// Output shape: `Glean/<version> (<language binding> on <system>)`,
/// e.g. `Glean/33.4.0 (Rust on Windows)`.
fn create_user_agent_header_value(
    version: &str,
    language_binding_name: &str,
    system: &str,
) -> String {
    let mut value = String::with_capacity(
        "Glean/ ( on )".len() + version.len() + language_binding_name.len() + system.len(),
    );
    value.push_str("Glean/");
    value.push_str(version);
    value.push_str(" (");
    value.push_str(language_binding_name);
    value.push_str(" on ");
    value.push_str(system);
    value.push(')');
    value
}
/// Attempt to gzip the contents of a ping.
///
/// Returns `None` (after logging an error that includes the ping `path`)
/// when compression fails; the caller falls back to the raw payload.
fn gzip_content(path: &str, content: &[u8]) -> Option<Vec<u8>> {
    let mut gzipper = GzEncoder::new(Vec::new(), Compression::default());
    match gzipper.write_all(content) {
        Ok(()) => gzipper.finish().ok(),
        Err(e) => {
            log::error!("Failed to write to the gzipper: {} - {:?}", path, e);
            None
        }
    }
}
/// A builder for `PingRequest`s.
///
/// Obtained via `PingRequest::builder`; `new` pre-populates the headers
/// every request carries and `build` validates the collected pieces.
pub struct Builder {
    /// The ping's document id (the ping UUID); required by `build`.
    document_id: Option<String>,
    /// The server upload path; must be set before the body.
    path: Option<String>,
    /// The request body, possibly gzip-compressed; required by `build`.
    body: Option<Vec<u8>>,
    /// Headers collected so far.
    headers: HeaderMap,
    /// Maximum allowed body size in bytes; larger bodies fail `build`.
    body_max_size: usize,
}
impl Builder {
    /// Creates a new builder for a PingRequest.
    ///
    /// The builder starts out with the default headers sent on every
    /// request: `Date`, `User-Agent`, `Content-Type`, `X-Client-Type`
    /// and `X-Client-Version`.
    ///
    /// # Arguments
    ///
    /// * `language_binding_name` - The name of the language binding, used in the User-Agent header.
    /// * `body_max_size` - The maximum size in bytes the (compressed) body may have.
    pub fn new(language_binding_name: &str, body_max_size: usize) -> Self {
        let mut headers = HashMap::new();
        headers.insert("Date".to_string(), create_date_header_value(Utc::now()));
        headers.insert(
            "User-Agent".to_string(),
            create_user_agent_header_value(crate::GLEAN_VERSION, language_binding_name, system::OS),
        );
        headers.insert(
            "Content-Type".to_string(),
            "application/json; charset=utf-8".to_string(),
        );
        headers.insert("X-Client-Type".to_string(), "Glean".to_string());
        headers.insert(
            "X-Client-Version".to_string(),
            crate::GLEAN_VERSION.to_string(),
        );

        Self {
            document_id: None,
            path: None,
            body: None,
            headers,
            body_max_size,
        }
    }

    /// Sets the document_id for this request.
    pub fn document_id<S: Into<String>>(mut self, value: S) -> Self {
        self.document_id = Some(value.into());
        self
    }

    /// Sets the path for this request.
    pub fn path<S: Into<String>>(mut self, value: S) -> Self {
        self.path = Some(value.into());
        self
    }

    /// Sets the body for this request.
    ///
    /// This method will also attempt to gzip the body contents
    /// and add headers related to the body that was just added.
    ///
    /// Namely these headers are the "Content-Length" with the length of the body
    /// and in case we are successful on gzipping the contents, the "Content-Encoding"="gzip".
    ///
    /// **Important**
    /// If we are unable to gzip we don't panic and instead just set the uncompressed body.
    ///
    /// # Panics
    ///
    /// This method will panic in case we try to set the body before setting the path.
    pub fn body<S: Into<String>>(mut self, value: S) -> Self {
        // Attempt to gzip the body contents; fall back to the raw bytes
        // if compression fails for any reason.
        let original_as_string = value.into();
        let gzipped_content = gzip_content(
            self.path
                .as_ref()
                .expect("Path must be set before attempting to set the body"),
            original_as_string.as_bytes(),
        );
        let add_gzip_header = gzipped_content.is_some();
        let body = gzipped_content.unwrap_or_else(|| original_as_string.into_bytes());

        // Include headers related to body.
        // Note: Content-Length reflects the final (possibly compressed) size.
        self = self.header("Content-Length", body.len().to_string());
        if add_gzip_header {
            self = self.header("Content-Encoding", "gzip");
        }

        self.body = Some(body);
        self
    }

    /// Sets a header for this request.
    ///
    /// Key and value may be different string-like types
    /// (e.g. a `&str` key and an owned `String` value).
    pub fn header<K: Into<String>, V: Into<String>>(mut self, key: K, value: V) -> Self {
        self.headers.insert(key.into(), value.into());
        self
    }

    /// Sets multiple headers for this request at once.
    pub fn headers(mut self, values: HeaderMap) -> Self {
        self.headers.extend(values);
        self
    }

    /// Consumes the builder and creates a PingRequest.
    ///
    /// # Errors
    ///
    /// Returns `ErrorKind::PingBodyOverflow` when the body is larger than
    /// the configured `body_max_size`.
    ///
    /// # Panics
    ///
    /// This method will panic if any of the required fields are missing:
    /// `document_id`, `path` and `body`.
    pub fn build(self) -> Result<PingRequest> {
        let body = self
            .body
            .expect("body must be set before attempting to build PingRequest");

        if body.len() > self.body_max_size {
            return Err(ErrorKind::PingBodyOverflow(body.len()).into());
        }

        Ok(PingRequest {
            document_id: self
                .document_id
                .expect("document_id must be set before attempting to build PingRequest"),
            path: self
                .path
                .expect("path must be set before attempting to build PingRequest"),
            body,
            headers: self.headers,
        })
    }
}
/// Represents a request to upload a ping.
///
/// Instances are created through [`PingRequest::builder`].
#[derive(PartialEq, Debug, Clone)]
pub struct PingRequest {
    /// The Job ID to identify this request,
    /// this is the same as the ping UUID.
    pub document_id: String,
    /// The path for the server to upload the ping to.
    pub path: String,
    /// The body of the request, as a byte array. If gzip encoded, then
    /// the `headers` list will contain a `Content-Encoding` header with
    /// the value `gzip`.
    pub body: Vec<u8>,
    /// A map with all the headers to be sent with the request.
    pub headers: HeaderMap,
}
impl PingRequest {
    /// Creates a new builder-style structure to help build a PingRequest.
    ///
    /// # Arguments
    ///
    /// * `language_binding_name` - The name of the language used by the binding that instantiated this Glean instance.
    ///                             This is used to build the User-Agent header value.
    /// * `body_max_size` - The maximum size in bytes the compressed ping body may have to be eligible for upload.
    pub fn builder(language_binding_name: &str, body_max_size: usize) -> Builder {
        Builder::new(language_binding_name, body_max_size)
    }

    /// Verifies if current request is for a deletion-request ping.
    pub fn is_deletion_request(&self) -> bool {
        // The path format should be `/submit/<app_id>/<ping_name>/<schema_version/<doc_id>`,
        // so the ping name is the fourth `/`-separated segment.
        match self.path.split('/').nth(3) {
            Some(segment) => segment == "deletion-request",
            None => false,
        }
    }

    /// Decompresses and pretty-format the ping payload
    ///
    /// Should be used for logging when required.
    /// This decompresses the payload in memory.
    pub fn pretty_body(&self) -> Option<String> {
        // Try to gunzip first; if the body isn't gzip data, fall back to
        // interpreting the raw bytes as UTF-8 JSON.
        let mut decoder = GzDecoder::new(&self.body[..]);
        let mut decompressed = String::with_capacity(self.body.len());
        let payload = if decoder.read_to_string(&mut decompressed).is_ok() {
            decompressed
        } else {
            std::str::from_utf8(&self.body).ok()?.to_string()
        };

        // Round-trip through serde_json to pretty-print the payload.
        let json: JsonValue = serde_json::from_str(&payload).ok()?;
        serde_json::to_string_pretty(&json).ok()
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use chrono::offset::TimeZone;

    #[test]
    fn date_header_resolution() {
        // A known timestamp must render in the HTTP `Date` header format,
        // ending in a literal "GMT" rather than a numeric offset.
        let date: DateTime<Utc> = Utc.ymd(2018, 2, 25).and_hms(11, 10, 37);
        let test_value = create_date_header_value(date);
        assert_eq!("Sun, 25 Feb 2018 11:10:37 GMT", test_value);
    }

    #[test]
    fn user_agent_header_resolution() {
        let test_value = create_user_agent_header_value("0.0.0", "Rust", "Windows");
        assert_eq!("Glean/0.0.0 (Rust on Windows)", test_value);
    }

    #[test]
    fn correctly_builds_ping_request() {
        let request = PingRequest::builder(/* language_binding_name */ "Rust", 1024 * 1024)
            .document_id("woop")
            .path("/random/path/doesnt/matter")
            .body("{}")
            .build()
            .unwrap();
        assert_eq!(request.document_id, "woop");
        assert_eq!(request.path, "/random/path/doesnt/matter");
        // Make sure all the expected headers were added.
        assert!(request.headers.contains_key("Date"));
        assert!(request.headers.contains_key("User-Agent"));
        assert!(request.headers.contains_key("Content-Type"));
        assert!(request.headers.contains_key("X-Client-Type"));
        assert!(request.headers.contains_key("X-Client-Version"));
        assert!(request.headers.contains_key("Content-Length"));
    }

    #[test]
    fn errors_when_request_body_exceeds_max_size() {
        // Create a new builder with an arbitrarily small value,
        // so we can test that the builder errors when the body exceeds the maximum size.
        let request = Builder::new(
            /* language_binding_name */ "Rust", /* body_max_size */ 1,
        )
        .document_id("woop")
        .path("/random/path/doesnt/matter")
        .body("{}")
        .build();
        assert!(request.is_err());
    }
}

Просмотреть файл

@ -1,83 +1,83 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// Result values of attempted ping uploads encoded for FFI use.
///
/// In a perfect world this would live in `glean-ffi`,
/// but because we also want to convert from pure integer values to a proper Rust enum
/// using Rust's `From` and `Into` trait, we need to have it in this crate
/// (The coherence rules don't allow to implement an external trait for an external type).
///
/// Due to restrictions of cbindgen they are re-defined in `glean-core/ffi/src/upload.rs`.
///
/// NOTE:
/// THEY MUST BE THE SAME ACROSS BOTH FILES!
pub mod ffi_upload_result {
    /// A recoverable error.
    pub const UPLOAD_RESULT_RECOVERABLE: u32 = 0x1;

    /// An unrecoverable error.
    pub const UPLOAD_RESULT_UNRECOVERABLE: u32 = 0x2;

    /// A HTTP response code.
    ///
    /// The actual response code is encoded in the lower bits,
    /// e.g. `0x8000 | 404` represents an HTTP 404 response.
    pub const UPLOAD_RESULT_HTTP_STATUS: u32 = 0x8000;
}
use ffi_upload_result::*;
/// The result of an attempted ping upload.
#[derive(Debug)]
pub enum UploadResult {
    /// A recoverable failure.
    ///
    /// During upload something went wrong,
    /// e.g. the network connection failed.
    /// The upload should be retried at a later time.
    RecoverableFailure,
    /// An unrecoverable upload failure.
    ///
    /// A possible cause might be a malformed URL.
    UnrecoverableFailure,
    /// A HTTP response code.
    ///
    /// This can still indicate an error, depending on the status code
    /// (only 200-299 is treated as success by `get_label`).
    HttpStatus(u32),
}
impl From<u32> for UploadResult {
fn from(status: u32) -> Self {
match status {
status if (status & UPLOAD_RESULT_HTTP_STATUS) == UPLOAD_RESULT_HTTP_STATUS => {
// Extract the status code from the lower bits.
let http_status = status & !UPLOAD_RESULT_HTTP_STATUS;
UploadResult::HttpStatus(http_status)
}
UPLOAD_RESULT_RECOVERABLE => UploadResult::RecoverableFailure,
UPLOAD_RESULT_UNRECOVERABLE => UploadResult::UnrecoverableFailure,
// Any unknown result code is treated as unrecoverable.
_ => UploadResult::UnrecoverableFailure,
}
}
}
impl UploadResult {
    /// Gets the label to be used in recording error counts for upload.
    ///
    /// Returns `None` if the upload finished successfully.
    /// Failures are recorded in the `ping_upload_failure` metric.
    pub fn get_label(&self) -> Option<&str> {
        let label = match self {
            UploadResult::HttpStatus(status) => match *status {
                // 2xx responses are successes: nothing to record.
                200..=299 => return None,
                400..=499 => "status_code_4xx",
                500..=599 => "status_code_5xx",
                _ => "status_code_unknown",
            },
            UploadResult::UnrecoverableFailure => "unrecoverable",
            UploadResult::RecoverableFailure => "recoverable",
        };
        Some(label)
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// Result values of attempted ping uploads encoded for FFI use.
///
/// In a perfect world this would live in `glean-ffi`,
/// but because we also want to convert from pure integer values to a proper Rust enum
/// using Rust's `From` and `Into` trait, we need to have it in this crate
/// (The coherence rules don't allow to implement an external trait for an external type).
///
/// Due to restrictions of cbindgen they are re-defined in `glean-core/ffi/src/upload.rs`.
///
/// NOTE:
/// THEY MUST BE THE SAME ACROSS BOTH FILES!
pub mod ffi_upload_result {
    /// A recoverable error.
    pub const UPLOAD_RESULT_RECOVERABLE: u32 = 0x1;

    /// An unrecoverable error.
    pub const UPLOAD_RESULT_UNRECOVERABLE: u32 = 0x2;

    /// A HTTP response code.
    ///
    /// The actual response code is encoded in the lower bits,
    /// e.g. `0x8000 | 404` represents an HTTP 404 response.
    pub const UPLOAD_RESULT_HTTP_STATUS: u32 = 0x8000;
}
use ffi_upload_result::*;
/// The result of an attempted ping upload.
#[derive(Debug)]
pub enum UploadResult {
    /// A recoverable failure.
    ///
    /// During upload something went wrong,
    /// e.g. the network connection failed.
    /// The upload should be retried at a later time.
    RecoverableFailure,
    /// An unrecoverable upload failure.
    ///
    /// A possible cause might be a malformed URL.
    UnrecoverableFailure,
    /// A HTTP response code.
    ///
    /// This can still indicate an error, depending on the status code
    /// (only 200-299 is treated as success by `get_label`).
    HttpStatus(u32),
}
impl From<u32> for UploadResult {
fn from(status: u32) -> Self {
match status {
status if (status & UPLOAD_RESULT_HTTP_STATUS) == UPLOAD_RESULT_HTTP_STATUS => {
// Extract the status code from the lower bits.
let http_status = status & !UPLOAD_RESULT_HTTP_STATUS;
UploadResult::HttpStatus(http_status)
}
UPLOAD_RESULT_RECOVERABLE => UploadResult::RecoverableFailure,
UPLOAD_RESULT_UNRECOVERABLE => UploadResult::UnrecoverableFailure,
// Any unknown result code is treated as unrecoverable.
_ => UploadResult::UnrecoverableFailure,
}
}
}
impl UploadResult {
    /// Gets the label to be used in recording error counts for upload.
    ///
    /// Returns `None` if the upload finished successfully.
    /// Failures are recorded in the `ping_upload_failure` metric.
    pub fn get_label(&self) -> Option<&str> {
        let label = match self {
            UploadResult::HttpStatus(status) => match *status {
                // 2xx responses are successes: nothing to record.
                200..=299 => return None,
                400..=499 => "status_code_4xx",
                500..=599 => "status_code_5xx",
                _ => "status_code_unknown",
            },
            UploadResult::UnrecoverableFailure => "unrecoverable",
            UploadResult::RecoverableFailure => "recoverable",
        };
        Some(label)
    }
}

546
third_party/rust/glean-core/src/util.rs поставляемый
Просмотреть файл

@ -1,273 +1,273 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use chrono::{DateTime, FixedOffset, Local};
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::TimeUnit;
use crate::CommonMetricData;
use crate::Glean;
/// Generates a pipeline-friendly string
/// that replaces non alphanumeric characters with dashes.
///
/// Consecutive non-alphanumeric characters collapse into a single dash,
/// and ASCII letters are lowercased.
pub fn sanitize_application_id(application_id: &str) -> String {
    let mut sanitized = String::with_capacity(application_id.len());
    let mut previous_was_dash = false;

    for ch in application_id.chars() {
        if ch.is_ascii_alphanumeric() {
            sanitized.push(ch.to_ascii_lowercase());
            previous_was_dash = false;
        } else if !previous_was_dash {
            sanitized.push('-');
            previous_was_dash = true;
        }
    }

    sanitized
}
/// Generates an ISO8601 compliant date/time string for the given time,
/// truncating it to the provided TimeUnit.
///
/// # Arguments
///
/// * `datetime` - the `DateTime` object that holds the date, time and timezone information.
/// * `truncate_to` - the desired resolution to use for the output string.
///
/// # Returns
///
/// A string representing the provided date/time truncated to the requested time unit.
pub fn get_iso_time_string(datetime: DateTime<FixedOffset>, truncate_to: TimeUnit) -> String {
    // `TimeUnit::format_pattern` supplies a chrono format string that only
    // includes fields up to the requested resolution (see the
    // `test_get_iso_time_string` cases for the resulting shapes).
    datetime.format(truncate_to.format_pattern()).to_string()
}
/// Get the current date & time with a fixed-offset timezone.
///
/// This converts from the `Local` timezone into its fixed-offset equivalent.
pub(crate) fn local_now_with_offset() -> DateTime<FixedOffset> {
    let now: DateTime<Local> = Local::now();
    // Re-interpreting `now` with its own (fixed) offset converts
    // `DateTime<Local>` into `DateTime<FixedOffset>` without changing
    // the instant in time.
    now.with_timezone(now.offset())
}
/// Truncates a string to at most `length` bytes, never splitting a
/// UTF-8 codepoint in half.
///
/// # Arguments
///
/// * `value` - The `String` to truncate.
/// * `length` - The maximum length, in bytes. The result may be shorter
///   when `length` would fall inside a multi-byte character.
///
/// # Returns
///
/// A string, with at most `length` bytes.
pub(crate) fn truncate_string_at_boundary<S: Into<String>>(value: S, length: usize) -> String {
    let s = value.into();
    if s.len() <= length {
        return s;
    }

    // Walk backwards from `length` until we land on a char boundary.
    // Index 0 is always a boundary, so this terminates with `end >= 0`.
    let mut end = length;
    while !s.is_char_boundary(end) {
        end -= 1;
    }
    s[..end].to_string()
}
/// Truncates a string to at most `length` bytes on a codepoint boundary
/// and, when truncation was actually needed, records an
/// `InvalidOverflow` error through the error reporting mechanism.
///
/// # Arguments
///
/// * `glean` - The Glean instance the metric doing the truncation belongs to.
/// * `meta` - The metadata for the metric. Used for recording the error.
/// * `value` - The `String` to truncate.
/// * `length` - The maximum length, in bytes. The result may be shorter
///   to avoid ending in the middle of a codepoint.
///
/// # Returns
///
/// A string, with at most `length` bytes.
pub(crate) fn truncate_string_at_boundary_with_error<S: Into<String>>(
    glean: &Glean,
    meta: &CommonMetricData,
    value: S,
    length: usize,
) -> String {
    let s = value.into();
    if s.len() <= length {
        return s;
    }

    let msg = format!("Value length {} exceeds maximum of {}", s.len(), length);
    record_error(glean, meta, ErrorType::InvalidOverflow, msg, None);
    truncate_string_at_boundary(s, length)
}
// On i686 on Windows, the CPython interpreter sets the FPU precision control
// flag to 53 bits of precision, rather than the 64 bit default. On x86_64 on
// Windows, the CPython interpreter changes the rounding control settings. This
// causes different floating point results than on other architectures. This
// context manager makes it easy to set the correct precision and rounding control
// to match our other targets and platforms.
//
// See https://bugzilla.mozilla.org/show_bug.cgi?id=1623335 for additional context.
#[cfg(all(target_os = "windows", target_env = "gnu"))]
pub mod floating_point_context {
    // `size_t` is "pointer size", which is equivalent to Rust's `usize`.
    // It's defined as such in libc:
    // * https://github.com/rust-lang/libc/blob/bcbfeb5516cd5bb055198dbfbddf8d626fa2be07/src/unix/mod.rs#L19
    // * https://github.com/rust-lang/libc/blob/bcbfeb5516cd5bb055198dbfbddf8d626fa2be07/src/windows/mod.rs#L16
    #[allow(non_camel_case_types)]
    type size_t = usize;

    #[link(name = "m")]
    extern "C" {
        // Gets and sets the floating point control word.
        // See documentation here:
        // https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/controlfp-s
        fn _controlfp_s(current: *mut size_t, new: size_t, mask: size_t) -> size_t;
    }

    // Rounding control mask
    const MCW_RC: size_t = 0x00000300;
    // Round by truncation
    const RC_CHOP: size_t = 0x00000300;
    // Precision control mask
    const MCW_PC: size_t = 0x00030000;
    // Values for 64-bit precision
    const PC_64: size_t = 0x00000000;

    /// Guard that switches the FPU to 64-bit precision and truncation
    /// rounding on construction and restores the previous control word
    /// when dropped.
    pub struct FloatingPointContext {
        /// The control word in effect before this guard changed it.
        original_value: size_t,
    }

    impl FloatingPointContext {
        pub fn new() -> Self {
            let mut current: size_t = 0;
            // SAFETY: `current` points to valid, writable stack memory and
            // `_controlfp_s` only writes the control word through it.
            let _err = unsafe { _controlfp_s(&mut current, PC_64 | RC_CHOP, MCW_PC | MCW_RC) };

            FloatingPointContext {
                original_value: current,
            }
        }
    }

    impl Drop for FloatingPointContext {
        fn drop(&mut self) {
            let mut current: size_t = 0;
            // SAFETY: same contract as in `new`; this restores the
            // precision and rounding bits saved at construction time.
            let _err = unsafe { _controlfp_s(&mut current, self.original_value, MCW_PC | MCW_RC) };
        }
    }
}
#[cfg(not(all(target_os = "windows", target_env = "gnu")))]
pub mod floating_point_context {
    /// No-op stand-in used on targets that don't need FPU control-word
    /// adjustment; mirrors the API of the Windows/GNU implementation.
    pub struct FloatingPointContext {}

    impl FloatingPointContext {
        /// Creates the (stateless) context.
        pub fn new() -> Self {
            FloatingPointContext {}
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use chrono::offset::TimeZone;

    #[test]
    fn test_sanitize_application_id() {
        assert_eq!(
            "org-mozilla-test-app",
            sanitize_application_id("org.mozilla.test-app")
        );
        // Runs of separator characters collapse into a single dash.
        assert_eq!(
            "org-mozilla-test-app",
            sanitize_application_id("org.mozilla..test---app")
        );
        assert_eq!(
            "org-mozilla-test-app",
            sanitize_application_id("org-mozilla-test-app")
        );
        // Uppercase ASCII is lowercased.
        assert_eq!(
            "org-mozilla-test-app",
            sanitize_application_id("org.mozilla.Test.App")
        );
    }

    #[test]
    fn test_get_iso_time_string() {
        // `1985-07-03T12:09:14.000560274+01:00`
        let dt = FixedOffset::east(3600)
            .ymd(1985, 7, 3)
            .and_hms_nano(12, 9, 14, 1_560_274);
        assert_eq!(
            "1985-07-03T12:09:14.001560274+01:00",
            get_iso_time_string(dt, TimeUnit::Nanosecond)
        );
        assert_eq!(
            "1985-07-03T12:09:14.001560+01:00",
            get_iso_time_string(dt, TimeUnit::Microsecond)
        );
        assert_eq!(
            "1985-07-03T12:09:14.001+01:00",
            get_iso_time_string(dt, TimeUnit::Millisecond)
        );
        assert_eq!(
            "1985-07-03T12:09:14+01:00",
            get_iso_time_string(dt, TimeUnit::Second)
        );
        assert_eq!(
            "1985-07-03T12:09+01:00",
            get_iso_time_string(dt, TimeUnit::Minute)
        );
        assert_eq!(
            "1985-07-03T12+01:00",
            get_iso_time_string(dt, TimeUnit::Hour)
        );
        assert_eq!("1985-07-03+01:00", get_iso_time_string(dt, TimeUnit::Day));
    }

    #[test]
    fn local_now_gets_the_time() {
        let now = Local::now();
        let fixed_now = local_now_with_offset();
        // We can't compare across differing timezones, so we just compare the UTC timestamps.
        // The second timestamp should be just a few nanoseconds later.
        assert!(
            fixed_now.naive_utc() >= now.naive_utc(),
            "Time mismatch. Local now: {:?}, Fixed now: {:?}",
            now,
            fixed_now
        );
    }

    #[test]
    fn truncate_safely_test() {
        // Each of these CJK characters is 3 bytes in UTF-8, so a 10-byte
        // limit must cut after the third character (9 bytes).
        let value = "电脑坏了".to_string();
        let truncated = truncate_string_at_boundary(value, 10);
        assert_eq!("电脑坏", truncated);

        let value = "0123456789abcdef".to_string();
        let truncated = truncate_string_at_boundary(value, 10);
        assert_eq!("0123456789", truncated);
    }

    #[test]
    #[should_panic]
    fn truncate_naive() {
        // Ensure that truncating the naïve way on this string would panic
        let value = "电脑坏了".to_string();
        value[0..10].to_string();
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use chrono::{DateTime, FixedOffset, Local};
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::TimeUnit;
use crate::CommonMetricData;
use crate::Glean;
/// Generates a pipeline-friendly string
/// that replaces non alphanumeric characters with dashes.
///
/// Consecutive non-alphanumeric characters collapse into a single dash,
/// and ASCII letters are lowercased.
pub fn sanitize_application_id(application_id: &str) -> String {
    let mut sanitized = String::with_capacity(application_id.len());
    let mut previous_was_dash = false;

    for ch in application_id.chars() {
        if ch.is_ascii_alphanumeric() {
            sanitized.push(ch.to_ascii_lowercase());
            previous_was_dash = false;
        } else if !previous_was_dash {
            sanitized.push('-');
            previous_was_dash = true;
        }
    }

    sanitized
}
/// Generates an ISO8601 compliant date/time string for the given time,
/// truncating it to the provided TimeUnit.
///
/// # Arguments
///
/// * `datetime` - the `DateTime` object that holds the date, time and timezone information.
/// * `truncate_to` - the desired resolution to use for the output string.
///
/// # Returns
///
/// A string representing the provided date/time truncated to the requested time unit.
pub fn get_iso_time_string(datetime: DateTime<FixedOffset>, truncate_to: TimeUnit) -> String {
    // `TimeUnit::format_pattern` supplies a chrono format string that only
    // includes fields up to the requested resolution (see the
    // `test_get_iso_time_string` cases for the resulting shapes).
    datetime.format(truncate_to.format_pattern()).to_string()
}
/// Get the current date & time with a fixed-offset timezone.
///
/// This converts from the `Local` timezone into its fixed-offset equivalent.
pub(crate) fn local_now_with_offset() -> DateTime<FixedOffset> {
    let now: DateTime<Local> = Local::now();
    // Re-interpreting `now` with its own (fixed) offset converts
    // `DateTime<Local>` into `DateTime<FixedOffset>` without changing
    // the instant in time.
    now.with_timezone(now.offset())
}
/// Truncates a string to at most `length` bytes, never splitting a
/// UTF-8 codepoint in half.
///
/// # Arguments
///
/// * `value` - The `String` to truncate.
/// * `length` - The maximum length, in bytes. The result may be shorter
///   when `length` would fall inside a multi-byte character.
///
/// # Returns
///
/// A string, with at most `length` bytes.
pub(crate) fn truncate_string_at_boundary<S: Into<String>>(value: S, length: usize) -> String {
    let s = value.into();
    if s.len() <= length {
        return s;
    }

    // Walk backwards from `length` until we land on a char boundary.
    // Index 0 is always a boundary, so this terminates with `end >= 0`.
    let mut end = length;
    while !s.is_char_boundary(end) {
        end -= 1;
    }
    s[..end].to_string()
}
/// Truncates a string to at most `length` bytes on a codepoint boundary
/// and, when truncation was actually needed, records an
/// `InvalidOverflow` error through the error reporting mechanism.
///
/// # Arguments
///
/// * `glean` - The Glean instance the metric doing the truncation belongs to.
/// * `meta` - The metadata for the metric. Used for recording the error.
/// * `value` - The `String` to truncate.
/// * `length` - The maximum length, in bytes. The result may be shorter
///   to avoid ending in the middle of a codepoint.
///
/// # Returns
///
/// A string, with at most `length` bytes.
pub(crate) fn truncate_string_at_boundary_with_error<S: Into<String>>(
    glean: &Glean,
    meta: &CommonMetricData,
    value: S,
    length: usize,
) -> String {
    let s = value.into();
    if s.len() <= length {
        return s;
    }

    let msg = format!("Value length {} exceeds maximum of {}", s.len(), length);
    record_error(glean, meta, ErrorType::InvalidOverflow, msg, None);
    truncate_string_at_boundary(s, length)
}
// On i686 on Windows, the CPython interpreter sets the FPU precision control
// flag to 53 bits of precision, rather than the 64 bit default. On x86_64 on
// Windows, the CPython interpreter changes the rounding control settings. This
// causes different floating point results than on other architectures. This
// context manager makes it easy to set the correct precision and rounding control
// to match our other targets and platforms.
//
// See https://bugzilla.mozilla.org/show_bug.cgi?id=1623335 for additional context.
#[cfg(all(target_os = "windows", target_env = "gnu"))]
pub mod floating_point_context {
    // `size_t` is "pointer size", which is equivalent to Rust's `usize`.
    // It's defined as such in libc:
    // * https://github.com/rust-lang/libc/blob/bcbfeb5516cd5bb055198dbfbddf8d626fa2be07/src/unix/mod.rs#L19
    // * https://github.com/rust-lang/libc/blob/bcbfeb5516cd5bb055198dbfbddf8d626fa2be07/src/windows/mod.rs#L16
    #[allow(non_camel_case_types)]
    type size_t = usize;
    #[link(name = "m")]
    extern "C" {
        // Gets and sets the floating point control word.
        // See documentation here:
        // https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/controlfp-s
        fn _controlfp_s(current: *mut size_t, new: size_t, mask: size_t) -> size_t;
    }
    // Rounding control mask
    const MCW_RC: size_t = 0x00000300;
    // Round by truncation
    const RC_CHOP: size_t = 0x00000300;
    // Precision control mask
    const MCW_PC: size_t = 0x00030000;
    // Values for 64-bit precision
    const PC_64: size_t = 0x00000000;
    /// Guard object: constructing it switches the FPU control word to 64-bit
    /// precision with round-by-truncation, and dropping it writes back the
    /// control word that was captured at construction time.
    pub struct FloatingPointContext {
        // Control word captured when this context was created; written back
        // in `drop`.
        // NOTE(review): this assumes `_controlfp_s` stores the pre-change
        // control word through `current` — confirm against the CRT docs.
        original_value: size_t,
    }
    impl FloatingPointContext {
        /// Applies 64-bit precision and truncation rounding, remembering the
        /// captured control word so it can be restored on drop.
        pub fn new() -> Self {
            let mut current: size_t = 0;
            // SAFETY: `current` points to a valid, writable local integer for
            // the duration of the call.
            let _err = unsafe { _controlfp_s(&mut current, PC_64 | RC_CHOP, MCW_PC | MCW_RC) };
            FloatingPointContext {
                original_value: current,
            }
        }
    }
    impl Drop for FloatingPointContext {
        // Writes the saved control word back when the context goes out of scope.
        fn drop(&mut self) {
            let mut current: size_t = 0;
            // SAFETY: `current` points to a valid, writable local integer for
            // the duration of the call.
            let _err = unsafe { _controlfp_s(&mut current, self.original_value, MCW_PC | MCW_RC) };
        }
    }
}
#[cfg(not(all(target_os = "windows", target_env = "gnu")))]
pub mod floating_point_context {
    /// No-op stand-in for the Windows/GNU floating point context guard.
    ///
    /// On every target other than Windows with the GNU toolchain the FPU
    /// control word needs no adjustment, so this type does nothing.
    pub struct FloatingPointContext {}
    impl FloatingPointContext {
        /// Constructs the no-op context.
        pub fn new() -> Self {
            Self {}
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use chrono::offset::TimeZone;
    #[test]
    fn test_sanitize_application_id() {
        // All of these spellings must collapse to the same sanitized id.
        let variants = vec![
            "org.mozilla.test-app",
            "org.mozilla..test---app",
            "org-mozilla-test-app",
            "org.mozilla.Test.App",
        ];
        for variant in variants {
            assert_eq!("org-mozilla-test-app", sanitize_application_id(variant));
        }
    }
    #[test]
    fn test_get_iso_time_string() {
        // `1985-07-03T12:09:14.000560274+01:00`
        let dt = FixedOffset::east(3600)
            .ymd(1985, 7, 3)
            .and_hms_nano(12, 9, 14, 1_560_274);
        // Each time unit paired with the rendering it should produce.
        let expectations = vec![
            (TimeUnit::Nanosecond, "1985-07-03T12:09:14.001560274+01:00"),
            (TimeUnit::Microsecond, "1985-07-03T12:09:14.001560+01:00"),
            (TimeUnit::Millisecond, "1985-07-03T12:09:14.001+01:00"),
            (TimeUnit::Second, "1985-07-03T12:09:14+01:00"),
            (TimeUnit::Minute, "1985-07-03T12:09+01:00"),
            (TimeUnit::Hour, "1985-07-03T12+01:00"),
            (TimeUnit::Day, "1985-07-03+01:00"),
        ];
        for (unit, formatted) in expectations {
            assert_eq!(formatted, get_iso_time_string(dt, unit));
        }
    }
    #[test]
    fn local_now_gets_the_time() {
        let now = Local::now();
        let fixed_now = local_now_with_offset();
        // We can't compare across differing timezones, so we just compare the UTC timestamps.
        // The second timestamp should be just a few nanoseconds later.
        assert!(
            fixed_now.naive_utc() >= now.naive_utc(),
            "Time mismatch. Local now: {:?}, Fixed now: {:?}",
            now,
            fixed_now
        );
    }
    #[test]
    fn truncate_safely_test() {
        // Multi-byte characters: byte 10 falls inside the fourth character,
        // so only the first three survive.
        let multibyte = "电脑坏了".to_string();
        assert_eq!("电脑坏", truncate_string_at_boundary(multibyte, 10));
        // Plain ASCII: every index is a char boundary, so exactly 10 bytes remain.
        let ascii = "0123456789abcdef".to_string();
        assert_eq!("0123456789", truncate_string_at_boundary(ascii, 10));
    }
    #[test]
    #[should_panic]
    fn truncate_naive() {
        // Ensure that truncating the naïve way on this string would panic
        let value = "电脑坏了".to_string();
        let _ = value[0..10].to_string();
    }
}

Просмотреть файл

@ -1,142 +1,142 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// #[allow(dead_code)] is required on this module as a workaround for
// https://github.com/rust-lang/rust/issues/46379
#![allow(dead_code)]
use glean_core::{Glean, Result};
use std::fs::{read_dir, File};
use std::io::{BufRead, BufReader};
use std::path::Path;
use chrono::offset::TimeZone;
use iso8601::Date::YMD;
use serde_json::Value as JsonValue;
use ctor::ctor;
/// Set up logging once for the whole test binary.
///
/// No individual test has to initialize logging; output verbosity for the
/// `glean_core` crate is controlled through the `RUST_LOG` environment
/// variable, e.g.:
///
/// ```
/// export RUST_LOG=glean_core=debug
/// ```
#[ctor]
fn enable_test_logging() {
    // Send all log output to stdout/stderr by default so tests don't have to
    // opt in individually. This only applies to glean-core tests; users of
    // the main library still need to call `glean_enable_logging` of the FFI
    // component (automatically done by the platform wrappers).
    // The result is ignored: the logger may already be installed.
    drop(env_logger::builder().is_test(true).try_init());
}
/// Creates a fresh temporary directory and returns it together with its
/// path rendered as a `String`.
pub fn tempdir() -> (tempfile::TempDir, String) {
    let dir = tempfile::tempdir().unwrap();
    let path = dir.path().display().to_string();
    (dir, path)
}
pub const GLOBAL_APPLICATION_ID: &str = "org.mozilla.glean.test.app";
/// Creates a new Glean instance backed by a temporary directory.
///
/// If `tempdir` is `None`, a fresh temporary directory is created. The
/// returned `TempDir` must be kept alive by the caller so the directory is
/// not deleted while Glean is still using it.
pub fn new_glean(tempdir: Option<tempfile::TempDir>) -> (Glean, tempfile::TempDir) {
    // Reuse the supplied directory, or make a fresh one.
    let dir = tempdir.unwrap_or_else(|| tempfile::tempdir().unwrap());
    let cfg = glean_core::Configuration {
        data_path: dir.path().display().to_string(),
        application_id: GLOBAL_APPLICATION_ID.into(),
        language_binding_name: "Rust".into(),
        upload_enabled: true,
        max_events: None,
        delay_ping_lifetime_io: false,
    };
    (Glean::new(cfg).unwrap(), dir)
}
/// Converts an iso8601::DateTime to a chrono::DateTime<FixedOffset>
///
/// Only the `YMD` date representation is supported; any other date format
/// aborts with a panic.
pub fn iso8601_to_chrono(datetime: &iso8601::DateTime) -> chrono::DateTime<chrono::FixedOffset> {
    match datetime.date {
        YMD { year, month, day } => chrono::FixedOffset::east(datetime.time.tz_offset_hours * 3600)
            .ymd(year, month, day)
            .and_hms_milli(
                datetime.time.hour,
                datetime.time.minute,
                datetime.time.second,
                datetime.time.millisecond,
            ),
        _ => panic!("Unsupported datetime format"),
    }
}
/// Collects all pings currently queued for upload.
///
/// # Arguments
///
/// * `data_path` - Glean's data path, as returned from Glean::get_data_path()
///
/// # Returns
///
/// A vector with one `(url, json_data, metadata)` triple per queued ping:
/// `url` is the endpoint the ping will go to, `json_data` is the parsed JSON
/// payload and `metadata` is optional persisted data related to the ping.
pub fn get_queued_pings(data_path: &Path) -> Result<Vec<(String, JsonValue, Option<JsonValue>)>> {
    let pending = data_path.join("pending_pings");
    get_pings(&pending)
}
/// Collects all currently queued `deletion-request` pings.
///
/// # Arguments
///
/// * `data_path` - Glean's data path, as returned from Glean::get_data_path()
///
/// # Returns
///
/// A vector with one `(url, json_data, metadata)` triple per queued ping:
/// `url` is the endpoint the ping will go to, `json_data` is the parsed JSON
/// payload and `metadata` is optional persisted data related to the ping.
pub fn get_deletion_pings(data_path: &Path) -> Result<Vec<(String, JsonValue, Option<JsonValue>)>> {
    let deletion = data_path.join("deletion_request");
    get_pings(&deletion)
}
/// Reads every ping file in `pings_dir` and parses it into a
/// `(url, json_body, optional_metadata)` triple.
///
/// Unreadable directory entries, non-files, unopenable files and files whose
/// body is not valid JSON are silently skipped; metadata that is present but
/// not valid JSON panics.
fn get_pings(pings_dir: &Path) -> Result<Vec<(String, JsonValue, Option<JsonValue>)>> {
    let mut pings = Vec::new();
    for entry in read_dir(pings_dir)? {
        let entry = match entry {
            Ok(e) => e,
            Err(_) => continue,
        };
        // Skip anything that isn't a regular file.
        if !entry.file_type().map(|t| t.is_file()).unwrap_or(false) {
            continue;
        }
        let file = match File::open(entry.path()) {
            Ok(f) => f,
            Err(_) => continue,
        };
        // A ping file is: URL line, JSON body line, optional metadata line.
        let mut lines = BufReader::new(file).lines();
        if let (Some(Ok(url)), Some(Ok(body)), Ok(metadata)) =
            (lines.next(), lines.next(), lines.next().transpose())
        {
            let parsed_metadata = metadata.map(|m| {
                serde_json::from_str::<JsonValue>(&m).expect("metadata should be valid JSON")
            });
            if let Ok(parsed_body) = serde_json::from_str::<JsonValue>(&body) {
                pings.push((url, parsed_body, parsed_metadata));
            }
        }
    }
    Ok(pings)
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// #[allow(dead_code)] is required on this module as a workaround for
// https://github.com/rust-lang/rust/issues/46379
#![allow(dead_code)]
use glean_core::{Glean, Result};
use std::fs::{read_dir, File};
use std::io::{BufRead, BufReader};
use std::path::Path;
use chrono::offset::TimeZone;
use iso8601::Date::YMD;
use serde_json::Value as JsonValue;
use ctor::ctor;
/// Set up logging once for the whole test binary.
///
/// No individual test has to initialize logging; output verbosity for the
/// `glean_core` crate is controlled through the `RUST_LOG` environment
/// variable, e.g.:
///
/// ```
/// export RUST_LOG=glean_core=debug
/// ```
#[ctor]
fn enable_test_logging() {
    // Send all log output to stdout/stderr by default so tests don't have to
    // opt in individually. This only applies to glean-core tests; users of
    // the main library still need to call `glean_enable_logging` of the FFI
    // component (automatically done by the platform wrappers).
    // The result is ignored: the logger may already be installed.
    drop(env_logger::builder().is_test(true).try_init());
}
/// Creates a fresh temporary directory and returns it together with its
/// path rendered as a `String`.
pub fn tempdir() -> (tempfile::TempDir, String) {
    let dir = tempfile::tempdir().unwrap();
    let path = dir.path().display().to_string();
    (dir, path)
}
pub const GLOBAL_APPLICATION_ID: &str = "org.mozilla.glean.test.app";
/// Creates a new Glean instance backed by a temporary directory.
///
/// If `tempdir` is `None`, a fresh temporary directory is created. The
/// returned `TempDir` must be kept alive by the caller so the directory is
/// not deleted while Glean is still using it.
pub fn new_glean(tempdir: Option<tempfile::TempDir>) -> (Glean, tempfile::TempDir) {
    // Reuse the supplied directory, or make a fresh one.
    let dir = tempdir.unwrap_or_else(|| tempfile::tempdir().unwrap());
    let cfg = glean_core::Configuration {
        data_path: dir.path().display().to_string(),
        application_id: GLOBAL_APPLICATION_ID.into(),
        language_binding_name: "Rust".into(),
        upload_enabled: true,
        max_events: None,
        delay_ping_lifetime_io: false,
    };
    (Glean::new(cfg).unwrap(), dir)
}
/// Converts an iso8601::DateTime to a chrono::DateTime<FixedOffset>
///
/// Only the `YMD` date representation is supported; any other date format
/// aborts with a panic.
pub fn iso8601_to_chrono(datetime: &iso8601::DateTime) -> chrono::DateTime<chrono::FixedOffset> {
    match datetime.date {
        YMD { year, month, day } => chrono::FixedOffset::east(datetime.time.tz_offset_hours * 3600)
            .ymd(year, month, day)
            .and_hms_milli(
                datetime.time.hour,
                datetime.time.minute,
                datetime.time.second,
                datetime.time.millisecond,
            ),
        _ => panic!("Unsupported datetime format"),
    }
}
/// Collects all pings currently queued for upload.
///
/// # Arguments
///
/// * `data_path` - Glean's data path, as returned from Glean::get_data_path()
///
/// # Returns
///
/// A vector with one `(url, json_data, metadata)` triple per queued ping:
/// `url` is the endpoint the ping will go to, `json_data` is the parsed JSON
/// payload and `metadata` is optional persisted data related to the ping.
pub fn get_queued_pings(data_path: &Path) -> Result<Vec<(String, JsonValue, Option<JsonValue>)>> {
    let pending = data_path.join("pending_pings");
    get_pings(&pending)
}
/// Collects all currently queued `deletion-request` pings.
///
/// # Arguments
///
/// * `data_path` - Glean's data path, as returned from Glean::get_data_path()
///
/// # Returns
///
/// A vector with one `(url, json_data, metadata)` triple per queued ping:
/// `url` is the endpoint the ping will go to, `json_data` is the parsed JSON
/// payload and `metadata` is optional persisted data related to the ping.
pub fn get_deletion_pings(data_path: &Path) -> Result<Vec<(String, JsonValue, Option<JsonValue>)>> {
    let deletion = data_path.join("deletion_request");
    get_pings(&deletion)
}
/// Reads every ping file in `pings_dir` and parses it into a
/// `(url, json_body, optional_metadata)` triple.
///
/// Unreadable directory entries, non-files, unopenable files and files whose
/// body is not valid JSON are silently skipped; metadata that is present but
/// not valid JSON panics.
fn get_pings(pings_dir: &Path) -> Result<Vec<(String, JsonValue, Option<JsonValue>)>> {
    let mut pings = Vec::new();
    for entry in read_dir(pings_dir)? {
        let entry = match entry {
            Ok(e) => e,
            Err(_) => continue,
        };
        // Skip anything that isn't a regular file.
        if !entry.file_type().map(|t| t.is_file()).unwrap_or(false) {
            continue;
        }
        let file = match File::open(entry.path()) {
            Ok(f) => f,
            Err(_) => continue,
        };
        // A ping file is: URL line, JSON body line, optional metadata line.
        let mut lines = BufReader::new(file).lines();
        if let (Some(Ok(url)), Some(Ok(body)), Ok(metadata)) =
            (lines.next(), lines.next(), lines.next().transpose())
        {
            let parsed_metadata = metadata.map(|m| {
                serde_json::from_str::<JsonValue>(&m).expect("metadata should be valid JSON")
            });
            if let Ok(parsed_body) = serde_json::from_str::<JsonValue>(&body) {
                pings.push((url, parsed_body, parsed_metadata));
            }
        }
    }
    Ok(pings)
}

Просмотреть файл

@ -1,437 +1,437 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{test_get_num_recorded_errors, ErrorType};
use glean_core::{CommonMetricData, Lifetime};
// Tests ported from glean-ac
mod linear {
    use super::*;
    // Tests for custom distributions using linear bucketing.
    #[test]
    fn serializer_should_correctly_serialize_custom_distribution() {
        let (mut tempdir, _) = tempdir();
        {
            let (glean, dir) = new_glean(Some(tempdir));
            tempdir = dir;
            // Linear histogram over the range [1, 100] with 100 buckets.
            let metric = CustomDistributionMetric::new(
                CommonMetricData {
                    name: "distribution".into(),
                    category: "telemetry".into(),
                    send_in_pings: vec!["store1".into()],
                    disabled: false,
                    lifetime: Lifetime::Ping,
                    ..Default::default()
                },
                1,
                100,
                100,
                HistogramType::Linear,
            );
            metric.accumulate_samples_signed(&glean, vec![50]);
            let snapshot = metric
                .test_get_value(&glean, "store1")
                .expect("Value should be stored");
            assert_eq!(snapshot.sum, 50);
        }
        // Make a new Glean instance here, which should force reloading of the data from disk
        // so we can ensure it persisted.
        // NOTE(review): the original comment said "because it has User lifetime",
        // but this metric uses `Lifetime::Ping`; the data is still on disk
        // because the ping was never submitted.
        {
            let (glean, _) = new_glean(Some(tempdir));
            let snapshot = StorageManager
                .snapshot_as_json(glean.storage(), "store1", true)
                .unwrap();
            assert_eq!(
                json!(50),
                snapshot["custom_distribution"]["telemetry.distribution"]["sum"]
            );
        }
    }
    #[test]
    fn set_value_properly_sets_the_value_in_all_stores() {
        let (glean, _t) = new_glean(None);
        let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: store_names.clone(),
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            100,
            HistogramType::Linear,
        );
        metric.accumulate_samples_signed(&glean, vec![50]);
        // The same sum and bucket count must appear in every target store.
        for store_name in store_names {
            let snapshot = StorageManager
                .snapshot_as_json(glean.storage(), &store_name, true)
                .unwrap();
            assert_eq!(
                json!(50),
                snapshot["custom_distribution"]["telemetry.distribution"]["sum"]
            );
            assert_eq!(
                json!(1),
                snapshot["custom_distribution"]["telemetry.distribution"]["values"]["50"]
            );
        }
    }
    // SKIPPED from glean-ac: memory distributions must not accumulate negative values
    // This test doesn't apply to Rust, because we're using unsigned integers.
    #[test]
    fn the_accumulate_samples_api_correctly_stores_memory_values() {
        // NOTE(review): the name and comments mention "memory values" because
        // this test appears to have been ported from the memory_distribution
        // suite; here it exercises a custom distribution.
        let (glean, _t) = new_glean(None);
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            100,
            HistogramType::Linear,
        );
        // Accumulate the samples. We intentionally do not report
        // negative values to not trigger error reporting.
        metric.accumulate_samples_signed(&glean, [1, 2, 3].to_vec());
        let snapshot = metric
            .test_get_value(&glean, "store1")
            .expect("Value should be stored");
        // Check that we got the right sum of samples.
        assert_eq!(snapshot.sum, 6);
        // We should get a sample in 3 buckets.
        // These numbers are a bit magic, but they correspond to
        // `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
        assert_eq!(1, snapshot.values[&1]);
        assert_eq!(1, snapshot.values[&2]);
        assert_eq!(1, snapshot.values[&3]);
        // No errors should be reported.
        assert!(test_get_num_recorded_errors(
            &glean,
            metric.meta(),
            ErrorType::InvalidValue,
            Some("store1")
        )
        .is_err());
    }
    #[test]
    fn the_accumulate_samples_api_correctly_handles_negative_values() {
        let (glean, _t) = new_glean(None);
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            100,
            HistogramType::Linear,
        );
        // Accumulate the samples.
        metric.accumulate_samples_signed(&glean, [-1, 1, 2, 3].to_vec());
        let snapshot = metric
            .test_get_value(&glean, "store1")
            .expect("Value should be stored");
        // Check that we got the right sum of samples.
        // The negative sample must not contribute to the sum.
        assert_eq!(snapshot.sum, 6);
        // We should get a sample in 3 buckets.
        // These numbers are a bit magic, but they correspond to
        // `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
        assert_eq!(1, snapshot.values[&1]);
        assert_eq!(1, snapshot.values[&2]);
        assert_eq!(1, snapshot.values[&3]);
        // 1 error should be reported.
        assert_eq!(
            Ok(1),
            test_get_num_recorded_errors(
                &glean,
                metric.meta(),
                ErrorType::InvalidValue,
                Some("store1")
            )
        );
    }
    #[test]
    fn json_snapshotting_works() {
        let (glean, _t) = new_glean(None);
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            100,
            HistogramType::Linear,
        );
        metric.accumulate_samples_signed(&glean, vec![50]);
        // Only checks that a JSON representation is produced at all.
        let snapshot = metric.test_get_value_as_json_string(&glean, "store1");
        assert!(snapshot.is_some());
    }
}
mod exponential {
    use super::*;
    // Tests for custom distributions using exponential bucketing.
    #[test]
    fn serializer_should_correctly_serialize_custom_distribution() {
        let (mut tempdir, _) = tempdir();
        {
            let (glean, dir) = new_glean(Some(tempdir));
            tempdir = dir;
            // Exponential histogram over the range [1, 100] with 10 buckets.
            let metric = CustomDistributionMetric::new(
                CommonMetricData {
                    name: "distribution".into(),
                    category: "telemetry".into(),
                    send_in_pings: vec!["store1".into()],
                    disabled: false,
                    lifetime: Lifetime::Ping,
                    ..Default::default()
                },
                1,
                100,
                10,
                HistogramType::Exponential,
            );
            metric.accumulate_samples_signed(&glean, vec![50]);
            let snapshot = metric
                .test_get_value(&glean, "store1")
                .expect("Value should be stored");
            assert_eq!(snapshot.sum, 50);
        }
        // Make a new Glean instance here, which should force reloading of the data from disk
        // so we can ensure it persisted.
        // NOTE(review): the original comment said "because it has User lifetime",
        // but this metric uses `Lifetime::Ping`; the data is still on disk
        // because the ping was never submitted.
        {
            let (glean, _) = new_glean(Some(tempdir));
            let snapshot = StorageManager
                .snapshot_as_json(glean.storage(), "store1", true)
                .unwrap();
            assert_eq!(
                json!(50),
                snapshot["custom_distribution"]["telemetry.distribution"]["sum"]
            );
        }
    }
    #[test]
    fn set_value_properly_sets_the_value_in_all_stores() {
        let (glean, _t) = new_glean(None);
        let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: store_names.clone(),
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            10,
            HistogramType::Exponential,
        );
        metric.accumulate_samples_signed(&glean, vec![50]);
        // The same sum and bucket count must appear in every target store.
        // With exponential bucketing the sample 50 lands in the bucket
        // whose minimum is 29.
        for store_name in store_names {
            let snapshot = StorageManager
                .snapshot_as_json(glean.storage(), &store_name, true)
                .unwrap();
            assert_eq!(
                json!(50),
                snapshot["custom_distribution"]["telemetry.distribution"]["sum"]
            );
            assert_eq!(
                json!(1),
                snapshot["custom_distribution"]["telemetry.distribution"]["values"]["29"]
            );
        }
    }
    // SKIPPED from glean-ac: memory distributions must not accumulate negative values
    // This test doesn't apply to Rust, because we're using unsigned integers.
    #[test]
    fn the_accumulate_samples_api_correctly_stores_memory_values() {
        // NOTE(review): the name and comments mention "memory values" because
        // this test appears to have been ported from the memory_distribution
        // suite; here it exercises a custom distribution.
        let (glean, _t) = new_glean(None);
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            10,
            HistogramType::Exponential,
        );
        // Accumulate the samples. We intentionally do not report
        // negative values to not trigger error reporting.
        metric.accumulate_samples_signed(&glean, [1, 2, 3].to_vec());
        let snapshot = metric
            .test_get_value(&glean, "store1")
            .expect("Value should be stored");
        // Check that we got the right sum of samples.
        assert_eq!(snapshot.sum, 6);
        // We should get a sample in 3 buckets.
        // These numbers are a bit magic, but they correspond to
        // `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
        assert_eq!(1, snapshot.values[&1]);
        assert_eq!(1, snapshot.values[&2]);
        assert_eq!(1, snapshot.values[&3]);
        // No errors should be reported.
        assert!(test_get_num_recorded_errors(
            &glean,
            metric.meta(),
            ErrorType::InvalidValue,
            Some("store1")
        )
        .is_err());
    }
    #[test]
    fn the_accumulate_samples_api_correctly_handles_negative_values() {
        let (glean, _t) = new_glean(None);
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            10,
            HistogramType::Exponential,
        );
        // Accumulate the samples.
        metric.accumulate_samples_signed(&glean, [-1, 1, 2, 3].to_vec());
        let snapshot = metric
            .test_get_value(&glean, "store1")
            .expect("Value should be stored");
        // Check that we got the right sum of samples.
        // The negative sample must not contribute to the sum.
        assert_eq!(snapshot.sum, 6);
        // We should get a sample in 3 buckets.
        // These numbers are a bit magic, but they correspond to
        // `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
        assert_eq!(1, snapshot.values[&1]);
        assert_eq!(1, snapshot.values[&2]);
        assert_eq!(1, snapshot.values[&3]);
        // 1 error should be reported.
        assert_eq!(
            Ok(1),
            test_get_num_recorded_errors(
                &glean,
                metric.meta(),
                ErrorType::InvalidValue,
                Some("store1")
            )
        );
    }
    #[test]
    fn json_snapshotting_works() {
        let (glean, _t) = new_glean(None);
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            10,
            HistogramType::Exponential,
        );
        metric.accumulate_samples_signed(&glean, vec![50]);
        // Only checks that a JSON representation is produced at all.
        let snapshot = metric.test_get_value_as_json_string(&glean, "store1");
        assert!(snapshot.is_some());
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{test_get_num_recorded_errors, ErrorType};
use glean_core::{CommonMetricData, Lifetime};
// Tests ported from glean-ac
mod linear {
    use super::*;
    // Tests for custom distributions using linear bucketing.
    #[test]
    fn serializer_should_correctly_serialize_custom_distribution() {
        let (mut tempdir, _) = tempdir();
        {
            let (glean, dir) = new_glean(Some(tempdir));
            tempdir = dir;
            // Linear histogram over the range [1, 100] with 100 buckets.
            let metric = CustomDistributionMetric::new(
                CommonMetricData {
                    name: "distribution".into(),
                    category: "telemetry".into(),
                    send_in_pings: vec!["store1".into()],
                    disabled: false,
                    lifetime: Lifetime::Ping,
                    ..Default::default()
                },
                1,
                100,
                100,
                HistogramType::Linear,
            );
            metric.accumulate_samples_signed(&glean, vec![50]);
            let snapshot = metric
                .test_get_value(&glean, "store1")
                .expect("Value should be stored");
            assert_eq!(snapshot.sum, 50);
        }
        // Make a new Glean instance here, which should force reloading of the data from disk
        // so we can ensure it persisted.
        // NOTE(review): the original comment said "because it has User lifetime",
        // but this metric uses `Lifetime::Ping`; the data is still on disk
        // because the ping was never submitted.
        {
            let (glean, _) = new_glean(Some(tempdir));
            let snapshot = StorageManager
                .snapshot_as_json(glean.storage(), "store1", true)
                .unwrap();
            assert_eq!(
                json!(50),
                snapshot["custom_distribution"]["telemetry.distribution"]["sum"]
            );
        }
    }
    #[test]
    fn set_value_properly_sets_the_value_in_all_stores() {
        let (glean, _t) = new_glean(None);
        let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: store_names.clone(),
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            100,
            HistogramType::Linear,
        );
        metric.accumulate_samples_signed(&glean, vec![50]);
        // The same sum and bucket count must appear in every target store.
        for store_name in store_names {
            let snapshot = StorageManager
                .snapshot_as_json(glean.storage(), &store_name, true)
                .unwrap();
            assert_eq!(
                json!(50),
                snapshot["custom_distribution"]["telemetry.distribution"]["sum"]
            );
            assert_eq!(
                json!(1),
                snapshot["custom_distribution"]["telemetry.distribution"]["values"]["50"]
            );
        }
    }
    // SKIPPED from glean-ac: memory distributions must not accumulate negative values
    // This test doesn't apply to Rust, because we're using unsigned integers.
    #[test]
    fn the_accumulate_samples_api_correctly_stores_memory_values() {
        // NOTE(review): the name and comments mention "memory values" because
        // this test appears to have been ported from the memory_distribution
        // suite; here it exercises a custom distribution.
        let (glean, _t) = new_glean(None);
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            100,
            HistogramType::Linear,
        );
        // Accumulate the samples. We intentionally do not report
        // negative values to not trigger error reporting.
        metric.accumulate_samples_signed(&glean, [1, 2, 3].to_vec());
        let snapshot = metric
            .test_get_value(&glean, "store1")
            .expect("Value should be stored");
        // Check that we got the right sum of samples.
        assert_eq!(snapshot.sum, 6);
        // We should get a sample in 3 buckets.
        // These numbers are a bit magic, but they correspond to
        // `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
        assert_eq!(1, snapshot.values[&1]);
        assert_eq!(1, snapshot.values[&2]);
        assert_eq!(1, snapshot.values[&3]);
        // No errors should be reported.
        assert!(test_get_num_recorded_errors(
            &glean,
            metric.meta(),
            ErrorType::InvalidValue,
            Some("store1")
        )
        .is_err());
    }
    #[test]
    fn the_accumulate_samples_api_correctly_handles_negative_values() {
        let (glean, _t) = new_glean(None);
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            100,
            HistogramType::Linear,
        );
        // Accumulate the samples.
        metric.accumulate_samples_signed(&glean, [-1, 1, 2, 3].to_vec());
        let snapshot = metric
            .test_get_value(&glean, "store1")
            .expect("Value should be stored");
        // Check that we got the right sum of samples.
        // The negative sample must not contribute to the sum.
        assert_eq!(snapshot.sum, 6);
        // We should get a sample in 3 buckets.
        // These numbers are a bit magic, but they correspond to
        // `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
        assert_eq!(1, snapshot.values[&1]);
        assert_eq!(1, snapshot.values[&2]);
        assert_eq!(1, snapshot.values[&3]);
        // 1 error should be reported.
        assert_eq!(
            Ok(1),
            test_get_num_recorded_errors(
                &glean,
                metric.meta(),
                ErrorType::InvalidValue,
                Some("store1")
            )
        );
    }
    #[test]
    fn json_snapshotting_works() {
        let (glean, _t) = new_glean(None);
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            100,
            HistogramType::Linear,
        );
        metric.accumulate_samples_signed(&glean, vec![50]);
        // Only checks that a JSON representation is produced at all.
        let snapshot = metric.test_get_value_as_json_string(&glean, "store1");
        assert!(snapshot.is_some());
    }
}
mod exponential {
    use super::*;
    // Tests for custom distributions using exponential bucketing.
    #[test]
    fn serializer_should_correctly_serialize_custom_distribution() {
        let (mut tempdir, _) = tempdir();
        {
            let (glean, dir) = new_glean(Some(tempdir));
            tempdir = dir;
            // Exponential histogram over the range [1, 100] with 10 buckets.
            let metric = CustomDistributionMetric::new(
                CommonMetricData {
                    name: "distribution".into(),
                    category: "telemetry".into(),
                    send_in_pings: vec!["store1".into()],
                    disabled: false,
                    lifetime: Lifetime::Ping,
                    ..Default::default()
                },
                1,
                100,
                10,
                HistogramType::Exponential,
            );
            metric.accumulate_samples_signed(&glean, vec![50]);
            let snapshot = metric
                .test_get_value(&glean, "store1")
                .expect("Value should be stored");
            assert_eq!(snapshot.sum, 50);
        }
        // Make a new Glean instance here, which should force reloading of the data from disk
        // so we can ensure it persisted.
        // NOTE(review): the original comment said "because it has User lifetime",
        // but this metric uses `Lifetime::Ping`; the data is still on disk
        // because the ping was never submitted.
        {
            let (glean, _) = new_glean(Some(tempdir));
            let snapshot = StorageManager
                .snapshot_as_json(glean.storage(), "store1", true)
                .unwrap();
            assert_eq!(
                json!(50),
                snapshot["custom_distribution"]["telemetry.distribution"]["sum"]
            );
        }
    }
    #[test]
    fn set_value_properly_sets_the_value_in_all_stores() {
        let (glean, _t) = new_glean(None);
        let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: store_names.clone(),
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            10,
            HistogramType::Exponential,
        );
        metric.accumulate_samples_signed(&glean, vec![50]);
        // The same sum and bucket count must appear in every target store.
        // With exponential bucketing the sample 50 lands in the bucket
        // whose minimum is 29.
        for store_name in store_names {
            let snapshot = StorageManager
                .snapshot_as_json(glean.storage(), &store_name, true)
                .unwrap();
            assert_eq!(
                json!(50),
                snapshot["custom_distribution"]["telemetry.distribution"]["sum"]
            );
            assert_eq!(
                json!(1),
                snapshot["custom_distribution"]["telemetry.distribution"]["values"]["29"]
            );
        }
    }
    // SKIPPED from glean-ac: memory distributions must not accumulate negative values
    // This test doesn't apply to Rust, because we're using unsigned integers.
    #[test]
    fn the_accumulate_samples_api_correctly_stores_memory_values() {
        // NOTE(review): the name and comments mention "memory values" because
        // this test appears to have been ported from the memory_distribution
        // suite; here it exercises a custom distribution.
        let (glean, _t) = new_glean(None);
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            10,
            HistogramType::Exponential,
        );
        // Accumulate the samples. We intentionally do not report
        // negative values to not trigger error reporting.
        metric.accumulate_samples_signed(&glean, [1, 2, 3].to_vec());
        let snapshot = metric
            .test_get_value(&glean, "store1")
            .expect("Value should be stored");
        // Check that we got the right sum of samples.
        assert_eq!(snapshot.sum, 6);
        // We should get a sample in 3 buckets.
        // These numbers are a bit magic, but they correspond to
        // `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
        assert_eq!(1, snapshot.values[&1]);
        assert_eq!(1, snapshot.values[&2]);
        assert_eq!(1, snapshot.values[&3]);
        // No errors should be reported.
        assert!(test_get_num_recorded_errors(
            &glean,
            metric.meta(),
            ErrorType::InvalidValue,
            Some("store1")
        )
        .is_err());
    }
    #[test]
    fn the_accumulate_samples_api_correctly_handles_negative_values() {
        let (glean, _t) = new_glean(None);
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            10,
            HistogramType::Exponential,
        );
        // Accumulate the samples.
        metric.accumulate_samples_signed(&glean, [-1, 1, 2, 3].to_vec());
        let snapshot = metric
            .test_get_value(&glean, "store1")
            .expect("Value should be stored");
        // Check that we got the right sum of samples.
        // The negative sample must not contribute to the sum.
        assert_eq!(snapshot.sum, 6);
        // We should get a sample in 3 buckets.
        // These numbers are a bit magic, but they correspond to
        // `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
        assert_eq!(1, snapshot.values[&1]);
        assert_eq!(1, snapshot.values[&2]);
        assert_eq!(1, snapshot.values[&3]);
        // 1 error should be reported.
        assert_eq!(
            Ok(1),
            test_get_num_recorded_errors(
                &glean,
                metric.meta(),
                ErrorType::InvalidValue,
                Some("store1")
            )
        );
    }
    #[test]
    fn json_snapshotting_works() {
        let (glean, _t) = new_glean(None);
        let metric = CustomDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            1,
            100,
            10,
            HistogramType::Exponential,
        );
        metric.accumulate_samples_signed(&glean, vec![50]);
        // Only checks that a JSON representation is produced at all.
        let snapshot = metric.test_get_value_as_json_string(&glean, "store1");
        assert!(snapshot.is_some());
    }
}

580
third_party/rust/glean-core/tests/event.rs поставляемый
Просмотреть файл

@ -1,290 +1,290 @@
// NOTE(review): this span is the diff view of third_party/rust/glean-core/tests/event.rs
// from a backout commit; the file content appears twice (removed copy, then re-added
// copy) and the two copies are byte-identical.
// Integration tests for the glean-core EventMetric type.
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use std::collections::HashMap;
use std::fs;
use glean_core::metrics::*;
use glean_core::{CommonMetricData, Lifetime};
// Recording an event without extras stores one event per target store, with no
// `extra` payload.
#[test]
fn record_properly_records_without_optional_arguments() {
let store_names = vec!["store1".into(), "store2".into()];
let (glean, _t) = new_glean(None);
let metric = EventMetric::new(
CommonMetricData {
name: "test_event_no_optional".into(),
category: "telemetry".into(),
send_in_pings: store_names.clone(),
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
},
vec![],
);
metric.record(&glean, 1000, None);
for store_name in store_names {
let events = metric.test_get_value(&glean, &store_name).unwrap();
assert_eq!(1, events.len());
assert_eq!("telemetry", events[0].category);
assert_eq!("test_event_no_optional", events[0].name);
assert!(events[0].extra.is_none());
}
}
// Extras are passed as index->value pairs and stored under the allowed-key names
// declared at metric construction (index 0 -> "key1", 1 -> "key2").
#[test]
fn record_properly_records_with_optional_arguments() {
let (glean, _t) = new_glean(None);
let store_names = vec!["store1".into(), "store2".into()];
let metric = EventMetric::new(
CommonMetricData {
name: "test_event_no_optional".into(),
category: "telemetry".into(),
send_in_pings: store_names.clone(),
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
},
vec!["key1".into(), "key2".into()],
);
let extra: HashMap<i32, String> = [(0, "value1".into()), (1, "value2".into())]
.iter()
.cloned()
.collect();
metric.record(&glean, 1000, extra);
for store_name in store_names {
let events = metric.test_get_value(&glean, &store_name).unwrap();
let event = events[0].clone();
assert_eq!(1, events.len());
assert_eq!("telemetry", event.category);
assert_eq!("test_event_no_optional", event.name);
let extra = event.extra.unwrap();
assert_eq!(2, extra.len());
assert_eq!("value1", extra["key1"]);
assert_eq!("value2", extra["key2"]);
}
}
// SKIPPED record() computes the correct time between events
// Timing is now handled in the language-specific part.
// An empty store snapshots to None rather than an empty array.
#[test]
fn snapshot_returns_none_if_nothing_is_recorded_in_the_store() {
let (glean, _t) = new_glean(None);
assert!(glean
.event_storage()
.snapshot_as_json("store1", false)
.is_none())
}
// Snapshotting with clear=true drains only the requested store: the on-disk event
// file for "store1" is removed while "store2" keeps its copy of the event.
#[test]
fn snapshot_correctly_clears_the_stores() {
let (glean, _t) = new_glean(None);
let store_names = vec!["store1".into(), "store2".into()];
let metric = EventMetric::new(
CommonMetricData {
name: "test_event_clear".into(),
category: "telemetry".into(),
send_in_pings: store_names,
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
},
vec![],
);
metric.record(&glean, 1000, None);
let snapshot = glean.event_storage().snapshot_as_json("store1", true);
assert!(snapshot.is_some());
assert!(glean
.event_storage()
.snapshot_as_json("store1", false)
.is_none());
let files: Vec<fs::DirEntry> = fs::read_dir(&glean.event_storage().path)
.unwrap()
.filter_map(|x| x.ok())
.collect();
assert_eq!(1, files.len());
assert_eq!("store2", files[0].file_name());
let snapshot2 = glean.event_storage().snapshot_as_json("store2", false);
for s in vec![snapshot, snapshot2] {
assert!(s.is_some());
let s = s.unwrap();
assert_eq!(1, s.as_array().unwrap().len());
assert_eq!("telemetry", s[0]["category"]);
assert_eq!("test_event_clear", s[0]["name"]);
println!("{:?}", s[0].get("extra"));
assert!(s[0].get("extra").is_none());
}
}
// SKIPPED: Events are serialized in the correct JSON format (no extra)
// SKIPPED: Events are serialized in the correct JSON format (with extra)
// This test won't work as-is since Rust doesn't maintain the insertion order in
// a JSON object, therefore you can't check the JSON output directly against a
// string. This check is redundant with other tests, anyway, and checking against
// the schema is much more useful.
// Recording past the 500-event capacity auto-submits an "events" ping with
// reason=max_capacity containing the first 500 events; the overflow stays queued.
#[test]
fn test_sending_of_event_ping_when_it_fills_up() {
let (mut glean, _t) = new_glean(None);
let store_names: Vec<String> = vec!["events".into()];
for store_name in &store_names {
glean.register_ping_type(&PingType::new(store_name.clone(), true, false, vec![]));
}
let click = EventMetric::new(
CommonMetricData {
name: "click".into(),
category: "ui".into(),
send_in_pings: store_names,
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
},
vec!["test_event_number".into()],
);
// We send 510 events. We expect to get the first 500 in the ping and 10
// remaining afterward
for i in 0..510 {
let mut extra: HashMap<i32, String> = HashMap::new();
extra.insert(0, i.to_string());
click.record(&glean, i, extra);
}
assert_eq!(10, click.test_get_value(&glean, "events").unwrap().len());
let (url, json, _) = &get_queued_pings(glean.get_data_path()).unwrap()[0];
assert!(url.starts_with(format!("/submit/{}/events/", glean.get_application_id()).as_str()));
assert_eq!(500, json["events"].as_array().unwrap().len());
assert_eq!(
"max_capacity",
json["ping_info"].as_object().unwrap()["reason"]
.as_str()
.unwrap()
);
for i in 0..500 {
let event = &json["events"].as_array().unwrap()[i];
assert_eq!(i.to_string(), event["extra"]["test_event_number"]);
}
let snapshot = glean
.event_storage()
.snapshot_as_json("events", false)
.unwrap();
assert_eq!(10, snapshot.as_array().unwrap().len());
for i in 0..10 {
let event = &snapshot.as_array().unwrap()[i];
assert_eq!((i + 500).to_string(), event["extra"]["test_event_number"]);
}
}
// Extra values are stored verbatim up to the length limit; an over-long value is
// truncated (the assertion below implies a 100-character cap — confirm against
// glean-core's extra-value limit constant).
#[test]
fn extra_keys_must_be_recorded_and_truncated_if_needed() {
let (glean, _t) = new_glean(None);
let store_names: Vec<String> = vec!["store1".into()];
let test_event = EventMetric::new(
CommonMetricData {
name: "testEvent".into(),
category: "ui".into(),
send_in_pings: store_names,
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
},
vec!["extra1".into(), "truncatedExtra".into()],
);
let test_value = "LeanGleanByFrank";
let mut extra: HashMap<i32, String> = HashMap::new();
extra.insert(0, test_value.to_string());
extra.insert(1, test_value.to_string().repeat(10));
test_event.record(&glean, 0, extra);
let snapshot = glean
.event_storage()
.snapshot_as_json("store1", false)
.unwrap();
assert_eq!(1, snapshot.as_array().unwrap().len());
let event = &snapshot.as_array().unwrap()[0];
assert_eq!("ui", event["category"]);
assert_eq!("testEvent", event["name"]);
assert_eq!(2, event["extra"].as_object().unwrap().len());
assert_eq!(test_value, event["extra"]["extra1"]);
assert_eq!(
test_value.to_string().repeat(10)[0..100],
event["extra"]["truncatedExtra"]
);
}
// Snapshots are ordered by timestamp and rebased so the earliest event is 0
// (raw timestamps 100/1000/10000 become 0/900/9900).
#[test]
fn snapshot_sorts_the_timestamps() {
let (glean, _t) = new_glean(None);
let metric = EventMetric::new(
CommonMetricData {
name: "test_event_clear".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
},
vec![],
);
metric.record(&glean, 1000, None);
metric.record(&glean, 100, None);
metric.record(&glean, 10000, None);
let snapshot = glean
.event_storage()
.snapshot_as_json("store1", true)
.unwrap();
assert_eq!(
0,
snapshot.as_array().unwrap()[0]["timestamp"]
.as_i64()
.unwrap()
);
assert_eq!(
900,
snapshot.as_array().unwrap()[1]["timestamp"]
.as_i64()
.unwrap()
);
assert_eq!(
9900,
snapshot.as_array().unwrap()[2]["timestamp"]
.as_i64()
.unwrap()
);
}
// ---- second (re-added) copy of event.rs from the diff; identical to the copy above ----
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use std::collections::HashMap;
use std::fs;
use glean_core::metrics::*;
use glean_core::{CommonMetricData, Lifetime};
// Recording an event without extras stores one event per target store, with no
// `extra` payload.
#[test]
fn record_properly_records_without_optional_arguments() {
let store_names = vec!["store1".into(), "store2".into()];
let (glean, _t) = new_glean(None);
let metric = EventMetric::new(
CommonMetricData {
name: "test_event_no_optional".into(),
category: "telemetry".into(),
send_in_pings: store_names.clone(),
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
},
vec![],
);
metric.record(&glean, 1000, None);
for store_name in store_names {
let events = metric.test_get_value(&glean, &store_name).unwrap();
assert_eq!(1, events.len());
assert_eq!("telemetry", events[0].category);
assert_eq!("test_event_no_optional", events[0].name);
assert!(events[0].extra.is_none());
}
}
// Extras are passed as index->value pairs and stored under the allowed-key names
// declared at metric construction (index 0 -> "key1", 1 -> "key2").
#[test]
fn record_properly_records_with_optional_arguments() {
let (glean, _t) = new_glean(None);
let store_names = vec!["store1".into(), "store2".into()];
let metric = EventMetric::new(
CommonMetricData {
name: "test_event_no_optional".into(),
category: "telemetry".into(),
send_in_pings: store_names.clone(),
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
},
vec!["key1".into(), "key2".into()],
);
let extra: HashMap<i32, String> = [(0, "value1".into()), (1, "value2".into())]
.iter()
.cloned()
.collect();
metric.record(&glean, 1000, extra);
for store_name in store_names {
let events = metric.test_get_value(&glean, &store_name).unwrap();
let event = events[0].clone();
assert_eq!(1, events.len());
assert_eq!("telemetry", event.category);
assert_eq!("test_event_no_optional", event.name);
let extra = event.extra.unwrap();
assert_eq!(2, extra.len());
assert_eq!("value1", extra["key1"]);
assert_eq!("value2", extra["key2"]);
}
}
// SKIPPED record() computes the correct time between events
// Timing is now handled in the language-specific part.
// An empty store snapshots to None rather than an empty array.
#[test]
fn snapshot_returns_none_if_nothing_is_recorded_in_the_store() {
let (glean, _t) = new_glean(None);
assert!(glean
.event_storage()
.snapshot_as_json("store1", false)
.is_none())
}
// Snapshotting with clear=true drains only the requested store: the on-disk event
// file for "store1" is removed while "store2" keeps its copy of the event.
#[test]
fn snapshot_correctly_clears_the_stores() {
let (glean, _t) = new_glean(None);
let store_names = vec!["store1".into(), "store2".into()];
let metric = EventMetric::new(
CommonMetricData {
name: "test_event_clear".into(),
category: "telemetry".into(),
send_in_pings: store_names,
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
},
vec![],
);
metric.record(&glean, 1000, None);
let snapshot = glean.event_storage().snapshot_as_json("store1", true);
assert!(snapshot.is_some());
assert!(glean
.event_storage()
.snapshot_as_json("store1", false)
.is_none());
let files: Vec<fs::DirEntry> = fs::read_dir(&glean.event_storage().path)
.unwrap()
.filter_map(|x| x.ok())
.collect();
assert_eq!(1, files.len());
assert_eq!("store2", files[0].file_name());
let snapshot2 = glean.event_storage().snapshot_as_json("store2", false);
for s in vec![snapshot, snapshot2] {
assert!(s.is_some());
let s = s.unwrap();
assert_eq!(1, s.as_array().unwrap().len());
assert_eq!("telemetry", s[0]["category"]);
assert_eq!("test_event_clear", s[0]["name"]);
println!("{:?}", s[0].get("extra"));
assert!(s[0].get("extra").is_none());
}
}
// SKIPPED: Events are serialized in the correct JSON format (no extra)
// SKIPPED: Events are serialized in the correct JSON format (with extra)
// This test won't work as-is since Rust doesn't maintain the insertion order in
// a JSON object, therefore you can't check the JSON output directly against a
// string. This check is redundant with other tests, anyway, and checking against
// the schema is much more useful.
// Recording past the 500-event capacity auto-submits an "events" ping with
// reason=max_capacity containing the first 500 events; the overflow stays queued.
#[test]
fn test_sending_of_event_ping_when_it_fills_up() {
let (mut glean, _t) = new_glean(None);
let store_names: Vec<String> = vec!["events".into()];
for store_name in &store_names {
glean.register_ping_type(&PingType::new(store_name.clone(), true, false, vec![]));
}
let click = EventMetric::new(
CommonMetricData {
name: "click".into(),
category: "ui".into(),
send_in_pings: store_names,
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
},
vec!["test_event_number".into()],
);
// We send 510 events. We expect to get the first 500 in the ping and 10
// remaining afterward
for i in 0..510 {
let mut extra: HashMap<i32, String> = HashMap::new();
extra.insert(0, i.to_string());
click.record(&glean, i, extra);
}
assert_eq!(10, click.test_get_value(&glean, "events").unwrap().len());
let (url, json, _) = &get_queued_pings(glean.get_data_path()).unwrap()[0];
assert!(url.starts_with(format!("/submit/{}/events/", glean.get_application_id()).as_str()));
assert_eq!(500, json["events"].as_array().unwrap().len());
assert_eq!(
"max_capacity",
json["ping_info"].as_object().unwrap()["reason"]
.as_str()
.unwrap()
);
for i in 0..500 {
let event = &json["events"].as_array().unwrap()[i];
assert_eq!(i.to_string(), event["extra"]["test_event_number"]);
}
let snapshot = glean
.event_storage()
.snapshot_as_json("events", false)
.unwrap();
assert_eq!(10, snapshot.as_array().unwrap().len());
for i in 0..10 {
let event = &snapshot.as_array().unwrap()[i];
assert_eq!((i + 500).to_string(), event["extra"]["test_event_number"]);
}
}
// Extra values are stored verbatim up to the length limit; an over-long value is
// truncated (the assertion below implies a 100-character cap — confirm against
// glean-core's extra-value limit constant).
#[test]
fn extra_keys_must_be_recorded_and_truncated_if_needed() {
let (glean, _t) = new_glean(None);
let store_names: Vec<String> = vec!["store1".into()];
let test_event = EventMetric::new(
CommonMetricData {
name: "testEvent".into(),
category: "ui".into(),
send_in_pings: store_names,
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
},
vec!["extra1".into(), "truncatedExtra".into()],
);
let test_value = "LeanGleanByFrank";
let mut extra: HashMap<i32, String> = HashMap::new();
extra.insert(0, test_value.to_string());
extra.insert(1, test_value.to_string().repeat(10));
test_event.record(&glean, 0, extra);
let snapshot = glean
.event_storage()
.snapshot_as_json("store1", false)
.unwrap();
assert_eq!(1, snapshot.as_array().unwrap().len());
let event = &snapshot.as_array().unwrap()[0];
assert_eq!("ui", event["category"]);
assert_eq!("testEvent", event["name"]);
assert_eq!(2, event["extra"].as_object().unwrap().len());
assert_eq!(test_value, event["extra"]["extra1"]);
assert_eq!(
test_value.to_string().repeat(10)[0..100],
event["extra"]["truncatedExtra"]
);
}
// Snapshots are ordered by timestamp and rebased so the earliest event is 0
// (raw timestamps 100/1000/10000 become 0/900/9900).
#[test]
fn snapshot_sorts_the_timestamps() {
let (glean, _t) = new_glean(None);
let metric = EventMetric::new(
CommonMetricData {
name: "test_event_clear".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
},
vec![],
);
metric.record(&glean, 1000, None);
metric.record(&glean, 100, None);
metric.record(&glean, 10000, None);
let snapshot = glean
.event_storage()
.snapshot_as_json("store1", true)
.unwrap();
assert_eq!(
0,
snapshot.as_array().unwrap()[0]["timestamp"]
.as_i64()
.unwrap()
);
assert_eq!(
900,
snapshot.as_array().unwrap()[1]["timestamp"]
.as_i64()
.unwrap()
);
assert_eq!(
9900,
snapshot.as_array().unwrap()[2]["timestamp"]
.as_i64()
.unwrap()
);
}

226
third_party/rust/glean-core/tests/jwe.rs поставляемый
Просмотреть файл

@ -1,113 +1,113 @@
// NOTE(review): this span is the diff view of third_party/rust/glean-core/tests/jwe.rs
// from a backout commit; the file content appears twice (removed copy, then re-added
// copy) and the two copies are byte-identical.
// Integration tests for the glean-core JweMetric type. The constants below are the
// five dot-separated parts of a JWE compact representation (JWE is their join).
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{CommonMetricData, Lifetime};
const HEADER: &str = "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ";
const KEY: &str = "OKOawDo13gRp2ojaHV7LFpZcgV7T6DVZKTyKOMTYUmKoTCVJRgckCL9kiMT03JGeipsEdY3mx_etLbbWSrFr05kLzcSr4qKAq7YN7e9jwQRb23nfa6c9d-StnImGyFDbSv04uVuxIp5Zms1gNxKKK2Da14B8S4rzVRltdYwam_lDp5XnZAYpQdb76FdIKLaVmqgfwX7XWRxv2322i-vDxRfqNzo_tETKzpVLzfiwQyeyPGLBIO56YJ7eObdv0je81860ppamavo35UgoRdbYaBcoh9QcfylQr66oc6vFWXRcZ_ZT2LawVCWTIy3brGPi6UklfCpIMfIjf7iGdXKHzg";
const INIT_VECTOR: &str = "48V1_ALb6US04U3b";
const CIPHER_TEXT: &str =
"5eym8TW_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFShS8iB7j6jiSdiwkIr3ajwQzaBtQD_A";
const AUTH_TAG: &str = "XFBoMYUZodetZdvTiFvSkQ";
const JWE: &str = "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ.OKOawDo13gRp2ojaHV7LFpZcgV7T6DVZKTyKOMTYUmKoTCVJRgckCL9kiMT03JGeipsEdY3mx_etLbbWSrFr05kLzcSr4qKAq7YN7e9jwQRb23nfa6c9d-StnImGyFDbSv04uVuxIp5Zms1gNxKKK2Da14B8S4rzVRltdYwam_lDp5XnZAYpQdb76FdIKLaVmqgfwX7XWRxv2322i-vDxRfqNzo_tETKzpVLzfiwQyeyPGLBIO56YJ7eObdv0je81860ppamavo35UgoRdbYaBcoh9QcfylQr66oc6vFWXRcZ_ZT2LawVCWTIy3brGPi6UklfCpIMfIjf7iGdXKHzg.48V1_ALb6US04U3b.5eym8TW_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFShS8iB7j6jiSdiwkIr3ajwQzaBtQD_A.XFBoMYUZodetZdvTiFvSkQ";
// Setting a JWE lands it in the store under "jwe"/"<category>.<name>".
#[test]
fn jwe_metric_is_generated_and_stored() {
let (glean, _t) = new_glean(None);
let metric = JweMetric::new(CommonMetricData {
name: "jwe_metric".into(),
category: "local".into(),
send_in_pings: vec!["core".into()],
..Default::default()
});
metric.set_with_compact_representation(&glean, JWE);
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "core", false)
.unwrap();
assert_eq!(
json!({"jwe": {"local.jwe_metric": metric.test_get_value(&glean, "core") }}),
snapshot
);
}
// A single set() must be reflected in every store listed in send_in_pings.
#[test]
fn set_properly_sets_the_value_in_all_stores() {
let (glean, _t) = new_glean(None);
let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
let metric = JweMetric::new(CommonMetricData {
name: "jwe_metric".into(),
category: "local".into(),
send_in_pings: store_names.clone(),
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
});
metric.set_with_compact_representation(&glean, JWE);
// Check that the data was correctly set in each store.
for store_name in store_names {
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), &store_name, false)
.unwrap();
assert_eq!(
json!({"jwe": {"local.jwe_metric": metric.test_get_value(&glean, &store_name) }}),
snapshot
);
}
}
// test_get_value returns the original compact (period-delimited) representation.
#[test]
fn get_test_value_returns_the_period_delimited_string() {
let (glean, _t) = new_glean(None);
let metric = JweMetric::new(CommonMetricData {
name: "jwe_metric".into(),
category: "local".into(),
send_in_pings: vec!["core".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
});
metric.set_with_compact_representation(&glean, JWE);
assert_eq!(metric.test_get_value(&glean, "core").unwrap(), JWE);
}
// The JSON representation splits the compact form into its five named parts.
#[test]
fn get_test_value_as_json_string_returns_the_expected_repr() {
let (glean, _t) = new_glean(None);
let metric = JweMetric::new(CommonMetricData {
name: "jwe_metric".into(),
category: "local".into(),
send_in_pings: vec!["core".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
});
metric.set_with_compact_representation(&glean, JWE);
let expected_json = format!("{{\"header\":\"{}\",\"key\":\"{}\",\"init_vector\":\"{}\",\"cipher_text\":\"{}\",\"auth_tag\":\"{}\"}}", HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG);
assert_eq!(
metric
.test_get_value_as_json_string(&glean, "core")
.unwrap(),
expected_json
);
}
// ---- second (re-added) copy of jwe.rs from the diff; identical to the copy above ----
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{CommonMetricData, Lifetime};
const HEADER: &str = "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ";
const KEY: &str = "OKOawDo13gRp2ojaHV7LFpZcgV7T6DVZKTyKOMTYUmKoTCVJRgckCL9kiMT03JGeipsEdY3mx_etLbbWSrFr05kLzcSr4qKAq7YN7e9jwQRb23nfa6c9d-StnImGyFDbSv04uVuxIp5Zms1gNxKKK2Da14B8S4rzVRltdYwam_lDp5XnZAYpQdb76FdIKLaVmqgfwX7XWRxv2322i-vDxRfqNzo_tETKzpVLzfiwQyeyPGLBIO56YJ7eObdv0je81860ppamavo35UgoRdbYaBcoh9QcfylQr66oc6vFWXRcZ_ZT2LawVCWTIy3brGPi6UklfCpIMfIjf7iGdXKHzg";
const INIT_VECTOR: &str = "48V1_ALb6US04U3b";
const CIPHER_TEXT: &str =
"5eym8TW_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFShS8iB7j6jiSdiwkIr3ajwQzaBtQD_A";
const AUTH_TAG: &str = "XFBoMYUZodetZdvTiFvSkQ";
const JWE: &str = "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ.OKOawDo13gRp2ojaHV7LFpZcgV7T6DVZKTyKOMTYUmKoTCVJRgckCL9kiMT03JGeipsEdY3mx_etLbbWSrFr05kLzcSr4qKAq7YN7e9jwQRb23nfa6c9d-StnImGyFDbSv04uVuxIp5Zms1gNxKKK2Da14B8S4rzVRltdYwam_lDp5XnZAYpQdb76FdIKLaVmqgfwX7XWRxv2322i-vDxRfqNzo_tETKzpVLzfiwQyeyPGLBIO56YJ7eObdv0je81860ppamavo35UgoRdbYaBcoh9QcfylQr66oc6vFWXRcZ_ZT2LawVCWTIy3brGPi6UklfCpIMfIjf7iGdXKHzg.48V1_ALb6US04U3b.5eym8TW_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFShS8iB7j6jiSdiwkIr3ajwQzaBtQD_A.XFBoMYUZodetZdvTiFvSkQ";
// Setting a JWE lands it in the store under "jwe"/"<category>.<name>".
#[test]
fn jwe_metric_is_generated_and_stored() {
let (glean, _t) = new_glean(None);
let metric = JweMetric::new(CommonMetricData {
name: "jwe_metric".into(),
category: "local".into(),
send_in_pings: vec!["core".into()],
..Default::default()
});
metric.set_with_compact_representation(&glean, JWE);
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "core", false)
.unwrap();
assert_eq!(
json!({"jwe": {"local.jwe_metric": metric.test_get_value(&glean, "core") }}),
snapshot
);
}
// A single set() must be reflected in every store listed in send_in_pings.
#[test]
fn set_properly_sets_the_value_in_all_stores() {
let (glean, _t) = new_glean(None);
let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
let metric = JweMetric::new(CommonMetricData {
name: "jwe_metric".into(),
category: "local".into(),
send_in_pings: store_names.clone(),
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
});
metric.set_with_compact_representation(&glean, JWE);
// Check that the data was correctly set in each store.
for store_name in store_names {
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), &store_name, false)
.unwrap();
assert_eq!(
json!({"jwe": {"local.jwe_metric": metric.test_get_value(&glean, &store_name) }}),
snapshot
);
}
}
// test_get_value returns the original compact (period-delimited) representation.
#[test]
fn get_test_value_returns_the_period_delimited_string() {
let (glean, _t) = new_glean(None);
let metric = JweMetric::new(CommonMetricData {
name: "jwe_metric".into(),
category: "local".into(),
send_in_pings: vec!["core".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
});
metric.set_with_compact_representation(&glean, JWE);
assert_eq!(metric.test_get_value(&glean, "core").unwrap(), JWE);
}
// The JSON representation splits the compact form into its five named parts.
#[test]
fn get_test_value_as_json_string_returns_the_expected_repr() {
let (glean, _t) = new_glean(None);
let metric = JweMetric::new(CommonMetricData {
name: "jwe_metric".into(),
category: "local".into(),
send_in_pings: vec!["core".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
});
metric.set_with_compact_representation(&glean, JWE);
let expected_json = format!("{{\"header\":\"{}\",\"key\":\"{}\",\"init_vector\":\"{}\",\"cipher_text\":\"{}\",\"auth_tag\":\"{}\"}}", HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG);
assert_eq!(
metric
.test_get_value_as_json_string(&glean, "core")
.unwrap(),
expected_json
);
}

790
third_party/rust/glean-core/tests/labeled.rs поставляемый
Просмотреть файл

@ -1,395 +1,395 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{CommonMetricData, Lifetime};
#[test]
fn can_create_labeled_counter_metric() {
let (glean, _t) = new_glean(None);
let labeled = LabeledMetric::new(
CounterMetric::new(CommonMetricData {
name: "labeled_metric".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
}),
Some(vec!["label1".into()]),
);
let metric = labeled.get("label1");
metric.add(&glean, 1);
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "store1", true)
.unwrap();
assert_eq!(
json!({
"labeled_counter": {
"telemetry.labeled_metric": { "label1": 1 }
}
}),
snapshot
);
}
#[test]
fn can_create_labeled_string_metric() {
let (glean, _t) = new_glean(None);
let labeled = LabeledMetric::new(
StringMetric::new(CommonMetricData {
name: "labeled_metric".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
}),
Some(vec!["label1".into()]),
);
let metric = labeled.get("label1");
metric.set(&glean, "text");
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "store1", true)
.unwrap();
assert_eq!(
json!({
"labeled_string": {
"telemetry.labeled_metric": { "label1": "text" }
}
}),
snapshot
);
}
#[test]
fn can_create_labeled_bool_metric() {
let (glean, _t) = new_glean(None);
let labeled = LabeledMetric::new(
BooleanMetric::new(CommonMetricData {
name: "labeled_metric".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
}),
Some(vec!["label1".into()]),
);
let metric = labeled.get("label1");
metric.set(&glean, true);
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "store1", true)
.unwrap();
assert_eq!(
json!({
"labeled_boolean": {
"telemetry.labeled_metric": { "label1": true }
}
}),
snapshot
);
}
#[test]
fn can_use_multiple_labels() {
let (glean, _t) = new_glean(None);
let labeled = LabeledMetric::new(
CounterMetric::new(CommonMetricData {
name: "labeled_metric".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
}),
None,
);
let metric = labeled.get("label1");
metric.add(&glean, 1);
let metric = labeled.get("label2");
metric.add(&glean, 2);
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "store1", true)
.unwrap();
assert_eq!(
json!({
"labeled_counter": {
"telemetry.labeled_metric": {
"label1": 1,
"label2": 2,
}
}
}),
snapshot
);
}
#[test]
fn labels_are_checked_against_static_list() {
let (glean, _t) = new_glean(None);
let labeled = LabeledMetric::new(
CounterMetric::new(CommonMetricData {
name: "labeled_metric".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
}),
Some(vec!["label1".into(), "label2".into()]),
);
let metric = labeled.get("label1");
metric.add(&glean, 1);
let metric = labeled.get("label2");
metric.add(&glean, 2);
// All non-registed labels get mapped to the `other` label
let metric = labeled.get("label3");
metric.add(&glean, 3);
let metric = labeled.get("label4");
metric.add(&glean, 4);
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "store1", true)
.unwrap();
assert_eq!(
json!({
"labeled_counter": {
"telemetry.labeled_metric": {
"label1": 1,
"label2": 2,
"__other__": 7,
}
}
}),
snapshot
);
}
#[test]
fn dynamic_labels_too_long() {
let (glean, _t) = new_glean(None);
let labeled = LabeledMetric::new(
CounterMetric::new(CommonMetricData {
name: "labeled_metric".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
}),
None,
);
let metric = labeled.get("this_string_has_more_than_thirty_characters");
metric.add(&glean, 1);
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "store1", true)
.unwrap();
assert_eq!(
json!({
"labeled_counter": {
"glean.error.invalid_label": { "telemetry.labeled_metric": 1 },
"telemetry.labeled_metric": {
"__other__": 1,
}
}
}),
snapshot
);
}
#[test]
fn dynamic_labels_regex_mismatch() {
let (glean, _t) = new_glean(None);
let labeled = LabeledMetric::new(
CounterMetric::new(CommonMetricData {
name: "labeled_metric".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
}),
None,
);
let labels_not_validating = vec![
"notSnakeCase",
"",
"with/slash",
"1.not_fine",
"this.$isnotfine",
"-.not_fine",
"this.is_not_fine.2",
];
let num_non_validating = labels_not_validating.len();
for label in &labels_not_validating {
labeled.get(label).add(&glean, 1);
}
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "store1", true)
.unwrap();
assert_eq!(
json!({
"labeled_counter": {
"glean.error.invalid_label": { "telemetry.labeled_metric": num_non_validating },
"telemetry.labeled_metric": {
"__other__": num_non_validating,
}
}
}),
snapshot
);
}
#[test]
fn dynamic_labels_regex_allowed() {
let (glean, _t) = new_glean(None);
let labeled = LabeledMetric::new(
CounterMetric::new(CommonMetricData {
name: "labeled_metric".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
}),
None,
);
let labels_validating = vec![
"this.is.fine",
"this_is_fine_too",
"this.is_still_fine",
"thisisfine",
"_.is_fine",
"this.is-fine",
"this-is-fine",
];
for label in &labels_validating {
labeled.get(label).add(&glean, 1);
}
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "store1", true)
.unwrap();
assert_eq!(
json!({
"labeled_counter": {
"telemetry.labeled_metric": {
"this.is.fine": 1,
"this_is_fine_too": 1,
"this.is_still_fine": 1,
"thisisfine": 1,
"_.is_fine": 1,
"this.is-fine": 1,
"this-is-fine": 1
}
}
}),
snapshot
);
}
#[test]
fn seen_labels_get_reloaded_from_disk() {
let (mut tempdir, _) = tempdir();
let (glean, dir) = new_glean(Some(tempdir));
tempdir = dir;
let labeled = LabeledMetric::new(
CounterMetric::new(CommonMetricData {
name: "labeled_metric".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
}),
None,
);
// Store some data into labeled metrics
{
// Set the maximum number of labels
for i in 1..=16 {
let label = format!("label{}", i);
labeled.get(&label).add(&glean, i);
}
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "store1", false)
.unwrap();
// Check that the data is there
for i in 1..=16 {
let label = format!("label{}", i);
assert_eq!(
i,
snapshot["labeled_counter"]["telemetry.labeled_metric"][&label]
);
}
drop(glean);
}
// Force a reload
{
let (glean, _) = new_glean(Some(tempdir));
// Try to store another label
labeled.get("new_label").add(&glean, 40);
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "store1", false)
.unwrap();
// Check that the old data is still there
for i in 1..=16 {
let label = format!("label{}", i);
assert_eq!(
i,
snapshot["labeled_counter"]["telemetry.labeled_metric"][&label]
);
}
// The new label lands in the __other__ bucket, due to too many labels
assert_eq!(
40,
snapshot["labeled_counter"]["telemetry.labeled_metric"]["__other__"]
);
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{CommonMetricData, Lifetime};
#[test]
fn can_create_labeled_counter_metric() {
let (glean, _t) = new_glean(None);
let labeled = LabeledMetric::new(
CounterMetric::new(CommonMetricData {
name: "labeled_metric".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
}),
Some(vec!["label1".into()]),
);
let metric = labeled.get("label1");
metric.add(&glean, 1);
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "store1", true)
.unwrap();
assert_eq!(
json!({
"labeled_counter": {
"telemetry.labeled_metric": { "label1": 1 }
}
}),
snapshot
);
}
/// A labeled string with a static label list can be created, and
/// setting a known label stores the value under that exact label.
#[test]
fn can_create_labeled_string_metric() {
    let (glean, _t) = new_glean(None);
    let meta = CommonMetricData {
        name: "labeled_metric".into(),
        category: "telemetry".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    };
    // Static label list: only "label1" is pre-registered.
    let labeled = LabeledMetric::new(StringMetric::new(meta), Some(vec!["label1".into()]));
    labeled.get("label1").set(&glean, "text");
    let expected = json!({
        "labeled_string": {
            "telemetry.labeled_metric": { "label1": "text" }
        }
    });
    let snapshot = StorageManager
        .snapshot_as_json(glean.storage(), "store1", true)
        .unwrap();
    assert_eq!(expected, snapshot);
}
/// A labeled boolean with a static label list can be created, and
/// setting a known label stores the flag under that exact label.
#[test]
fn can_create_labeled_bool_metric() {
    let (glean, _t) = new_glean(None);
    let meta = CommonMetricData {
        name: "labeled_metric".into(),
        category: "telemetry".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    };
    // Static label list: only "label1" is pre-registered.
    let labeled = LabeledMetric::new(BooleanMetric::new(meta), Some(vec!["label1".into()]));
    labeled.get("label1").set(&glean, true);
    let expected = json!({
        "labeled_boolean": {
            "telemetry.labeled_metric": { "label1": true }
        }
    });
    let snapshot = StorageManager
        .snapshot_as_json(glean.storage(), "store1", true)
        .unwrap();
    assert_eq!(expected, snapshot);
}
/// A labeled counter created without a static label list (dynamic labels)
/// records each distinct label independently in the snapshot.
#[test]
fn can_use_multiple_labels() {
    let (glean, _t) = new_glean(None);
    let labeled = LabeledMetric::new(
        CounterMetric::new(CommonMetricData {
            name: "labeled_metric".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        }),
        // `None`: no static label list, labels are dynamic.
        None,
    );
    let metric = labeled.get("label1");
    metric.add(&glean, 1);
    let metric = labeled.get("label2");
    metric.add(&glean, 2);
    let snapshot = StorageManager
        .snapshot_as_json(glean.storage(), "store1", true)
        .unwrap();
    // Each label keeps its own count.
    assert_eq!(
        json!({
            "labeled_counter": {
                "telemetry.labeled_metric": {
                    "label1": 1,
                    "label2": 2,
                }
            }
        }),
        snapshot
    );
}
/// With a static label list, only the listed labels record under their own
/// name; anything else is folded into the `__other__` bucket.
#[test]
fn labels_are_checked_against_static_list() {
    let (glean, _t) = new_glean(None);
    let labeled = LabeledMetric::new(
        CounterMetric::new(CommonMetricData {
            name: "labeled_metric".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        }),
        Some(vec!["label1".into(), "label2".into()]),
    );
    let metric = labeled.get("label1");
    metric.add(&glean, 1);
    let metric = labeled.get("label2");
    metric.add(&glean, 2);
    // All non-registered labels get mapped to the `__other__` label.
    let metric = labeled.get("label3");
    metric.add(&glean, 3);
    let metric = labeled.get("label4");
    metric.add(&glean, 4);
    let snapshot = StorageManager
        .snapshot_as_json(glean.storage(), "store1", true)
        .unwrap();
    // `__other__` accumulates both unknown labels: 3 + 4 = 7.
    assert_eq!(
        json!({
            "labeled_counter": {
                "telemetry.labeled_metric": {
                    "label1": 1,
                    "label2": 2,
                    "__other__": 7,
                }
            }
        }),
        snapshot
    );
}
/// A dynamic label exceeding the maximum length is rejected: the value is
/// recorded under `__other__` and an `invalid_label` error is counted.
#[test]
fn dynamic_labels_too_long() {
    let (glean, _t) = new_glean(None);
    let labeled = LabeledMetric::new(
        CounterMetric::new(CommonMetricData {
            name: "labeled_metric".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        }),
        // Dynamic labels: validated at recording time.
        None,
    );
    // Label is over the limit (the name implies a 30-character cap — the
    // snapshot below confirms it is treated as invalid).
    let metric = labeled.get("this_string_has_more_than_thirty_characters");
    metric.add(&glean, 1);
    let snapshot = StorageManager
        .snapshot_as_json(glean.storage(), "store1", true)
        .unwrap();
    // The error counter is keyed by the offending metric's identifier.
    assert_eq!(
        json!({
            "labeled_counter": {
                "glean.error.invalid_label": { "telemetry.labeled_metric": 1 },
                "telemetry.labeled_metric": {
                    "__other__": 1,
                }
            }
        }),
        snapshot
    );
}
/// Dynamic labels that fail the label-format validation each record an
/// `invalid_label` error and have their value folded into `__other__`.
#[test]
fn dynamic_labels_regex_mismatch() {
    let (glean, _t) = new_glean(None);
    let labeled = LabeledMetric::new(
        CounterMetric::new(CommonMetricData {
            name: "labeled_metric".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        }),
        // Dynamic labels: validated at recording time.
        None,
    );
    // Each of these violates the allowed label format
    // (e.g. wrong case, empty, illegal characters, bad segment).
    let labels_not_validating = vec![
        "notSnakeCase",
        "",
        "with/slash",
        "1.not_fine",
        "this.$isnotfine",
        "-.not_fine",
        "this.is_not_fine.2",
    ];
    let num_non_validating = labels_not_validating.len();
    for label in &labels_not_validating {
        labeled.get(label).add(&glean, 1);
    }
    let snapshot = StorageManager
        .snapshot_as_json(glean.storage(), "store1", true)
        .unwrap();
    // One error and one `__other__` increment per invalid label.
    assert_eq!(
        json!({
            "labeled_counter": {
                "glean.error.invalid_label": { "telemetry.labeled_metric": num_non_validating },
                "telemetry.labeled_metric": {
                    "__other__": num_non_validating,
                }
            }
        }),
        snapshot
    );
}
/// Dynamic labels matching the allowed label format are stored verbatim,
/// with no error reporting and no `__other__` folding.
#[test]
fn dynamic_labels_regex_allowed() {
    let (glean, _t) = new_glean(None);
    let labeled = LabeledMetric::new(
        CounterMetric::new(CommonMetricData {
            name: "labeled_metric".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        }),
        // Dynamic labels: validated at recording time.
        None,
    );
    // All of these satisfy the label format (snake_case segments,
    // dots and dashes allowed).
    let labels_validating = vec![
        "this.is.fine",
        "this_is_fine_too",
        "this.is_still_fine",
        "thisisfine",
        "_.is_fine",
        "this.is-fine",
        "this-is-fine",
    ];
    for label in &labels_validating {
        labeled.get(label).add(&glean, 1);
    }
    let snapshot = StorageManager
        .snapshot_as_json(glean.storage(), "store1", true)
        .unwrap();
    assert_eq!(
        json!({
            "labeled_counter": {
                "telemetry.labeled_metric": {
                    "this.is.fine": 1,
                    "this_is_fine_too": 1,
                    "this.is_still_fine": 1,
                    "thisisfine": 1,
                    "_.is_fine": 1,
                    "this.is-fine": 1,
                    "this-is-fine": 1
                }
            }
        }),
        snapshot
    );
}
/// Labels already stored on disk count against the dynamic-label limit
/// after a Glean restart: a 17th label recorded by a fresh instance goes
/// to `__other__` instead of getting its own bucket.
#[test]
fn seen_labels_get_reloaded_from_disk() {
    let (mut tempdir, _) = tempdir();
    let (glean, dir) = new_glean(Some(tempdir));
    tempdir = dir;
    let labeled = LabeledMetric::new(
        CounterMetric::new(CommonMetricData {
            name: "labeled_metric".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        }),
        // Dynamic labels, capped at 16 distinct values (see loop below).
        None,
    );
    // Store some data into labeled metrics
    {
        // Set the maximum number of labels
        for i in 1..=16 {
            let label = format!("label{}", i);
            labeled.get(&label).add(&glean, i);
        }
        // `false`: don't clear the store, we want it persisted for reload.
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), "store1", false)
            .unwrap();
        // Check that the data is there
        for i in 1..=16 {
            let label = format!("label{}", i);
            assert_eq!(
                i,
                snapshot["labeled_counter"]["telemetry.labeled_metric"][&label]
            );
        }
        drop(glean);
    }
    // Force a reload
    {
        let (glean, _) = new_glean(Some(tempdir));
        // Try to store another label
        labeled.get("new_label").add(&glean, 40);
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), "store1", false)
            .unwrap();
        // Check that the old data is still there
        for i in 1..=16 {
            let label = format!("label{}", i);
            assert_eq!(
                i,
                snapshot["labeled_counter"]["telemetry.labeled_metric"][&label]
            );
        }
        // The new label lands in the __other__ bucket, due to too many labels
        assert_eq!(
            40,
            snapshot["labeled_counter"]["telemetry.labeled_metric"]["__other__"]
        );
    }
}

Просмотреть файл

@ -1,193 +1,193 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{test_get_num_recorded_errors, ErrorType};
use glean_core::{CommonMetricData, Lifetime};
// Tests ported from glean-ac
/// Accumulated memory-distribution values survive a Glean restart and
/// serialize with the sum scaled by the configured memory unit.
#[test]
fn serializer_should_correctly_serialize_memory_distribution() {
    let (mut tempdir, _) = tempdir();
    let memory_unit = MemoryUnit::Kilobyte;
    // Samples recorded in kilobytes are stored as bytes: factor of 1024.
    let kb = 1024;
    {
        let (glean, dir) = new_glean(Some(tempdir));
        tempdir = dir;
        let metric = MemoryDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            memory_unit,
        );
        metric.accumulate(&glean, 100_000);
        let snapshot = metric
            .test_get_value(&glean, "store1")
            .expect("Value should be stored");
        assert_eq!(snapshot.sum, 100_000 * kb);
    }
    // Make a new Glean instance here, which should force reloading of the data from disk
    // so we can ensure it persisted.
    // NOTE(review): this comment originally claimed the metric "has User
    // lifetime", but it is declared with `Lifetime::Ping` above — confirm
    // which is intended.
    {
        let (glean, _) = new_glean(Some(tempdir));
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), "store1", true)
            .unwrap();
        assert_eq!(
            json!(100_000 * kb),
            snapshot["memory_distribution"]["telemetry.distribution"]["sum"]
        );
    }
}
/// A single accumulation is reflected in every store listed in
/// `send_in_pings`, with identical sum and bucket contents.
#[test]
fn set_value_properly_sets_the_value_in_all_stores() {
    let (glean, _t) = new_glean(None);
    let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
    let metric = MemoryDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: store_names.clone(),
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        MemoryUnit::Byte,
    );
    metric.accumulate(&glean, 100_000);
    for store_name in store_names {
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), &store_name, true)
            .unwrap();
        assert_eq!(
            json!(100_000),
            snapshot["memory_distribution"]["telemetry.distribution"]["sum"]
        );
        // "96785" is the histogram bucket minimum that 100_000 bytes
        // falls into; the single sample lands there.
        assert_eq!(
            json!(1),
            snapshot["memory_distribution"]["telemetry.distribution"]["values"]["96785"]
        );
    }
}
// SKIPPED from glean-ac: memory distributions must not accumulate negative values
// This test doesn't apply to Rust, because we're using unsigned integers.
/// `accumulate_samples_signed` stores each non-negative sample (scaled by
/// the memory unit) and reports no errors for valid input.
#[test]
fn the_accumulate_samples_api_correctly_stores_memory_values() {
    let (glean, _t) = new_glean(None);
    let metric = MemoryDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        MemoryUnit::Kilobyte,
    );
    // Accumulate the samples. We intentionally do not report
    // negative values to not trigger error reporting.
    metric.accumulate_samples_signed(&glean, [1, 2, 3].to_vec());
    let snapshot = metric
        .test_get_value(&glean, "store1")
        .expect("Value should be stored");
    let kb = 1024;
    // Check that we got the right sum of samples.
    assert_eq!(snapshot.sum, 6 * kb);
    // We should get a sample in 3 buckets.
    // These numbers are a bit magic, but they correspond to
    // `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
    assert_eq!(1, snapshot.values[&1023]);
    assert_eq!(1, snapshot.values[&2047]);
    assert_eq!(1, snapshot.values[&3024]);
    // No errors should be reported.
    // (`test_get_num_recorded_errors` returning `Err` means no error of
    // that type was recorded.)
    assert!(test_get_num_recorded_errors(
        &glean,
        metric.meta(),
        ErrorType::InvalidValue,
        Some("store1")
    )
    .is_err());
}
/// Negative samples passed to `accumulate_samples_signed` are dropped:
/// valid samples are still stored and one InvalidValue error is recorded.
#[test]
fn the_accumulate_samples_api_correctly_handles_negative_values() {
    let (glean, _t) = new_glean(None);
    let metric = MemoryDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        MemoryUnit::Kilobyte,
    );
    // Accumulate the samples. The leading -1 is invalid and must be ignored.
    metric.accumulate_samples_signed(&glean, [-1, 1, 2, 3].to_vec());
    let snapshot = metric
        .test_get_value(&glean, "store1")
        .expect("Value should be stored");
    let kb = 1024;
    // Check that we got the right sum of samples (the -1 contributed nothing).
    assert_eq!(snapshot.sum, 6 * kb);
    // We should get a sample in 3 buckets.
    // These numbers are a bit magic, but they correspond to
    // `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
    assert_eq!(1, snapshot.values[&1023]);
    assert_eq!(1, snapshot.values[&2047]);
    assert_eq!(1, snapshot.values[&3024]);
    // 1 error should be reported for the single negative sample.
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(
            &glean,
            metric.meta(),
            ErrorType::InvalidValue,
            Some("store1")
        )
    );
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{test_get_num_recorded_errors, ErrorType};
use glean_core::{CommonMetricData, Lifetime};
// Tests ported from glean-ac
/// Accumulated memory-distribution values survive a Glean restart and
/// serialize with the sum scaled by the configured memory unit.
#[test]
fn serializer_should_correctly_serialize_memory_distribution() {
    let (mut tempdir, _) = tempdir();
    let memory_unit = MemoryUnit::Kilobyte;
    // Samples recorded in kilobytes are stored as bytes: factor of 1024.
    let kb = 1024;
    {
        let (glean, dir) = new_glean(Some(tempdir));
        tempdir = dir;
        let metric = MemoryDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            memory_unit,
        );
        metric.accumulate(&glean, 100_000);
        let snapshot = metric
            .test_get_value(&glean, "store1")
            .expect("Value should be stored");
        assert_eq!(snapshot.sum, 100_000 * kb);
    }
    // Make a new Glean instance here, which should force reloading of the data from disk
    // so we can ensure it persisted.
    // NOTE(review): this comment originally claimed the metric "has User
    // lifetime", but it is declared with `Lifetime::Ping` above — confirm
    // which is intended.
    {
        let (glean, _) = new_glean(Some(tempdir));
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), "store1", true)
            .unwrap();
        assert_eq!(
            json!(100_000 * kb),
            snapshot["memory_distribution"]["telemetry.distribution"]["sum"]
        );
    }
}
/// A single accumulation is reflected in every store listed in
/// `send_in_pings`, with identical sum and bucket contents.
#[test]
fn set_value_properly_sets_the_value_in_all_stores() {
    let (glean, _t) = new_glean(None);
    let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
    let metric = MemoryDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: store_names.clone(),
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        MemoryUnit::Byte,
    );
    metric.accumulate(&glean, 100_000);
    for store_name in store_names {
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), &store_name, true)
            .unwrap();
        assert_eq!(
            json!(100_000),
            snapshot["memory_distribution"]["telemetry.distribution"]["sum"]
        );
        // "96785" is the histogram bucket minimum that 100_000 bytes
        // falls into; the single sample lands there.
        assert_eq!(
            json!(1),
            snapshot["memory_distribution"]["telemetry.distribution"]["values"]["96785"]
        );
    }
}
// SKIPPED from glean-ac: memory distributions must not accumulate negative values
// This test doesn't apply to Rust, because we're using unsigned integers.
/// `accumulate_samples_signed` stores each non-negative sample (scaled by
/// the memory unit) and reports no errors for valid input.
#[test]
fn the_accumulate_samples_api_correctly_stores_memory_values() {
    let (glean, _t) = new_glean(None);
    let metric = MemoryDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        MemoryUnit::Kilobyte,
    );
    // Accumulate the samples. We intentionally do not report
    // negative values to not trigger error reporting.
    metric.accumulate_samples_signed(&glean, [1, 2, 3].to_vec());
    let snapshot = metric
        .test_get_value(&glean, "store1")
        .expect("Value should be stored");
    let kb = 1024;
    // Check that we got the right sum of samples.
    assert_eq!(snapshot.sum, 6 * kb);
    // We should get a sample in 3 buckets.
    // These numbers are a bit magic, but they correspond to
    // `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
    assert_eq!(1, snapshot.values[&1023]);
    assert_eq!(1, snapshot.values[&2047]);
    assert_eq!(1, snapshot.values[&3024]);
    // No errors should be reported.
    // (`test_get_num_recorded_errors` returning `Err` means no error of
    // that type was recorded.)
    assert!(test_get_num_recorded_errors(
        &glean,
        metric.meta(),
        ErrorType::InvalidValue,
        Some("store1")
    )
    .is_err());
}
/// Negative samples passed to `accumulate_samples_signed` are dropped:
/// valid samples are still stored and one InvalidValue error is recorded.
#[test]
fn the_accumulate_samples_api_correctly_handles_negative_values() {
    let (glean, _t) = new_glean(None);
    let metric = MemoryDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        MemoryUnit::Kilobyte,
    );
    // Accumulate the samples. The leading -1 is invalid and must be ignored.
    metric.accumulate_samples_signed(&glean, [-1, 1, 2, 3].to_vec());
    let snapshot = metric
        .test_get_value(&glean, "store1")
        .expect("Value should be stored");
    let kb = 1024;
    // Check that we got the right sum of samples (the -1 contributed nothing).
    assert_eq!(snapshot.sum, 6 * kb);
    // We should get a sample in 3 buckets.
    // These numbers are a bit magic, but they correspond to
    // `hist.sample_to_bucket_minimum(i * kb)` for `i = 1..=3`.
    assert_eq!(1, snapshot.values[&1023]);
    assert_eq!(1, snapshot.values[&2047]);
    assert_eq!(1, snapshot.values[&3024]);
    // 1 error should be reported for the single negative sample.
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(
            &glean,
            metric.meta(),
            ErrorType::InvalidValue,
            Some("store1")
        )
    );
}

206
third_party/rust/glean-core/tests/ping.rs поставляемый
Просмотреть файл

@ -1,103 +1,103 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use glean_core::metrics::*;
use glean_core::CommonMetricData;
/// Submitting a registered, non-empty ping leaves exactly one queued
/// ping file in the data directory.
#[test]
fn write_ping_to_disk() {
    let (mut glean, _temp) = new_glean(None);
    let metrics_ping = PingType::new("metrics", true, false, vec![]);
    glean.register_ping_type(&metrics_ping);
    // An empty ping is not persisted, so record some data first.
    let meta = CommonMetricData {
        name: "counter".into(),
        category: "local".into(),
        send_in_pings: vec!["metrics".into()],
        ..Default::default()
    };
    CounterMetric::new(meta).add(&glean, 1);
    let submitted = metrics_ping.submit(&glean, None).unwrap();
    assert!(submitted);
    let queued = get_queued_pings(glean.get_data_path()).unwrap();
    assert_eq!(1, queued.len());
}
/// Disabling upload deletes all queued pings and produces a single
/// deletion-request ping; re-enabling allows new pings to queue again.
#[test]
fn disabling_upload_clears_pending_pings() {
    let (mut glean, _) = new_glean(None);
    let ping = PingType::new("metrics", true, false, vec![]);
    glean.register_ping_type(&ping);
    // We need to store a metric as an empty ping is not stored.
    let counter = CounterMetric::new(CommonMetricData {
        name: "counter".into(),
        category: "local".into(),
        send_in_pings: vec!["metrics".into()],
        ..Default::default()
    });
    counter.add(&glean, 1);
    assert!(ping.submit(&glean, None).unwrap());
    assert_eq!(1, get_queued_pings(glean.get_data_path()).unwrap().len());
    // At this point no deletion_request ping should exist
    // (that is: its directory should not exist at all)
    assert!(get_deletion_pings(glean.get_data_path()).is_err());
    glean.set_upload_enabled(false);
    assert_eq!(0, get_queued_pings(glean.get_data_path()).unwrap().len());
    // Disabling upload generates a deletion ping
    assert_eq!(1, get_deletion_pings(glean.get_data_path()).unwrap().len());
    glean.set_upload_enabled(true);
    assert_eq!(0, get_queued_pings(glean.get_data_path()).unwrap().len());
    // After re-enabling, a fresh submission queues normally again.
    counter.add(&glean, 1);
    assert!(ping.submit(&glean, None).unwrap());
    assert_eq!(1, get_queued_pings(glean.get_data_path()).unwrap().len());
}
/// A deletion-request ping is generated only on the enabled -> disabled
/// transition, never on repeated disables or on re-enabling.
#[test]
fn deletion_request_only_when_toggled_from_on_to_off() {
    let (mut glean, _) = new_glean(None);
    // Disabling upload generates a deletion ping
    glean.set_upload_enabled(false);
    assert_eq!(1, get_deletion_pings(glean.get_data_path()).unwrap().len());
    // Re-setting it to `false` should not generate an additional ping.
    // As we didn't clear the pending ping, that's the only one that sticks around.
    glean.set_upload_enabled(false);
    assert_eq!(1, get_deletion_pings(glean.get_data_path()).unwrap().len());
    // Toggling back to true won't generate a ping either.
    glean.set_upload_enabled(true);
    assert_eq!(1, get_deletion_pings(glean.get_data_path()).unwrap().len());
}
/// Pings constructed with `send_if_empty = true` are submitted even with
/// no data; pings without the flag are discarded when empty.
/// (Idiom fix: use `assert!`/`assert!(!…)` instead of
/// `assert_eq!(true/false, …)` — clippy `bool_assert_comparison`.)
#[test]
fn empty_pings_with_flag_are_sent() {
    let (mut glean, _) = new_glean(None);
    // Third constructor argument is `send_if_empty`.
    let ping1 = PingType::new("custom-ping1", true, true, vec![]);
    glean.register_ping_type(&ping1);
    let ping2 = PingType::new("custom-ping2", true, false, vec![]);
    glean.register_ping_type(&ping2);
    // No data is stored in either of the custom pings
    // Sending this should succeed.
    assert!(ping1.submit(&glean, None).unwrap());
    assert_eq!(1, get_queued_pings(glean.get_data_path()).unwrap().len());
    // Sending this should fail; the queue still holds only ping1.
    assert!(!ping2.submit(&glean, None).unwrap());
    assert_eq!(1, get_queued_pings(glean.get_data_path()).unwrap().len());
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use glean_core::metrics::*;
use glean_core::CommonMetricData;
/// Submitting a registered, non-empty ping leaves exactly one queued
/// ping file in the data directory.
#[test]
fn write_ping_to_disk() {
    let (mut glean, _temp) = new_glean(None);
    let ping = PingType::new("metrics", true, false, vec![]);
    glean.register_ping_type(&ping);
    // We need to store a metric as an empty ping is not stored.
    let counter = CounterMetric::new(CommonMetricData {
        name: "counter".into(),
        category: "local".into(),
        send_in_pings: vec!["metrics".into()],
        ..Default::default()
    });
    counter.add(&glean, 1);
    assert!(ping.submit(&glean, None).unwrap());
    assert_eq!(1, get_queued_pings(glean.get_data_path()).unwrap().len());
}
/// Disabling upload deletes all queued pings and produces a single
/// deletion-request ping; re-enabling allows new pings to queue again.
#[test]
fn disabling_upload_clears_pending_pings() {
    let (mut glean, _) = new_glean(None);
    let ping = PingType::new("metrics", true, false, vec![]);
    glean.register_ping_type(&ping);
    // We need to store a metric as an empty ping is not stored.
    let counter = CounterMetric::new(CommonMetricData {
        name: "counter".into(),
        category: "local".into(),
        send_in_pings: vec!["metrics".into()],
        ..Default::default()
    });
    counter.add(&glean, 1);
    assert!(ping.submit(&glean, None).unwrap());
    assert_eq!(1, get_queued_pings(glean.get_data_path()).unwrap().len());
    // At this point no deletion_request ping should exist
    // (that is: its directory should not exist at all)
    assert!(get_deletion_pings(glean.get_data_path()).is_err());
    glean.set_upload_enabled(false);
    assert_eq!(0, get_queued_pings(glean.get_data_path()).unwrap().len());
    // Disabling upload generates a deletion ping
    assert_eq!(1, get_deletion_pings(glean.get_data_path()).unwrap().len());
    glean.set_upload_enabled(true);
    assert_eq!(0, get_queued_pings(glean.get_data_path()).unwrap().len());
    // After re-enabling, a fresh submission queues normally again.
    counter.add(&glean, 1);
    assert!(ping.submit(&glean, None).unwrap());
    assert_eq!(1, get_queued_pings(glean.get_data_path()).unwrap().len());
}
/// A deletion-request ping is generated only on the enabled -> disabled
/// transition, never on repeated disables or on re-enabling.
#[test]
fn deletion_request_only_when_toggled_from_on_to_off() {
    let (mut glean, _) = new_glean(None);
    // Disabling upload generates a deletion ping
    glean.set_upload_enabled(false);
    assert_eq!(1, get_deletion_pings(glean.get_data_path()).unwrap().len());
    // Re-setting it to `false` should not generate an additional ping.
    // As we didn't clear the pending ping, that's the only one that sticks around.
    glean.set_upload_enabled(false);
    assert_eq!(1, get_deletion_pings(glean.get_data_path()).unwrap().len());
    // Toggling back to true won't generate a ping either.
    glean.set_upload_enabled(true);
    assert_eq!(1, get_deletion_pings(glean.get_data_path()).unwrap().len());
}
/// Pings constructed with `send_if_empty = true` are submitted even with
/// no data; pings without the flag are discarded when empty.
/// (Idiom fix: use `assert!`/`assert!(!…)` instead of
/// `assert_eq!(true/false, …)` — clippy `bool_assert_comparison`.)
#[test]
fn empty_pings_with_flag_are_sent() {
    let (mut glean, _) = new_glean(None);
    // Third constructor argument is `send_if_empty`.
    let ping1 = PingType::new("custom-ping1", true, true, vec![]);
    glean.register_ping_type(&ping1);
    let ping2 = PingType::new("custom-ping2", true, false, vec![]);
    glean.register_ping_type(&ping2);
    // No data is stored in either of the custom pings
    // Sending this should succeed.
    assert!(ping1.submit(&glean, None).unwrap());
    assert_eq!(1, get_queued_pings(glean.get_data_path()).unwrap().len());
    // Sending this should fail; the queue still holds only ping1.
    assert!(!ping2.submit(&glean, None).unwrap());
    assert_eq!(1, get_queued_pings(glean.get_data_path()).unwrap().len());
}

Просмотреть файл

@ -1,210 +1,210 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use glean_core::metrics::*;
use glean_core::ping::PingMaker;
use glean_core::{CommonMetricData, Glean, Lifetime};
/// Test fixture: a Glean instance with a registered "store1" ping that
/// already contains one recorded boolean metric, plus a `PingMaker`.
/// Returns the tempdir guard too, so storage stays alive for the test.
fn set_up_basic_ping() -> (Glean, PingMaker, PingType, tempfile::TempDir) {
    let (tempdir, _) = tempdir();
    let (mut glean, t) = new_glean(Some(tempdir));
    let ping_maker = PingMaker::new();
    let ping_type = PingType::new("store1", true, false, vec![]);
    glean.register_ping_type(&ping_type);
    // Record something, so the ping will have data
    let metric = BooleanMetric::new(CommonMetricData {
        name: "boolean_metric".into(),
        category: "telemetry".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::User,
        ..Default::default()
    });
    metric.set(&glean, true);
    (glean, ping_maker, ping_type, t)
}
/// `ping_info` carries parseable ISO8601 start/end times with
/// start_time <= end_time.
#[test]
fn ping_info_must_contain_a_nonempty_start_and_end_time() {
    let (glean, ping_maker, ping_type, _t) = set_up_basic_ping();
    let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
    let ping_info = content["ping_info"].as_object().unwrap();
    // Both timestamps must parse as ISO8601 (unwrap panics otherwise).
    let start_time_str = ping_info["start_time"].as_str().unwrap();
    let start_time_date = iso8601_to_chrono(&iso8601::datetime(start_time_str).unwrap());
    let end_time_str = ping_info["end_time"].as_str().unwrap();
    let end_time_date = iso8601_to_chrono(&iso8601::datetime(end_time_str).unwrap());
    assert!(start_time_date <= end_time_date);
}
/// The `ping_info` section must contain every mandatory field.
#[test]
fn get_ping_info_must_report_all_the_required_fields() {
    let (glean, ping_maker, ping_type, _t) = set_up_basic_ping();
    let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
    let ping_info = content["ping_info"].as_object().unwrap();
    for required in &["start_time", "end_time", "seq"] {
        assert!(ping_info.get(*required).is_some());
    }
}
/// `client_info` must carry the SDK build string.
#[test]
fn get_client_info_must_report_all_the_available_data() {
    let (glean, ping_maker, ping_type, _t) = set_up_basic_ping();
    let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
    let client_info = content["client_info"].as_object().unwrap();
    // Asserted implicitly: unwrap panics if the field is absent or not a string.
    client_info["telemetry_sdk_build"].as_str().unwrap();
}
// SKIPPED from glean-ac: collect() must report a valid ping with the data from the engines
// This test doesn't really make sense with rkv
/// Collecting a ping type with no stored data yields `None`.
#[test]
fn collect_must_report_none_when_no_data_is_stored() {
    // NOTE: This is a behavior change from glean-ac which returned an empty
    // string in this case. As this is an implementation detail and not part of
    // the public API, it's safe to change this.
    let (mut glean, ping_maker, ping_type, _t) = set_up_basic_ping();
    let unknown_ping_type = PingType::new("unknown", true, false, vec![]);
    // NOTE(review): this re-registers `ping_type` (already registered by the
    // fixture) rather than `unknown_ping_type` — possibly intentional, so the
    // "unknown" ping stays unregistered/empty; confirm against upstream.
    glean.register_ping_type(&ping_type);
    assert!(ping_maker
        .collect(&glean, &unknown_ping_type, None)
        .is_none());
}
/// Ping sequence numbers start at 0 and increment by one per collection,
/// independently for each store.
#[test]
fn seq_number_must_be_sequential() {
    let (glean, ping_maker, _ping_type, _t) = set_up_basic_ping();
    // Record into a second store so "store2" pings also have data.
    let metric = BooleanMetric::new(CommonMetricData {
        name: "boolean_metric".into(),
        category: "telemetry".into(),
        send_in_pings: vec!["store2".into()],
        disabled: false,
        lifetime: Lifetime::User,
        ..Default::default()
    });
    metric.set(&glean, true);
    for i in 0..=1 {
        for ping_name in ["store1", "store2"].iter() {
            let ping_type = PingType::new(*ping_name, true, false, vec![]);
            let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
            let seq_num = content["ping_info"]["seq"].as_i64().unwrap();
            // Ensure sequence numbers in different stores are independent of
            // each other
            assert_eq!(i, seq_num);
        }
    }
    // Test that ping sequence numbers increase independently.
    {
        let ping_type = PingType::new("store1", true, false, vec![]);
        // 3rd ping of store1
        let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
        let seq_num = content["ping_info"]["seq"].as_i64().unwrap();
        assert_eq!(2, seq_num);
        // 4th ping of store1
        let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
        let seq_num = content["ping_info"]["seq"].as_i64().unwrap();
        assert_eq!(3, seq_num);
    }
    {
        let ping_type = PingType::new("store2", true, false, vec![]);
        // 3rd ping of store2
        let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
        let seq_num = content["ping_info"]["seq"].as_i64().unwrap();
        assert_eq!(2, seq_num);
    }
    {
        let ping_type = PingType::new("store1", true, false, vec![]);
        // 5th ping of store1
        let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
        let seq_num = content["ping_info"]["seq"].as_i64().unwrap();
        assert_eq!(4, seq_num);
    }
}
/// `PingMaker::clear_pending_pings` removes every queued ping file.
#[test]
fn clear_pending_pings() {
    let (mut glean, _) = new_glean(None);
    let ping_maker = PingMaker::new();
    let ping_type = PingType::new("store1", true, false, vec![]);
    glean.register_ping_type(&ping_type);
    // Record something, so the ping will have data
    let metric = BooleanMetric::new(CommonMetricData {
        name: "boolean_metric".into(),
        category: "telemetry".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::User,
        ..Default::default()
    });
    metric.set(&glean, true);
    assert!(glean.submit_ping(&ping_type, None).is_ok());
    assert_eq!(1, get_queued_pings(glean.get_data_path()).unwrap().len());
    // Clearing empties the queue entirely.
    assert!(ping_maker
        .clear_pending_pings(glean.get_data_path())
        .is_ok());
    assert_eq!(0, get_queued_pings(glean.get_data_path()).unwrap().len());
}
/// With upload disabled, submission succeeds but queues nothing —
/// via both `Glean::submit_ping` and `PingType::submit`.
#[test]
fn no_pings_submitted_if_upload_disabled() {
    // Regression test, bug 1603571
    let (mut glean, _) = new_glean(None);
    let ping_type = PingType::new("store1", true, true, vec![]);
    glean.register_ping_type(&ping_type);
    assert!(glean.submit_ping(&ping_type, None).is_ok());
    assert_eq!(1, get_queued_pings(glean.get_data_path()).unwrap().len());
    // Disable upload, then try to submit
    glean.set_upload_enabled(false);
    assert!(glean.submit_ping(&ping_type, None).is_ok());
    assert_eq!(0, get_queued_pings(glean.get_data_path()).unwrap().len());
    // Test again through the direct call
    assert!(ping_type.submit(&glean, None).is_ok());
    assert_eq!(0, get_queued_pings(glean.get_data_path()).unwrap().len());
}
/// When a debug view tag is set, queued pings carry it in the
/// `X-Debug-ID` header of their metadata.
#[test]
fn metadata_is_correctly_added_when_necessary() {
    let (mut glean, _) = new_glean(None);
    glean.set_debug_view_tag("valid-tag");
    let ping_type = PingType::new("store1", true, true, vec![]);
    glean.register_ping_type(&ping_type);
    assert!(glean.submit_ping(&ping_type, None).is_ok());
    // Queued pings are (url, body, metadata) — inspect the metadata headers.
    let (_, _, metadata) = &get_queued_pings(glean.get_data_path()).unwrap()[0];
    let headers = metadata.as_ref().unwrap().get("headers").unwrap();
    assert_eq!(headers.get("X-Debug-ID").unwrap(), "valid-tag");
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use glean_core::metrics::*;
use glean_core::ping::PingMaker;
use glean_core::{CommonMetricData, Glean, Lifetime};
/// Test fixture: a Glean instance with a registered "store1" ping that
/// already contains one recorded boolean metric, plus a `PingMaker`.
/// Returns the tempdir guard too, so storage stays alive for the test.
fn set_up_basic_ping() -> (Glean, PingMaker, PingType, tempfile::TempDir) {
    let (tempdir, _) = tempdir();
    let (mut glean, t) = new_glean(Some(tempdir));
    let ping_maker = PingMaker::new();
    let ping_type = PingType::new("store1", true, false, vec![]);
    glean.register_ping_type(&ping_type);
    // Record something, so the ping will have data
    let metric = BooleanMetric::new(CommonMetricData {
        name: "boolean_metric".into(),
        category: "telemetry".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::User,
        ..Default::default()
    });
    metric.set(&glean, true);
    (glean, ping_maker, ping_type, t)
}
/// `ping_info` carries parseable ISO8601 start/end times with
/// start_time <= end_time.
#[test]
fn ping_info_must_contain_a_nonempty_start_and_end_time() {
    let (glean, ping_maker, ping_type, _t) = set_up_basic_ping();
    let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
    let ping_info = content["ping_info"].as_object().unwrap();
    // Both timestamps must parse as ISO8601 (unwrap panics otherwise).
    let start_time_str = ping_info["start_time"].as_str().unwrap();
    let start_time_date = iso8601_to_chrono(&iso8601::datetime(start_time_str).unwrap());
    let end_time_str = ping_info["end_time"].as_str().unwrap();
    let end_time_date = iso8601_to_chrono(&iso8601::datetime(end_time_str).unwrap());
    assert!(start_time_date <= end_time_date);
}
/// The `ping_info` section must contain every mandatory field.
#[test]
fn get_ping_info_must_report_all_the_required_fields() {
    let (glean, ping_maker, ping_type, _t) = set_up_basic_ping();
    let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
    let ping_info = content["ping_info"].as_object().unwrap();
    assert!(ping_info.get("start_time").is_some());
    assert!(ping_info.get("end_time").is_some());
    assert!(ping_info.get("seq").is_some());
}
/// `client_info` must carry the SDK build string.
#[test]
fn get_client_info_must_report_all_the_available_data() {
    let (glean, ping_maker, ping_type, _t) = set_up_basic_ping();
    let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
    let client_info = content["client_info"].as_object().unwrap();
    // Asserted implicitly: unwrap panics if the field is absent or not a string.
    client_info["telemetry_sdk_build"].as_str().unwrap();
}
// SKIPPED from glean-ac: collect() must report a valid ping with the data from the engines
// This test doesn't really make sense with rkv
/// Collecting a ping type with no stored data yields `None`.
#[test]
fn collect_must_report_none_when_no_data_is_stored() {
    // NOTE: This is a behavior change from glean-ac which returned an empty
    // string in this case. As this is an implementation detail and not part of
    // the public API, it's safe to change this.
    let (mut glean, ping_maker, ping_type, _t) = set_up_basic_ping();
    let unknown_ping_type = PingType::new("unknown", true, false, vec![]);
    // NOTE(review): this re-registers `ping_type` (already registered by the
    // fixture) rather than `unknown_ping_type` — possibly intentional, so the
    // "unknown" ping stays unregistered/empty; confirm against upstream.
    glean.register_ping_type(&ping_type);
    assert!(ping_maker
        .collect(&glean, &unknown_ping_type, None)
        .is_none());
}
/// Ping sequence numbers start at 0 and increment by one per collection,
/// independently for each store.
#[test]
fn seq_number_must_be_sequential() {
    let (glean, ping_maker, _ping_type, _t) = set_up_basic_ping();
    // Record into a second store so "store2" pings also have data.
    let metric = BooleanMetric::new(CommonMetricData {
        name: "boolean_metric".into(),
        category: "telemetry".into(),
        send_in_pings: vec!["store2".into()],
        disabled: false,
        lifetime: Lifetime::User,
        ..Default::default()
    });
    metric.set(&glean, true);
    for i in 0..=1 {
        for ping_name in ["store1", "store2"].iter() {
            let ping_type = PingType::new(*ping_name, true, false, vec![]);
            let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
            let seq_num = content["ping_info"]["seq"].as_i64().unwrap();
            // Ensure sequence numbers in different stores are independent of
            // each other
            assert_eq!(i, seq_num);
        }
    }
    // Test that ping sequence numbers increase independently.
    {
        let ping_type = PingType::new("store1", true, false, vec![]);
        // 3rd ping of store1
        let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
        let seq_num = content["ping_info"]["seq"].as_i64().unwrap();
        assert_eq!(2, seq_num);
        // 4th ping of store1
        let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
        let seq_num = content["ping_info"]["seq"].as_i64().unwrap();
        assert_eq!(3, seq_num);
    }
    {
        let ping_type = PingType::new("store2", true, false, vec![]);
        // 3rd ping of store2
        let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
        let seq_num = content["ping_info"]["seq"].as_i64().unwrap();
        assert_eq!(2, seq_num);
    }
    {
        let ping_type = PingType::new("store1", true, false, vec![]);
        // 5th ping of store1
        let content = ping_maker.collect(&glean, &ping_type, None).unwrap();
        let seq_num = content["ping_info"]["seq"].as_i64().unwrap();
        assert_eq!(4, seq_num);
    }
}
#[test]
fn clear_pending_pings() {
    // Submitting a ping queues it on disk; `clear_pending_pings` must
    // empty that queue.
    let (mut glean, _) = new_glean(None);
    let ping_maker = PingMaker::new();
    let ping_type = PingType::new("store1", true, false, vec![]);
    glean.register_ping_type(&ping_type);
    // Record something, so the ping will have data
    let metric = BooleanMetric::new(CommonMetricData {
        name: "boolean_metric".into(),
        category: "telemetry".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::User,
        ..Default::default()
    });
    metric.set(&glean, true);
    assert!(glean.submit_ping(&ping_type, None).is_ok());
    assert_eq!(1, get_queued_pings(glean.get_data_path()).unwrap().len());
    assert!(ping_maker
        .clear_pending_pings(glean.get_data_path())
        .is_ok());
    assert_eq!(0, get_queued_pings(glean.get_data_path()).unwrap().len());
}
#[test]
fn no_pings_submitted_if_upload_disabled() {
    // Regression test, bug 1603571
    // Once upload is disabled, submitting must not leave pings in the queue,
    // regardless of whether submission goes through Glean or the ping type.
    let (mut glean, _) = new_glean(None);
    let ping_type = PingType::new("store1", true, true, vec![]);
    glean.register_ping_type(&ping_type);
    assert!(glean.submit_ping(&ping_type, None).is_ok());
    assert_eq!(1, get_queued_pings(glean.get_data_path()).unwrap().len());
    // Disable upload, then try to submit again.
    glean.set_upload_enabled(false);
    assert!(glean.submit_ping(&ping_type, None).is_ok());
    assert_eq!(0, get_queued_pings(glean.get_data_path()).unwrap().len());
    // Test again through the direct call
    assert!(ping_type.submit(&glean, None).is_ok());
    assert_eq!(0, get_queued_pings(glean.get_data_path()).unwrap().len());
}
#[test]
fn metadata_is_correctly_added_when_necessary() {
    // When a debug view tag is set, the queued ping's metadata must carry
    // it in the `X-Debug-ID` header.
    let (mut glean, _) = new_glean(None);
    glean.set_debug_view_tag("valid-tag");

    let ping_type = PingType::new("store1", true, true, vec![]);
    glean.register_ping_type(&ping_type);
    assert!(glean.submit_ping(&ping_type, None).is_ok());

    let queued = get_queued_pings(glean.get_data_path()).unwrap();
    let (_, _, metadata) = &queued[0];
    let headers = metadata.as_ref().unwrap().get("headers").unwrap();
    assert_eq!(headers.get("X-Debug-ID").unwrap(), "valid-tag");
}

242
third_party/rust/glean-core/tests/string.rs поставляемый
Просмотреть файл

@ -1,121 +1,121 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{test_get_num_recorded_errors, ErrorType};
use glean_core::{CommonMetricData, Lifetime};
// SKIPPED from glean-ac: string deserializer should correctly parse integers
// This test doesn't really apply to rkv
#[test]
fn string_serializer_should_correctly_serialize_strings() {
    // A User-lifetime string must survive a Glean restart: set it, drop the
    // instance, then verify the snapshot from a fresh instance on the same dir.
    let (mut tempdir, _) = tempdir();
    {
        // We give tempdir to the `new_glean` function...
        let (glean, dir) = new_glean(Some(tempdir));
        // And then we get it back once that function returns.
        tempdir = dir;
        let metric = StringMetric::new(CommonMetricData {
            name: "string_metric".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::User,
            ..Default::default()
        });
        metric.set(&glean, "test_string_value");
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), "store1", true)
            .unwrap();
        assert_eq!(
            json!({"string": {"telemetry.string_metric": "test_string_value"}}),
            snapshot
        );
    }
    // Make a new Glean instance here, which should force reloading of the data from disk
    // so we can ensure it persisted, because it has User lifetime
    {
        let (glean, _) = new_glean(Some(tempdir));
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), "store1", true)
            .unwrap();
        assert_eq!(
            json!({"string": {"telemetry.string_metric": "test_string_value"}}),
            snapshot
        );
    }
}
#[test]
fn set_properly_sets_the_value_in_all_stores() {
    // A metric listed in multiple pings must write the same value into
    // every store named in `send_in_pings`.
    let (glean, _t) = new_glean(None);
    let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
    let metric = StringMetric::new(CommonMetricData {
        name: "string_metric".into(),
        category: "telemetry".into(),
        send_in_pings: store_names.clone(),
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    metric.set(&glean, "test_string_value");
    // Check that the data was correctly set in each store.
    for store_name in store_names {
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), &store_name, true)
            .unwrap();
        assert_eq!(
            json!({"string": {"telemetry.string_metric": "test_string_value"}}),
            snapshot
        );
    }
}
// SKIPPED from glean-ac: strings are serialized in the correct JSON format
// Completely redundant with other tests.
#[test]
fn long_string_values_are_truncated() {
    // Strings longer than the 100-character limit must be truncated on `set`
    // and the truncation must be recorded as an InvalidOverflow error.
    let (glean, _t) = new_glean(None);
    let metric = StringMetric::new(CommonMetricData {
        name: "string_metric".into(),
        category: "telemetry".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    // 220 characters, well over the limit. (Local renamed from the
    // original's typo `test_sting`.)
    let test_string = "01234567890".repeat(20);
    metric.set(&glean, test_string.clone());
    // Check that data was truncated
    assert_eq!(
        test_string[..100],
        metric.test_get_value(&glean, "store1").unwrap()
    );
    // Make sure that the errors have been recorded
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidOverflow, None)
    );
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{test_get_num_recorded_errors, ErrorType};
use glean_core::{CommonMetricData, Lifetime};
// NOTE(review): this span is the second, identical rendering of the same
// diff hunk as the tests above; content is intentionally unchanged.
// SKIPPED from glean-ac: string deserializer should correctly parse integers
// This test doesn't really apply to rkv
#[test]
fn string_serializer_should_correctly_serialize_strings() {
    // A User-lifetime string must survive a Glean restart.
    let (mut tempdir, _) = tempdir();
    {
        // We give tempdir to the `new_glean` function...
        let (glean, dir) = new_glean(Some(tempdir));
        // And then we get it back once that function returns.
        tempdir = dir;
        let metric = StringMetric::new(CommonMetricData {
            name: "string_metric".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::User,
            ..Default::default()
        });
        metric.set(&glean, "test_string_value");
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), "store1", true)
            .unwrap();
        assert_eq!(
            json!({"string": {"telemetry.string_metric": "test_string_value"}}),
            snapshot
        );
    }
    // Make a new Glean instance here, which should force reloading of the data from disk
    // so we can ensure it persisted, because it has User lifetime
    {
        let (glean, _) = new_glean(Some(tempdir));
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), "store1", true)
            .unwrap();
        assert_eq!(
            json!({"string": {"telemetry.string_metric": "test_string_value"}}),
            snapshot
        );
    }
}
#[test]
fn set_properly_sets_the_value_in_all_stores() {
    // The value must land in every store named in `send_in_pings`.
    let (glean, _t) = new_glean(None);
    let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
    let metric = StringMetric::new(CommonMetricData {
        name: "string_metric".into(),
        category: "telemetry".into(),
        send_in_pings: store_names.clone(),
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    metric.set(&glean, "test_string_value");
    // Check that the data was correctly set in each store.
    for store_name in store_names {
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), &store_name, true)
            .unwrap();
        assert_eq!(
            json!({"string": {"telemetry.string_metric": "test_string_value"}}),
            snapshot
        );
    }
}
// SKIPPED from glean-ac: strings are serialized in the correct JSON format
// Completely redundant with other tests.
#[test]
fn long_string_values_are_truncated() {
    // Strings longer than the 100-character limit must be truncated on `set`
    // and the truncation must be recorded as an InvalidOverflow error.
    let (glean, _t) = new_glean(None);
    let metric = StringMetric::new(CommonMetricData {
        name: "string_metric".into(),
        category: "telemetry".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    // 220 characters, well over the limit. (Local renamed from the
    // original's typo `test_sting`.)
    let test_string = "01234567890".repeat(20);
    metric.set(&glean, test_string.clone());
    // Check that data was truncated
    assert_eq!(
        test_string[..100],
        metric.test_get_value(&glean, "store1").unwrap()
    );
    // Make sure that the errors have been recorded
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidOverflow, None)
    );
}

Просмотреть файл

@ -1,249 +1,249 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{test_get_num_recorded_errors, CommonMetricData, ErrorType, Lifetime};
#[test]
fn list_can_store_multiple_items() {
    // `add` appends to the list while `set` replaces it wholesale; both
    // must be observable through `test_get_value`.
    let (glean, _t) = new_glean(None);
    let metric = StringListMetric::new(CommonMetricData {
        name: "list".into(),
        category: "local".into(),
        send_in_pings: vec!["core".into()],
        ..Default::default()
    });

    metric.add(&glean, "first");
    assert_eq!(vec!["first"], metric.test_get_value(&glean, "core").unwrap());

    metric.add(&glean, "second");
    assert_eq!(
        vec!["first", "second"],
        metric.test_get_value(&glean, "core").unwrap()
    );

    // A `set` discards whatever was accumulated before it.
    metric.set(&glean, vec!["third".into()]);
    assert_eq!(vec!["third"], metric.test_get_value(&glean, "core").unwrap());

    metric.add(&glean, "fourth");
    assert_eq!(
        vec!["third", "fourth"],
        metric.test_get_value(&glean, "core").unwrap()
    );
}
#[test]
fn stringlist_serializer_should_correctly_serialize_stringlists() {
    // A User-lifetime string list must persist across a Glean restart.
    let (mut tempdir, _) = tempdir();
    {
        // We give tempdir to the `new_glean` function...
        let (glean, dir) = new_glean(Some(tempdir));
        // And then we get it back once that function returns.
        tempdir = dir;
        let metric = StringListMetric::new(CommonMetricData {
            name: "string_list_metric".into(),
            category: "telemetry.test".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::User,
            ..Default::default()
        });
        metric.set(&glean, vec!["test_string_1".into(), "test_string_2".into()]);
    }
    {
        // Fresh instance over the same data dir: the list must still be there.
        let (glean, _) = new_glean(Some(tempdir));
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), "store1", true)
            .unwrap();
        assert_eq!(
            json!({"string_list": {"telemetry.test.string_list_metric": ["test_string_1", "test_string_2"]}}),
            snapshot
        );
    }
}
#[test]
fn set_properly_sets_the_value_in_all_stores() {
    // The list must be written into every store named in `send_in_pings`.
    let (glean, _t) = new_glean(None);
    let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
    let metric = StringListMetric::new(CommonMetricData {
        name: "string_list_metric".into(),
        category: "telemetry.test".into(),
        send_in_pings: store_names.clone(),
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    metric.set(&glean, vec!["test_string_1".into(), "test_string_2".into()]);
    for store_name in store_names {
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), &store_name, true)
            .unwrap();
        assert_eq!(
            json!({"string_list": {"telemetry.test.string_list_metric": ["test_string_1", "test_string_2"]}}),
            snapshot
        );
    }
}
#[test]
fn long_string_values_are_truncated() {
    // List elements longer than 50 characters must be truncated by both
    // `add` and `set`, each occurrence recording an InvalidOverflow error.
    let (glean, _t) = new_glean(None);
    let metric = StringListMetric::new(CommonMetricData {
        name: "string_list_metric".into(),
        category: "telemetry.test".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    let test_string = "0123456789".repeat(20);
    metric.add(&glean, test_string.clone());
    // Ensure the string was truncated to the proper length.
    assert_eq!(
        vec![test_string[..50].to_string()],
        metric.test_get_value(&glean, "store1").unwrap()
    );
    // Ensure the error has been recorded.
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidOverflow, None)
    );
    metric.set(&glean, vec![test_string.clone()]);
    // Ensure the string was truncated to the proper length.
    assert_eq!(
        vec![test_string[..50].to_string()],
        metric.test_get_value(&glean, "store1").unwrap()
    );
    // Ensure the error has been recorded.
    assert_eq!(
        Ok(2),
        test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidOverflow, None)
    );
}
#[test]
fn disabled_string_lists_dont_record() {
    // A disabled metric must silently drop both `add` and `set`, without
    // storing data and without recording errors.
    let (glean, _t) = new_glean(None);
    let metric = StringListMetric::new(CommonMetricData {
        name: "string_list_metric".into(),
        category: "telemetry.test".into(),
        send_in_pings: vec!["store1".into()],
        disabled: true,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    metric.add(&glean, "test_string".repeat(20));
    // Ensure the string was not added.
    assert_eq!(None, metric.test_get_value(&glean, "store1"));
    metric.set(&glean, vec!["test_string_2".repeat(20)]);
    // Ensure the stringlist was not set.
    assert_eq!(None, metric.test_get_value(&glean, "store1"));
    // Ensure no error was recorded.
    assert!(
        test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidValue, None).is_err()
    );
}
#[test]
fn string_lists_dont_exceed_max_items() {
    // Lists are capped at 20 items: `add` beyond the cap is dropped and
    // `set` with too many elements is cut off, each recording an error.
    let (glean, _t) = new_glean(None);
    let metric = StringListMetric::new(CommonMetricData {
        name: "string_list_metric".into(),
        category: "telemetry.test".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    for _n in 1..21 {
        metric.add(&glean, "test_string");
    }
    let expected: Vec<String> = "test_string "
        .repeat(20)
        .split_whitespace()
        .map(|s| s.to_string())
        .collect();
    assert_eq!(expected, metric.test_get_value(&glean, "store1").unwrap());
    // Ensure the 21st string wasn't added.
    metric.add(&glean, "test_string");
    assert_eq!(expected, metric.test_get_value(&glean, "store1").unwrap());
    // Ensure we recorded the error.
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidValue, None)
    );
    // Try to set it to a list that's too long. Ensure it cuts off at 20 elements.
    let too_many: Vec<String> = "test_string "
        .repeat(21)
        .split_whitespace()
        .map(|s| s.to_string())
        .collect();
    metric.set(&glean, too_many);
    assert_eq!(expected, metric.test_get_value(&glean, "store1").unwrap());
    assert_eq!(
        Ok(2),
        test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidValue, None)
    );
}
#[test]
fn set_does_not_record_error_when_receiving_empty_list() {
    // An empty list is a valid value: it must be stored as-is and must
    // not be counted as an InvalidValue error.
    let (glean, _t) = new_glean(None);
    let string_list = StringListMetric::new(CommonMetricData {
        name: "string_list_metric".into(),
        category: "telemetry.test".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });

    string_list.set(&glean, vec![]);

    // The empty list itself must round-trip.
    assert_eq!(Some(vec![]), string_list.test_get_value(&glean, "store1"));

    // No error must have been recorded for it.
    let errors =
        test_get_num_recorded_errors(&glean, string_list.meta(), ErrorType::InvalidValue, None);
    assert!(errors.is_err());
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{test_get_num_recorded_errors, CommonMetricData, ErrorType, Lifetime};
// NOTE(review): this span is the second, identical rendering of the same
// diff hunk as the tests above; content is intentionally unchanged.
#[test]
fn list_can_store_multiple_items() {
    let (glean, _t) = new_glean(None);
    let list: StringListMetric = StringListMetric::new(CommonMetricData {
        name: "list".into(),
        category: "local".into(),
        send_in_pings: vec!["core".into()],
        ..Default::default()
    });
    list.add(&glean, "first");
    assert_eq!(list.test_get_value(&glean, "core").unwrap(), vec!["first"]);
    list.add(&glean, "second");
    assert_eq!(
        list.test_get_value(&glean, "core").unwrap(),
        vec!["first", "second"]
    );
    list.set(&glean, vec!["third".into()]);
    assert_eq!(list.test_get_value(&glean, "core").unwrap(), vec!["third"]);
    list.add(&glean, "fourth");
    assert_eq!(
        list.test_get_value(&glean, "core").unwrap(),
        vec!["third", "fourth"]
    );
}
#[test]
fn stringlist_serializer_should_correctly_serialize_stringlists() {
    let (mut tempdir, _) = tempdir();
    {
        // We give tempdir to the `new_glean` function...
        let (glean, dir) = new_glean(Some(tempdir));
        // And then we get it back once that function returns.
        tempdir = dir;
        let metric = StringListMetric::new(CommonMetricData {
            name: "string_list_metric".into(),
            category: "telemetry.test".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::User,
            ..Default::default()
        });
        metric.set(&glean, vec!["test_string_1".into(), "test_string_2".into()]);
    }
    {
        let (glean, _) = new_glean(Some(tempdir));
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), "store1", true)
            .unwrap();
        assert_eq!(
            json!({"string_list": {"telemetry.test.string_list_metric": ["test_string_1", "test_string_2"]}}),
            snapshot
        );
    }
}
#[test]
fn set_properly_sets_the_value_in_all_stores() {
    let (glean, _t) = new_glean(None);
    let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
    let metric = StringListMetric::new(CommonMetricData {
        name: "string_list_metric".into(),
        category: "telemetry.test".into(),
        send_in_pings: store_names.clone(),
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    metric.set(&glean, vec!["test_string_1".into(), "test_string_2".into()]);
    for store_name in store_names {
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), &store_name, true)
            .unwrap();
        assert_eq!(
            json!({"string_list": {"telemetry.test.string_list_metric": ["test_string_1", "test_string_2"]}}),
            snapshot
        );
    }
}
#[test]
fn long_string_values_are_truncated() {
    let (glean, _t) = new_glean(None);
    let metric = StringListMetric::new(CommonMetricData {
        name: "string_list_metric".into(),
        category: "telemetry.test".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    let test_string = "0123456789".repeat(20);
    metric.add(&glean, test_string.clone());
    // Ensure the string was truncated to the proper length.
    assert_eq!(
        vec![test_string[..50].to_string()],
        metric.test_get_value(&glean, "store1").unwrap()
    );
    // Ensure the error has been recorded.
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidOverflow, None)
    );
    metric.set(&glean, vec![test_string.clone()]);
    // Ensure the string was truncated to the proper length.
    assert_eq!(
        vec![test_string[..50].to_string()],
        metric.test_get_value(&glean, "store1").unwrap()
    );
    // Ensure the error has been recorded.
    assert_eq!(
        Ok(2),
        test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidOverflow, None)
    );
}
#[test]
fn disabled_string_lists_dont_record() {
    let (glean, _t) = new_glean(None);
    let metric = StringListMetric::new(CommonMetricData {
        name: "string_list_metric".into(),
        category: "telemetry.test".into(),
        send_in_pings: vec!["store1".into()],
        disabled: true,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    metric.add(&glean, "test_string".repeat(20));
    // Ensure the string was not added.
    assert_eq!(None, metric.test_get_value(&glean, "store1"));
    metric.set(&glean, vec!["test_string_2".repeat(20)]);
    // Ensure the stringlist was not set.
    assert_eq!(None, metric.test_get_value(&glean, "store1"));
    // Ensure no error was recorded.
    assert!(
        test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidValue, None).is_err()
    );
}
#[test]
fn string_lists_dont_exceed_max_items() {
    let (glean, _t) = new_glean(None);
    let metric = StringListMetric::new(CommonMetricData {
        name: "string_list_metric".into(),
        category: "telemetry.test".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    for _n in 1..21 {
        metric.add(&glean, "test_string");
    }
    let expected: Vec<String> = "test_string "
        .repeat(20)
        .split_whitespace()
        .map(|s| s.to_string())
        .collect();
    assert_eq!(expected, metric.test_get_value(&glean, "store1").unwrap());
    // Ensure the 21st string wasn't added.
    metric.add(&glean, "test_string");
    assert_eq!(expected, metric.test_get_value(&glean, "store1").unwrap());
    // Ensure we recorded the error.
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidValue, None)
    );
    // Try to set it to a list that's too long. Ensure it cuts off at 20 elements.
    let too_many: Vec<String> = "test_string "
        .repeat(21)
        .split_whitespace()
        .map(|s| s.to_string())
        .collect();
    metric.set(&glean, too_many);
    assert_eq!(expected, metric.test_get_value(&glean, "store1").unwrap());
    assert_eq!(
        Ok(2),
        test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidValue, None)
    );
}
#[test]
fn set_does_not_record_error_when_receiving_empty_list() {
    let (glean, _t) = new_glean(None);
    let metric = StringListMetric::new(CommonMetricData {
        name: "string_list_metric".into(),
        category: "telemetry.test".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    metric.set(&glean, vec![]);
    // Ensure the empty list was added
    assert_eq!(Some(vec![]), metric.test_get_value(&glean, "store1"));
    // Ensure we didn't record an error.
    assert!(
        test_get_num_recorded_errors(&glean, metric.meta(), ErrorType::InvalidValue, None).is_err()
    );
}

Просмотреть файл

@ -1,336 +1,336 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use std::time::Duration;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{test_get_num_recorded_errors, ErrorType};
use glean_core::{CommonMetricData, Lifetime};
// Tests ported from glean-ac
#[test]
fn serializer_should_correctly_serialize_timing_distribution() {
    // Record one 60ns sample, restart Glean on the same data dir, and verify
    // the serialized snapshot still reports the same sum.
    let (mut tempdir, _) = tempdir();
    let duration = 60;
    let time_unit = TimeUnit::Nanosecond;
    {
        let (glean, dir) = new_glean(Some(tempdir));
        tempdir = dir;
        let mut metric = TimingDistributionMetric::new(
            CommonMetricData {
                name: "distribution".into(),
                category: "telemetry".into(),
                send_in_pings: vec!["store1".into()],
                disabled: false,
                lifetime: Lifetime::Ping,
                ..Default::default()
            },
            time_unit,
        );
        let id = metric.set_start(0);
        metric.set_stop_and_accumulate(&glean, id, duration);
        let snapshot = metric
            .test_get_value(&glean, "store1")
            .expect("Value should be stored");
        assert_eq!(snapshot.sum, duration);
    }
    // Make a new Glean instance here, which should force reloading of the data from disk
    // so we can ensure it persisted, because it has User lifetime
    // NOTE(review): the metric above is declared with Lifetime::Ping, not
    // User — the comment above appears stale; confirm against glean-core.
    {
        let (glean, _) = new_glean(Some(tempdir));
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), "store1", true)
            .unwrap();
        assert_eq!(
            json!(duration),
            snapshot["timing_distribution"]["telemetry.distribution"]["sum"]
        );
    }
}
#[test]
fn set_value_properly_sets_the_value_in_all_stores() {
    // One accumulated sample must appear (sum and bucket count) in every
    // store named in `send_in_pings`.
    let (glean, _t) = new_glean(None);
    let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
    let duration = 1;
    let mut metric = TimingDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: store_names.clone(),
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        TimeUnit::Nanosecond,
    );
    let id = metric.set_start(0);
    metric.set_stop_and_accumulate(&glean, id, duration);
    for store_name in store_names {
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), &store_name, true)
            .unwrap();
        assert_eq!(
            json!(duration),
            snapshot["timing_distribution"]["telemetry.distribution"]["sum"]
        );
        assert_eq!(
            json!(1),
            snapshot["timing_distribution"]["telemetry.distribution"]["values"]["1"]
        );
    }
}
#[test]
fn timing_distributions_must_not_accumulate_negative_values() {
    // A stop timestamp earlier than the start produces a negative duration,
    // which must be discarded and reported as an InvalidValue error.
    let (glean, _t) = new_glean(None);
    let duration = 60;
    let time_unit = TimeUnit::Nanosecond;
    let mut metric = TimingDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        time_unit,
    );
    // Flip around the timestamps, this should result in a negative value which should be
    // discarded.
    let id = metric.set_start(duration);
    metric.set_stop_and_accumulate(&glean, id, 0);
    assert!(metric.test_get_value(&glean, "store1").is_none());
    // Make sure that the errors have been recorded
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(
            &glean,
            metric.meta(),
            ErrorType::InvalidValue,
            Some("store1")
        )
    );
}
#[test]
fn the_accumulate_samples_api_correctly_stores_timing_values() {
    // Samples given in seconds must be converted to nanoseconds and land
    // in the expected histogram buckets, with no errors recorded.
    let (glean, _t) = new_glean(None);
    let mut metric = TimingDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        TimeUnit::Second,
    );
    // Accumulate the samples. We intentionally do not report
    // negative values to not trigger error reporting.
    metric.accumulate_samples_signed(&glean, [1, 2, 3].to_vec());
    let snapshot = metric
        .test_get_value(&glean, "store1")
        .expect("Value should be stored");
    let seconds_to_nanos = 1000 * 1000 * 1000;
    // Check that we got the right sum and number of samples.
    assert_eq!(snapshot.sum, 6 * seconds_to_nanos);
    // We should get a sample in 3 buckets.
    // These numbers are a bit magic, but they correspond to
    // `hist.sample_to_bucket_minimum(i * seconds_to_nanos)` for `i = 1..=3`.
    assert_eq!(1, snapshot.values[&984_625_593]);
    assert_eq!(1, snapshot.values[&1_969_251_187]);
    assert_eq!(1, snapshot.values[&2_784_941_737]);
    // No errors should be reported.
    assert!(test_get_num_recorded_errors(
        &glean,
        metric.meta(),
        ErrorType::InvalidValue,
        Some("store1")
    )
    .is_err());
}
#[test]
fn the_accumulate_samples_api_correctly_handles_negative_values() {
    // Negative samples must be dropped (the rest accumulated normally) and
    // counted as a single InvalidValue error.
    let (glean, _t) = new_glean(None);
    let mut metric = TimingDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        TimeUnit::Nanosecond,
    );
    // Accumulate the samples.
    metric.accumulate_samples_signed(&glean, [-1, 1, 2, 3].to_vec());
    let snapshot = metric
        .test_get_value(&glean, "store1")
        .expect("Value should be stored");
    // Check that we got the right sum and number of samples.
    assert_eq!(snapshot.sum, 6);
    // We should get a sample in each of the first 3 buckets.
    assert_eq!(1, snapshot.values[&1]);
    assert_eq!(1, snapshot.values[&2]);
    assert_eq!(1, snapshot.values[&3]);
    // 1 error should be reported.
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(
            &glean,
            metric.meta(),
            ErrorType::InvalidValue,
            Some("store1")
        )
    );
}
#[test]
fn the_accumulate_samples_api_correctly_handles_overflowing_values() {
    // Samples beyond MAX_SAMPLE_TIME must be clamped to that maximum (still
    // recorded) and counted as a single InvalidOverflow error.
    let (glean, _t) = new_glean(None);
    let mut metric = TimingDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        TimeUnit::Nanosecond,
    );
    // The MAX_SAMPLE_TIME is the same from `metrics/timing_distribution.rs`.
    const MAX_SAMPLE_TIME: u64 = 1000 * 1000 * 1000 * 60 * 10;
    let overflowing_val = MAX_SAMPLE_TIME as i64 + 1;
    // Accumulate the samples.
    metric.accumulate_samples_signed(&glean, [overflowing_val, 1, 2, 3].to_vec());
    let snapshot = metric
        .test_get_value(&glean, "store1")
        .expect("Value should be stored");
    // Overflowing values are truncated to MAX_SAMPLE_TIME and recorded.
    assert_eq!(snapshot.sum, MAX_SAMPLE_TIME + 6);
    // We should get a sample in each of the first 3 buckets.
    assert_eq!(1, snapshot.values[&1]);
    assert_eq!(1, snapshot.values[&2]);
    assert_eq!(1, snapshot.values[&3]);
    // 1 error should be reported.
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(
            &glean,
            metric.meta(),
            ErrorType::InvalidOverflow,
            Some("store1")
        )
    );
}
#[test]
fn large_nanoseconds_values() {
    // Durations larger than u32::MAX nanoseconds must be accumulated
    // without truncation.
    let (glean, _t) = new_glean(None);
    let mut metric = TimingDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        TimeUnit::Nanosecond,
    );
    let time = Duration::from_secs(10).as_nanos() as u64;
    assert!(time > u64::from(u32::max_value()));
    let id = metric.set_start(0);
    metric.set_stop_and_accumulate(&glean, id, time);
    let val = metric
        .test_get_value(&glean, "store1")
        .expect("Value should be stored");
    // Check that we got the right sum and number of samples.
    assert_eq!(val.sum, time);
}
#[test]
fn stopping_non_existing_id_records_an_error() {
    // Stopping a timer id that was never started must record an
    // InvalidState error instead of accumulating anything.
    let (glean, _t) = new_glean(None);
    let mut metric = TimingDistributionMetric::new(
        CommonMetricData {
            name: "non_existing_id".into(),
            category: "test".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        TimeUnit::Nanosecond,
    );
    metric.set_stop_and_accumulate(&glean, 3785, 60);
    // 1 error should be reported.
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(
            &glean,
            metric.meta(),
            ErrorType::InvalidState,
            Some("store1")
        )
    );
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
mod common;
use crate::common::*;
use std::time::Duration;
use serde_json::json;
use glean_core::metrics::*;
use glean_core::storage::StorageManager;
use glean_core::{test_get_num_recorded_errors, ErrorType};
use glean_core::{CommonMetricData, Lifetime};
// Tests ported from glean-ac
#[test]
fn serializer_should_correctly_serialize_timing_distribution() {
let (mut tempdir, _) = tempdir();
let duration = 60;
let time_unit = TimeUnit::Nanosecond;
{
let (glean, dir) = new_glean(Some(tempdir));
tempdir = dir;
let mut metric = TimingDistributionMetric::new(
CommonMetricData {
name: "distribution".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into()],
disabled: false,
lifetime: Lifetime::Ping,
..Default::default()
},
time_unit,
);
let id = metric.set_start(0);
metric.set_stop_and_accumulate(&glean, id, duration);
let snapshot = metric
.test_get_value(&glean, "store1")
.expect("Value should be stored");
assert_eq!(snapshot.sum, duration);
}
// Make a new Glean instance here, which should force reloading of the data from disk
// so we can ensure it persisted, because it has User lifetime
{
let (glean, _) = new_glean(Some(tempdir));
let snapshot = StorageManager
.snapshot_as_json(glean.storage(), "store1", true)
.unwrap();
assert_eq!(
json!(duration),
snapshot["timing_distribution"]["telemetry.distribution"]["sum"]
);
}
}
#[test]
fn set_value_properly_sets_the_value_in_all_stores() {
    let (glean, _t) = new_glean(None);
    let stores: Vec<String> = vec!["store1".into(), "store2".into()];
    let dur = 1;

    let mut dist = TimingDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: stores.clone(),
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        TimeUnit::Nanosecond,
    );

    // Record one sample; it must then be visible in every configured store.
    let timer = dist.set_start(0);
    dist.set_stop_and_accumulate(&glean, timer, dur);

    for store in &stores {
        let snapshot = StorageManager
            .snapshot_as_json(glean.storage(), store, true)
            .unwrap();
        assert_eq!(
            json!(dur),
            snapshot["timing_distribution"]["telemetry.distribution"]["sum"]
        );
        assert_eq!(
            json!(1),
            snapshot["timing_distribution"]["telemetry.distribution"]["values"]["1"]
        );
    }
}
#[test]
fn timing_distributions_must_not_accumulate_negative_values() {
    let (glean, _t) = new_glean(None);
    let dur = 60;

    let mut dist = TimingDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        TimeUnit::Nanosecond,
    );

    // Start *after* the stop timestamp: the computed duration is negative
    // and must be thrown away rather than recorded.
    let timer = dist.set_start(dur);
    dist.set_stop_and_accumulate(&glean, timer, 0);
    assert!(dist.test_get_value(&glean, "store1").is_none());

    // The discarded sample must be accounted for as an InvalidValue error.
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(
            &glean,
            dist.meta(),
            ErrorType::InvalidValue,
            Some("store1")
        )
    );
}
#[test]
fn the_accumulate_samples_api_correctly_stores_timing_values() {
    let (glean, _t) = new_glean(None);

    let mut dist = TimingDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        TimeUnit::Second,
    );

    // Only positive samples here, deliberately, so no errors are triggered.
    dist.accumulate_samples_signed(&glean, vec![1, 2, 3]);

    let stored = dist
        .test_get_value(&glean, "store1")
        .expect("Value should be stored");

    // The metric's unit is seconds; internally the sum is kept in nanoseconds.
    let nanos_per_second = 1000 * 1000 * 1000;
    assert_eq!(stored.sum, 6 * nanos_per_second);

    // One sample lands in each of three buckets. The keys look magic but are
    // `hist.sample_to_bucket_minimum(i * nanos_per_second)` for `i = 1..=3`.
    assert_eq!(1, stored.values[&984_625_593]);
    assert_eq!(1, stored.values[&1_969_251_187]);
    assert_eq!(1, stored.values[&2_784_941_737]);

    // `test_get_num_recorded_errors` returns Err when no error was recorded.
    assert!(test_get_num_recorded_errors(
        &glean,
        dist.meta(),
        ErrorType::InvalidValue,
        Some("store1")
    )
    .is_err());
}
#[test]
fn the_accumulate_samples_api_correctly_handles_negative_values() {
    let (glean, _t) = new_glean(None);

    let mut dist = TimingDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        TimeUnit::Nanosecond,
    );

    // One negative sample mixed in with three valid ones.
    dist.accumulate_samples_signed(&glean, vec![-1, 1, 2, 3]);

    let stored = dist
        .test_get_value(&glean, "store1")
        .expect("Value should be stored");

    // Only the valid samples contribute to the sum ...
    assert_eq!(stored.sum, 6);
    // ... and each of them lands in its own bucket.
    assert_eq!(1, stored.values[&1]);
    assert_eq!(1, stored.values[&2]);
    assert_eq!(1, stored.values[&3]);

    // The rejected negative sample shows up as a single InvalidValue error.
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(
            &glean,
            dist.meta(),
            ErrorType::InvalidValue,
            Some("store1")
        )
    );
}
#[test]
fn the_accumulate_samples_api_correctly_handles_overflowing_values() {
    let (glean, _t) = new_glean(None);

    let mut dist = TimingDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        TimeUnit::Nanosecond,
    );

    // Mirrors MAX_SAMPLE_TIME from `metrics/timing_distribution.rs`
    // (ten minutes, expressed in nanoseconds).
    const MAX_SAMPLE_TIME: u64 = 1000 * 1000 * 1000 * 60 * 10;
    let too_large = MAX_SAMPLE_TIME as i64 + 1;

    dist.accumulate_samples_signed(&glean, vec![too_large, 1, 2, 3]);

    let stored = dist
        .test_get_value(&glean, "store1")
        .expect("Value should be stored");

    // The oversized sample is clamped to MAX_SAMPLE_TIME but still recorded.
    assert_eq!(stored.sum, MAX_SAMPLE_TIME + 6);
    // The three small samples land in the first three buckets.
    assert_eq!(1, stored.values[&1]);
    assert_eq!(1, stored.values[&2]);
    assert_eq!(1, stored.values[&3]);

    // The clamped sample shows up as a single InvalidOverflow error.
    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(
            &glean,
            dist.meta(),
            ErrorType::InvalidOverflow,
            Some("store1")
        )
    );
}
#[test]
fn large_nanoseconds_values() {
    let (glean, _t) = new_glean(None);

    let mut dist = TimingDistributionMetric::new(
        CommonMetricData {
            name: "distribution".into(),
            category: "telemetry".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        TimeUnit::Nanosecond,
    );

    // Ten seconds in nanoseconds does not fit into a u32; this exercises
    // the full u64 range of the duration parameter.
    let elapsed = Duration::from_secs(10).as_nanos() as u64;
    assert!(elapsed > u64::from(u32::max_value()));

    let timer = dist.set_start(0);
    dist.set_stop_and_accumulate(&glean, timer, elapsed);

    let stored = dist
        .test_get_value(&glean, "store1")
        .expect("Value should be stored");

    // The full large value must be accumulated without truncation.
    assert_eq!(stored.sum, elapsed);
}
#[test]
fn stopping_non_existing_id_records_an_error() {
    let (glean, _t) = new_glean(None);

    let mut dist = TimingDistributionMetric::new(
        CommonMetricData {
            name: "non_existing_id".into(),
            category: "test".into(),
            send_in_pings: vec!["store1".into()],
            disabled: false,
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        TimeUnit::Nanosecond,
    );

    // Stop a timer id that was never started: nothing is accumulated, but
    // the bogus id must be flagged as an InvalidState error.
    dist.set_stop_and_accumulate(&glean, 3785, 60);

    assert_eq!(
        Ok(1),
        test_get_num_recorded_errors(
            &glean,
            dist.meta(),
            ErrorType::InvalidState,
            Some("store1")
        )
    );
}

2
third_party/rust/glean/.cargo-checksum.json поставляемый
Просмотреть файл

@ -1 +1 @@
{"files":{"Cargo.toml":"844796de9e9ae84430437001e4128aca390d634de1bcc9e9393e318f47539efc","LICENSE":"2684de17300e0a434686f1ec7f8af6045207a4b457a3fe04b2b9ce655e7c5d50","README.md":"7bfc89b6f6f0b7d1ca202ec0e24dc74fb2b2dfdea895b1d729d608f7f0ced5d7","src/common_test.rs":"a27d62d2de94e67f1e795a1e2cf9a08b0da24ca41fdcb2b2c17555ce27d97950","src/configuration.rs":"115f8f08058791c579828914ae03719d92d55b14206186c04ba74546dead2b6f","src/core_metrics.rs":"b07cfcb8e23a7b2eed1e6e21a44b9319b55bb8b1e86954fae7005ef27cafbcc6","src/dispatcher/global.rs":"ac32e02349ed1befca11ba09039f31983131d9daf2b169cdb210487915e88470","src/dispatcher/mod.rs":"dfbdf320e52b9273b6795f9a49eaa2e696308df5135243e64475039f6a81d4cf","src/glean_metrics.rs":"68cc4760363de5d5f986fe91628ec0ae2935f4fbd6a3af3d5100f4db3c20377d","src/lib.rs":"6a41c6b9f1df9e7a0d4ca35773651a034c34ae1bd667b31f755d635f761c1649","src/net/http_uploader.rs":"9ac1fa57f87093a61f810f3d484766d2e8a1504f63b4581f1b9c521b084000bc","src/net/mod.rs":"4ea59a2e450a4af66aa7ead237731f1b9333c8f9351adafac01270bba7c79d6d","src/pings.rs":"03e1d55aa1c2bcb7ab79c72ae7c3a02eb1e082bfd4012f7bd5c8ebdcf2ee8eb6","src/private/boolean.rs":"df583a3dca03566d71def9b4138fc77c5d749de524d22f8826015c31e0d3d81f","src/private/counter.rs":"20eb3a9e4454759b4769890f2c0556e7be6b84875ffd0a3babdae43d7091067d","src/private/labeled.rs":"afaece046cb0df9c5a5c01fb96b9a5d6791a8e757727259893a8b035023888a6","src/private/mod.rs":"92ccbe5929d2f31de040433d09b2e7c214f2a588a513206d9d4eeee192602ea4","src/private/ping.rs":"845ebbac3b956a3ccf323d12cd5ebfd518d9925647090e23eb70f00496f89b84","src/private/quantity.rs":"50de8becc001601634acdb3f75dc1567e5451cb637d850ee848680c1e2dc23dd","src/private/recorded_experiment_data.rs":"321f9a8fd77b69c8a33d9339facc6c7eaa990e3cd2cca8b3933650fef3e4b16c","src/private/string.rs":"9ff0d4fa6cbbf2b8762c2d7d869628ea84df8c7e6192e09ef9c272ce2eb46256","src/private/uuid.rs":"e18432e3b344ea49391e58a96385c35c57911d3271ee8ba53732a80b4c4d8f8c","src/system.rs":"094f44bdb99aa581f748
2b4007237881b1fecafa5e6fec64ea586f76e17894cc","src/test.rs":"75e450c8b02b41717052879012f39b716c750833fb72c67407a61deb2dade0fb","tests/schema.rs":"4b3f0a2a2dbe83240bb6511f33a13ddf38b3b299000959b2a7ca56e63f35c073"},"package":"b1d5556ec294a763121f57384cf09be9b7f8eebbfc075040f9120b84f6a1160b"}
{"files":{"Cargo.toml":"0cf7769b2e228dbe53b2062492e86c2fe0d49f57f3b55a955708ccfbcf87686f","LICENSE":"1f256ecad192880510e84ad60474eab7589218784b9a50bc7ceee34c2b91f1d5","README.md":"fd9e0ca6907917ea6bec5de05e15dd21d20fae1cb7f3250467bb20231a8e1065","src/configuration.rs":"b8747397761a9cf6dc64150855b75fd8e48dfe9951ce69e25d646b3a6f46456f","src/core_metrics.rs":"e20697e04f707c34c3c7a0cc4e2ed93e638d3028f03eb75a93f53ae722243986","src/dispatcher/global.rs":"7a2cd33616cbb5d86df617c5dcce50a0149c250e4acbfffb6037618cc722065b","src/dispatcher/mod.rs":"202a1de03bbaff76d1c41b8859f0ac409b0b40f426e22c8b4c0d642a07c2ebf5","src/glean_metrics.rs":"a5e1ea9c4dccb81aec4aa584bd76cf47e916c66af4aff4a0ef5aa297ee2d9aa3","src/lib.rs":"e13ee2939efe7e10f8fc2e921ee91f63e541fa322b3bb690f995c86011815a0d","src/net/http_uploader.rs":"9e8c1837ca0d3f6ea165ec936ab054173c4fe95a958710176c33b4d4d1d98beb","src/net/mod.rs":"ae1f5fcba401b0ebc31a078c96624ad03a39fb359aa0d8182beae32733375554","src/pings.rs":"2dfccd84848e1933aa4f6a7a707c58ec794c8f73ef2d93ea4d4df71d4e6abc31","src/private/boolean.rs":"2ead8da55eca0c8738f3c07445b46b1efa706b3e8a1e60428347e9fcb1d1fd3f","src/private/counter.rs":"b7e9f943d25dfb0cb5df797556dec5193606e3ab126e35fc8b6053530618fd0f","src/private/mod.rs":"d084cf3e1a69a6f1c8602ec86338d7161935b3390580875d57f36040a23a066d","src/private/ping.rs":"a837bc63436fb56ca0fac41c9139c72b8580941d7e8014004be353873a28ac77","src/private/recorded_experiment_data.rs":"3450e6abb48fc1ca9c11d42ef209e4d9b87ccbca0baf8893381ce7c62681f833","src/private/string.rs":"585cd5276bd1ea2570153ee72d53191def198b2618fda3aae3f8332af5651fa8","src/system.rs":"ba7b3eac040abe4691d9d287562ddca6d7e92a6d6109c3f0c443b707a100d75a","src/test.rs":"942f36b3ea18c33000f77e2fdd8194212d7685d5449728707bd36b6f2d5e35fd","tests/schema.rs":"b5acf42de034626f2b7e61fec9709a00370ce80ae3b2bab4db9fc79a20ea5f31"},"package":"8f52254ae2baf857eec45b424a0d2dfe6ac63f353b594cfa4bee033f8386b25c"}

8
third_party/rust/glean/Cargo.toml поставляемый
Просмотреть файл

@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "glean"
version = "33.5.0"
version = "33.4.0"
authors = ["Jan-Erik Rediger <jrediger@mozilla.com>", "The Glean Team <glean-team@mozilla.com>"]
include = ["/README.md", "/LICENSE", "/src", "/tests", "/Cargo.toml"]
description = "Glean SDK Rust language bindings"
@ -25,7 +25,7 @@ repository = "https://github.com/mozilla/glean"
version = "0.4.3"
[dependencies.glean-core]
version = "33.5.0"
version = "33.4.0"
[dependencies.inherent]
version = "0.1.4"
@ -45,10 +45,6 @@ version = "1.0.44"
[dependencies.thiserror]
version = "1.0.4"
[dependencies.uuid]
version = "0.8.1"
features = ["v4"]
[dev-dependencies.env_logger]
version = "0.7.1"
features = ["termcolor", "atty", "humantime"]

746
third_party/rust/glean/LICENSE поставляемый
Просмотреть файл

@ -1,373 +1,373 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

92
third_party/rust/glean/README.md поставляемый
Просмотреть файл

@ -1,46 +1,46 @@
# glean
The `Glean SDK` is a modern approach for a Telemetry library and is part of the [Glean project](https://docs.telemetry.mozilla.org/concepts/glean/glean.html).
## `glean`
This library provides Rust language bindings on top of `glean-core`, targeted at Rust consumers.
**Note: `glean` is currently under development and not yet ready for use.**
## Documentation
All documentation is available online:
* [The Glean SDK Book][book]
* [API documentation][apidocs]
[book]: https://mozilla.github.io/glean/
[apidocs]: https://mozilla.github.io/glean/docs/glean_preview/index.html
## Example
```rust,no_run
use glean::{Configuration, Error, metrics::*};
let cfg = Configuration {
data_path: "/tmp/data".into(),
application_id: "org.mozilla.glean_core.example".into(),
upload_enabled: true,
max_events: None,
delay_ping_lifetime_io: false,
};
glean::initialize(cfg)?;
let prototype_ping = PingType::new("prototype", true, true, vec![]);
glean::register_ping_type(&prototype_ping);
prototype_ping.submit(None);
```
## License
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/
# glean
The `Glean SDK` is a modern approach for a Telemetry library and is part of the [Glean project](https://docs.telemetry.mozilla.org/concepts/glean/glean.html).
## `glean`
This library provides Rust language bindings on top of `glean-core`, targeted at Rust consumers.
**Note: `glean` is currently under development and not yet ready for use.**
## Documentation
All documentation is available online:
* [The Glean SDK Book][book]
* [API documentation][apidocs]
[book]: https://mozilla.github.io/glean/
[apidocs]: https://mozilla.github.io/glean/docs/glean_preview/index.html
## Example
```rust,no_run
use glean::{Configuration, Error, metrics::*};
let cfg = Configuration {
data_path: "/tmp/data".into(),
application_id: "org.mozilla.glean_core.example".into(),
upload_enabled: true,
max_events: None,
delay_ping_lifetime_io: false,
};
glean::initialize(cfg)?;
let prototype_ping = PingType::new("prototype", true, true, vec![]);
glean::register_ping_type(&prototype_ping);
prototype_ping.submit(None);
```
## License
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/

57
third_party/rust/glean/src/common_test.rs поставляемый
Просмотреть файл

@ -1,57 +0,0 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::ClientInfoMetrics;
use crate::Configuration;
use std::sync::{Mutex, MutexGuard};
use once_cell::sync::Lazy;
pub(crate) const GLOBAL_APPLICATION_ID: &str = "org.mozilla.rlb.test";
// Because Glean uses a global-singleton, we need to run the tests one-by-one to
// avoid different tests stomping over each other.
// This is only an issue because we're resetting Glean, this cannot happen in normal
// use of the RLB.
//
// We use a global lock to force synchronization of all tests, even if run multi-threaded.
// This allows us to run without `--test-threads 1`.
/// Acquires the global test lock, initializing logging as a side effect.
pub(crate) fn lock_test() -> MutexGuard<'static, ()> {
    static GLOBAL_LOCK: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));

    // This is going to be called from all the tests: make sure
    // to enable logging. A second initialization attempt errors; ignore it.
    let _ = env_logger::try_init();

    GLOBAL_LOCK.lock().unwrap()
}
// Create a new instance of Glean with a temporary directory.
// We need to keep the `TempDir` alive, so that it's not deleted before we stop using it.
pub(crate) fn new_glean(
    configuration: Option<Configuration>,
    clear_stores: bool,
) -> tempfile::TempDir {
    let dir = tempfile::tempdir().unwrap();
    let data_path = dir.path().display().to_string();

    // Fall back to a default test configuration when none was supplied.
    let cfg = configuration.unwrap_or_else(|| Configuration {
        data_path,
        application_id: GLOBAL_APPLICATION_ID.into(),
        upload_enabled: true,
        max_events: None,
        delay_ping_lifetime_io: false,
        channel: Some("testing".into()),
        server_endpoint: Some("invalid-test-host".into()),
        uploader: None,
    });

    crate::test_reset_glean(cfg, ClientInfoMetrics::unknown(), clear_stores);
    dir
}

62
third_party/rust/glean/src/configuration.rs поставляемый
Просмотреть файл

@ -1,31 +1,31 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::net::PingUploader;
/// The default server pings are sent to.
pub(crate) const DEFAULT_GLEAN_ENDPOINT: &str = "https://incoming.telemetry.mozilla.org";

/// The Glean configuration.
///
/// Optional values will be filled in with default values.
#[derive(Debug)]
pub struct Configuration {
    /// Whether upload should be enabled.
    pub upload_enabled: bool,
    /// Path to a directory to store all data in.
    pub data_path: String,
    /// The application ID (will be sanitized during initialization).
    pub application_id: String,
    /// The maximum number of events to store before sending a ping containing events.
    pub max_events: Option<usize>,
    /// Whether Glean should delay persistence of data from metrics with ping lifetime.
    pub delay_ping_lifetime_io: bool,
    /// The release channel the application is on, if known.
    pub channel: Option<String>,
    /// The server pings are sent to.
    /// NOTE(review): presumably falls back to `DEFAULT_GLEAN_ENDPOINT` when
    /// `None` — confirm against the initialization code.
    pub server_endpoint: Option<String>,
    /// The instance of the uploader used to send pings.
    pub uploader: Option<Box<dyn PingUploader + 'static>>,
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::net::PingUploader;
/// The default server pings are sent to.
pub(crate) const DEFAULT_GLEAN_ENDPOINT: &str = "https://incoming.telemetry.mozilla.org";

/// The Glean configuration.
///
/// Optional values will be filled in with default values.
#[derive(Debug)]
pub struct Configuration {
    /// Whether upload should be enabled.
    pub upload_enabled: bool,
    /// Path to a directory to store all data in.
    pub data_path: String,
    /// The application ID (will be sanitized during initialization).
    pub application_id: String,
    /// The maximum number of events to store before sending a ping containing events.
    pub max_events: Option<usize>,
    /// Whether Glean should delay persistence of data from metrics with ping lifetime.
    pub delay_ping_lifetime_io: bool,
    /// The release channel the application is on, if known.
    pub channel: Option<String>,
    /// The server pings are sent to.
    /// NOTE(review): presumably falls back to `DEFAULT_GLEAN_ENDPOINT` when
    /// `None` — confirm against the initialization code.
    pub server_endpoint: Option<String>,
    /// The instance of the uploader used to send pings.
    pub uploader: Option<Box<dyn PingUploader + 'static>>,
}

196
third_party/rust/glean/src/core_metrics.rs поставляемый
Просмотреть файл

@ -1,98 +1,98 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use glean_core::{metrics::StringMetric, CommonMetricData, Lifetime};
/// Metrics included in every ping as `client_info`.
///
/// See [`ClientInfoMetrics::unknown`] for the placeholder values used when
/// the embedding application provides none.
#[derive(Debug)]
pub struct ClientInfoMetrics {
    /// The build identifier generated by the CI system (e.g. "1234/A").
    pub app_build: String,
    /// The user visible version string (e.g. "1.0.3").
    pub app_display_version: String,
}
impl ClientInfoMetrics {
/// Creates the client info with dummy values for all.
pub fn unknown() -> Self {
ClientInfoMetrics {
app_build: "unknown".to_string(),
app_display_version: "unknown".to_string(),
}
}
}
/// The string metrics backing the `client_info` ping section.
///
/// Each field is a [`StringMetric`] sent in the `glean_client_info` store;
/// see `InternalMetrics::new` for how they are constructed.
#[derive(Debug)]
pub struct InternalMetrics {
    /// Metric recording the application build identifier.
    pub app_build: StringMetric,
    /// Metric recording the user-visible application version.
    pub app_display_version: StringMetric,
    /// Metric recording the release channel.
    pub app_channel: StringMetric,
    /// Metric recording the operating system version.
    pub os_version: StringMetric,
    /// Metric recording the CPU architecture.
    pub architecture: StringMetric,
    /// Metric recording the device manufacturer.
    pub device_manufacturer: StringMetric,
    /// Metric recording the device model.
    pub device_model: StringMetric,
}
impl InternalMetrics {
pub fn new() -> Self {
Self {
app_build: StringMetric::new(CommonMetricData {
name: "app_build".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
app_display_version: StringMetric::new(CommonMetricData {
name: "app_display_version".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
app_channel: StringMetric::new(CommonMetricData {
name: "app_channel".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
os_version: StringMetric::new(CommonMetricData {
name: "os_version".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
architecture: StringMetric::new(CommonMetricData {
name: "architecture".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
device_manufacturer: StringMetric::new(CommonMetricData {
name: "device_manufacturer".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
device_model: StringMetric::new(CommonMetricData {
name: "device_model".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
}
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use glean_core::{metrics::StringMetric, CommonMetricData, Lifetime};
/// Metrics included in every ping as `client_info`.
///
/// See [`ClientInfoMetrics::unknown`] for the placeholder values used when
/// the embedding application provides none.
#[derive(Debug)]
pub struct ClientInfoMetrics {
    /// The build identifier generated by the CI system (e.g. "1234/A").
    pub app_build: String,
    /// The user visible version string (e.g. "1.0.3").
    pub app_display_version: String,
}
impl ClientInfoMetrics {
/// Creates the client info with dummy values for all.
pub fn unknown() -> Self {
ClientInfoMetrics {
app_build: "unknown".to_string(),
app_display_version: "unknown".to_string(),
}
}
}
/// The string metrics backing the `client_info` ping section.
///
/// Each field is a [`StringMetric`] sent in the `glean_client_info` store;
/// see `InternalMetrics::new` for how they are constructed.
#[derive(Debug)]
pub struct InternalMetrics {
    /// Metric recording the application build identifier.
    pub app_build: StringMetric,
    /// Metric recording the user-visible application version.
    pub app_display_version: StringMetric,
    /// Metric recording the release channel.
    pub app_channel: StringMetric,
    /// Metric recording the operating system version.
    pub os_version: StringMetric,
    /// Metric recording the CPU architecture.
    pub architecture: StringMetric,
    /// Metric recording the device manufacturer.
    pub device_manufacturer: StringMetric,
    /// Metric recording the device model.
    pub device_model: StringMetric,
}
impl InternalMetrics {
pub fn new() -> Self {
Self {
app_build: StringMetric::new(CommonMetricData {
name: "app_build".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
app_display_version: StringMetric::new(CommonMetricData {
name: "app_display_version".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
app_channel: StringMetric::new(CommonMetricData {
name: "app_channel".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
os_version: StringMetric::new(CommonMetricData {
name: "os_version".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
architecture: StringMetric::new(CommonMetricData {
name: "architecture".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
device_manufacturer: StringMetric::new(CommonMetricData {
name: "device_manufacturer".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
device_model: StringMetric::new(CommonMetricData {
name: "device_model".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
}
}
}

Просмотреть файл

@ -1,170 +1,170 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use once_cell::sync::Lazy;
use std::{mem, sync::RwLock};
use super::{DispatchError, DispatchGuard, Dispatcher};
/// Upper bound on queued tasks; `launch` discards further tasks once the
/// queue is full (see the `QueueFull` handling in `launch`).
const GLOBAL_DISPATCHER_LIMIT: usize = 100;

/// The single global dispatcher, created eagerly.
///
/// Wrapped in `RwLock<Option<…>>` so tests can swap it out — see
/// `reset_dispatcher`, which replaces it under the write lock.
static GLOBAL_DISPATCHER: Lazy<RwLock<Option<Dispatcher>>> =
    Lazy::new(|| RwLock::new(Some(Dispatcher::new(GLOBAL_DISPATCHER_LIMIT))));
/// Get a dispatcher for the global queue.
///
/// A dispatcher is cheap to create, so we create one on every access instead of caching it.
/// This avoids troubles for tests where the global dispatcher _can_ change.
fn guard() -> DispatchGuard {
    let dispatcher = GLOBAL_DISPATCHER.read().unwrap();
    dispatcher.as_ref().unwrap().guard()
}
/// Launches a new task on the global dispatch queue.
///
/// The new task will be enqueued immediately.
/// If the pre-init queue was already flushed,
/// the background thread will process tasks in the queue (see [`flush_init`]).
///
/// This will not block.
///
/// [`flush_init`]: fn.flush_init.html
pub fn launch(task: impl FnOnce() + Send + 'static) {
    let status = guard().launch(task);
    if let Err(err) = status {
        if matches!(err, DispatchError::QueueFull) {
            log::info!("Exceeded maximum queue size, discarding task");
            // TODO: Record this as an error.
        } else {
            log::info!("Failed to launch a task on the queue. Discarding task.");
        }
    }
}
/// Block until all tasks prior to this call are processed.
///
/// NOTE(review): `guard` unwraps the global `Option<Dispatcher>`, so this
/// panics if the global dispatcher has been removed — confirm this cannot
/// race with `reset_dispatcher`.
pub fn block_on_queue() {
    guard().block_on_queue();
}
/// Starts processing queued tasks in the global dispatch queue.
///
/// This function blocks until queued tasks prior to this call are finished.
/// Once the initial queue is empty the dispatcher will wait for new tasks to be launched.
///
/// # Errors
///
/// Propagates any [`DispatchError`] returned by the underlying dispatch guard.
pub fn flush_init() -> Result<(), DispatchError> {
    guard().flush_init()
}
/// Shuts down the dispatch queue.
///
/// This will initiate a shutdown of the worker thread
/// and no new tasks will be processed after this.
/// It will not block on the worker thread.
///
/// # Errors
///
/// Propagates any [`DispatchError`] returned by the underlying dispatch guard.
pub fn try_shutdown() -> Result<(), DispatchError> {
    guard().shutdown()
}
/// TEST ONLY FUNCTION.
/// Resets the Glean state and triggers init again.
pub(crate) fn reset_dispatcher() {
    // We don't care about shutdown errors, since they will
    // definitely happen if the dispatcher was already shut down.
    let _ = try_shutdown();

    // Now that the dispatcher is shut down, replace it.
    // For that we
    // 1. Create a new dispatcher
    // 2. Replace the global one
    // 3. Wait for the old one to fully finish
    // 4. Only then return (and thus release the lock)
    let mut lock = GLOBAL_DISPATCHER.write().unwrap();
    let new_dispatcher = Some(Dispatcher::new(GLOBAL_DISPATCHER_LIMIT));
    let old_dispatcher = mem::replace(&mut *lock, new_dispatcher);

    // `Option::map` for a side effect is unidiomatic (clippy:
    // `option_map_unit_fn`); join the old worker explicitly instead.
    if let Some(dispatcher) = old_dispatcher {
        dispatcher.join();
    }
}
#[cfg(test)]
mod test {
    use std::sync::{Arc, Mutex};

    use super::*;

    #[test]
    #[ignore] // We can't reset the queue at the moment, so filling it up breaks other tests.
    fn global_fills_up_in_order_and_works() {
        let _ = env_logger::builder().is_test(true).try_init();

        let result = Arc::new(Mutex::new(vec![]));

        // Fill the pre-init queue exactly up to its limit.
        for i in 1..=GLOBAL_DISPATCHER_LIMIT {
            let result = Arc::clone(&result);
            launch(move || {
                result.lock().unwrap().push(i);
            });
        }

        // The queue is full now, so this task should be discarded.
        {
            let result = Arc::clone(&result);
            launch(move || {
                result.lock().unwrap().push(150);
            });
        }

        flush_init().unwrap();

        // After flushing, newly launched tasks are processed again.
        {
            let result = Arc::clone(&result);
            launch(move || {
                result.lock().unwrap().push(200);
            });
        }

        block_on_queue();

        // 150 must be absent: it was launched while the queue was full.
        let mut expected = (1..=GLOBAL_DISPATCHER_LIMIT).collect::<Vec<_>>();
        expected.push(200);
        assert_eq!(&*result.lock().unwrap(), &expected);
    }

    #[test]
    #[ignore] // We can't reset the queue at the moment, so flushing it breaks other tests.
    fn global_nested_calls() {
        let _ = env_logger::builder().is_test(true).try_init();

        let result = Arc::new(Mutex::new(vec![]));

        {
            let result = Arc::clone(&result);
            launch(move || {
                result.lock().unwrap().push(1);
            });
        }

        flush_init().unwrap();

        {
            let result = Arc::clone(&result);
            launch(move || {
                result.lock().unwrap().push(21);

                // A task launched from inside a task is enqueued behind the
                // currently running one, so 3 lands after 22 (see `expected`).
                {
                    let result = Arc::clone(&result);
                    launch(move || {
                        result.lock().unwrap().push(3);
                    });
                }

                result.lock().unwrap().push(22);
            });
        }

        block_on_queue();

        let expected = vec![1, 21, 22, 3];
        assert_eq!(&*result.lock().unwrap(), &expected);
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use once_cell::sync::Lazy;
use std::{mem, sync::RwLock};
use super::{DispatchError, DispatchGuard, Dispatcher};
/// Upper bound on queued tasks; `launch` discards further tasks once the
/// queue is full (see the `QueueFull` handling in `launch`).
const GLOBAL_DISPATCHER_LIMIT: usize = 100;

/// The single global dispatcher, created eagerly.
///
/// Wrapped in `RwLock<Option<…>>` so tests can swap it out — see
/// `reset_dispatcher`, which replaces it under the write lock.
static GLOBAL_DISPATCHER: Lazy<RwLock<Option<Dispatcher>>> =
    Lazy::new(|| RwLock::new(Some(Dispatcher::new(GLOBAL_DISPATCHER_LIMIT))));
/// Get a dispatcher for the global queue.
///
/// A dispatcher is cheap to create, so we create one on every access instead of caching it.
/// This avoids troubles for tests where the global dispatcher _can_ change.
fn guard() -> DispatchGuard {
    let dispatcher = GLOBAL_DISPATCHER.read().unwrap();
    dispatcher.as_ref().unwrap().guard()
}
/// Launches a new task on the global dispatch queue.
///
/// The new task will be enqueued immediately.
/// If the pre-init queue was already flushed,
/// the background thread will process tasks in the queue (see [`flush_init`]).
///
/// This will not block.
///
/// [`flush_init`]: fn.flush_init.html
pub fn launch(task: impl FnOnce() + Send + 'static) {
    let status = guard().launch(task);
    if let Err(err) = status {
        if matches!(err, DispatchError::QueueFull) {
            log::info!("Exceeded maximum queue size, discarding task");
            // TODO: Record this as an error.
        } else {
            log::info!("Failed to launch a task on the queue. Discarding task.");
        }
    }
}
/// Block until all tasks prior to this call are processed.
///
/// NOTE(review): `guard` unwraps the global `Option<Dispatcher>`, so this
/// panics if the global dispatcher has been removed — confirm this cannot
/// race with `reset_dispatcher`.
pub fn block_on_queue() {
    guard().block_on_queue();
}
/// Starts processing queued tasks in the global dispatch queue.
///
/// This function blocks until queued tasks prior to this call are finished.
/// Once the initial queue is empty the dispatcher will wait for new tasks to be launched.
///
/// # Errors
///
/// Propagates any [`DispatchError`] returned by the underlying dispatch guard.
pub fn flush_init() -> Result<(), DispatchError> {
    guard().flush_init()
}
/// Shuts down the dispatch queue.
///
/// This will initiate a shutdown of the worker thread
/// and no new tasks will be processed after this.
/// It will not block on the worker thread.
///
/// # Errors
///
/// Propagates any [`DispatchError`] returned by the underlying dispatch guard.
pub fn try_shutdown() -> Result<(), DispatchError> {
    guard().shutdown()
}
/// TEST ONLY FUNCTION.
/// Resets the Glean state and triggers init again.
pub(crate) fn reset_dispatcher() {
    // We don't care about shutdown errors, since they will
    // definitely happen if the dispatcher was already shut down.
    let _ = try_shutdown();

    // Now that the dispatcher is shut down, replace it.
    // For that we
    // 1. Create a new dispatcher
    // 2. Replace the global one
    // 3. Wait for the old one to fully finish
    // 4. Only then return (and thus release the lock)
    let mut lock = GLOBAL_DISPATCHER.write().unwrap();
    let new_dispatcher = Some(Dispatcher::new(GLOBAL_DISPATCHER_LIMIT));
    let old_dispatcher = mem::replace(&mut *lock, new_dispatcher);

    // `Option::map` for a side effect is unidiomatic (clippy:
    // `option_map_unit_fn`); join the old worker explicitly instead.
    if let Some(dispatcher) = old_dispatcher {
        dispatcher.join();
    }
}
#[cfg(test)]
mod test {
    use std::sync::{Arc, Mutex};

    use super::*;

    #[test]
    #[ignore] // We can't reset the queue at the moment, so filling it up breaks other tests.
    fn global_fills_up_in_order_and_works() {
        let _ = env_logger::builder().is_test(true).try_init();

        let result = Arc::new(Mutex::new(vec![]));

        // Fill the pre-init queue exactly up to its limit.
        for i in 1..=GLOBAL_DISPATCHER_LIMIT {
            let result = Arc::clone(&result);
            launch(move || {
                result.lock().unwrap().push(i);
            });
        }

        // The queue is full now, so this task should be discarded.
        {
            let result = Arc::clone(&result);
            launch(move || {
                result.lock().unwrap().push(150);
            });
        }

        flush_init().unwrap();

        // After flushing, newly launched tasks are processed again.
        {
            let result = Arc::clone(&result);
            launch(move || {
                result.lock().unwrap().push(200);
            });
        }

        block_on_queue();

        // 150 must be absent: it was launched while the queue was full.
        let mut expected = (1..=GLOBAL_DISPATCHER_LIMIT).collect::<Vec<_>>();
        expected.push(200);
        assert_eq!(&*result.lock().unwrap(), &expected);
    }

    #[test]
    #[ignore] // We can't reset the queue at the moment, so flushing it breaks other tests.
    fn global_nested_calls() {
        let _ = env_logger::builder().is_test(true).try_init();

        let result = Arc::new(Mutex::new(vec![]));

        {
            let result = Arc::clone(&result);
            launch(move || {
                result.lock().unwrap().push(1);
            });
        }

        flush_init().unwrap();

        {
            let result = Arc::clone(&result);
            launch(move || {
                result.lock().unwrap().push(21);

                // A task launched from inside a task is enqueued behind the
                // currently running one, so 3 lands after 22 (see `expected`).
                {
                    let result = Arc::clone(&result);
                    launch(move || {
                        result.lock().unwrap().push(3);
                    });
                }

                result.lock().unwrap().push(22);
            });
        }

        block_on_queue();

        let expected = vec![1, 21, 22, 3];
        assert_eq!(&*result.lock().unwrap(), &expected);
    }
}

1040
third_party/rust/glean/src/dispatcher/mod.rs поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

20
third_party/rust/glean/src/glean_metrics.rs поставляемый
Просмотреть файл

@ -1,10 +1,10 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// ** IMPORTANT **
//
// This file is required in order to include the ones generated by
// 'glean-parser' from the SDK registry files.
//
// `concat!` with a single literal argument is a no-op, so include the
// file directly.
include!("pings.rs");
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// ** IMPORTANT **
//
// This file is required in order to include the ones generated by
// 'glean-parser' from the SDK registry files.
// Note: `include!` splices the contents of `pings.rs` into this module
// at compile time; items defined there are visible here.
include!(concat!("pings.rs"));

1183
third_party/rust/glean/src/lib.rs поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,24 +1,24 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::net::{PingUploader, UploadResult};
/// A simple mechanism to upload pings over HTTPS.
///
/// NOTE(review): this is currently a stub (see bug 1675468): it only logs
/// the target URL, drops the body and headers, and unconditionally reports
/// an HTTP 200 result.
#[derive(Debug)]
pub struct HttpUploader;

impl PingUploader for HttpUploader {
    /// Uploads a ping to a server.
    ///
    /// # Arguments
    ///
    /// * `url` - the URL path to upload the data to.
    /// * `body` - the serialized text data to send.
    /// * `headers` - a vector of tuples containing the headers to send with
    /// the request, i.e. (Name, Value).
    ///
    /// No network request is performed yet; the payload is discarded.
    fn upload(&self, url: String, _body: Vec<u8>, _headers: Vec<(String, String)>) -> UploadResult {
        log::debug!("TODO bug 1675468: submitting to {:?}", url);
        UploadResult::HttpStatus(200)
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::net::{PingUploader, UploadResult};
/// A simple mechanism to upload pings over HTTPS.
#[derive(Debug)]
pub struct HttpUploader;

impl PingUploader for HttpUploader {
    /// Uploads a ping to a server.
    ///
    /// # Arguments
    ///
    /// * `url` - the URL path to upload the data to.
    /// * `body` - the serialized text data to send.
    /// * `headers` - a vector of tuples containing the headers to send with
    ///   the request, i.e. (Name, Value).
    fn upload(&self, url: String, _body: Vec<u8>, _headers: Vec<(String, String)>) -> UploadResult {
        // Placeholder transport: log the destination, ignore the payload,
        // and pretend the upload succeeded.
        log::debug!("TODO bug 1675468: submitting to {:?}", url);
        let status = 200;
        UploadResult::HttpStatus(status)
    }
}

222
third_party/rust/glean/src/net/mod.rs поставляемый
Просмотреть файл

@ -1,111 +1,111 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Handling the Glean upload logic.
//!
//! This doesn't perform the actual upload but rather handles
//! retries, upload limitations and error tracking.
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
use std::thread;
use std::time::Duration;
use crate::with_glean;
use glean_core::upload::PingUploadTask;
pub use glean_core::upload::{PingRequest, UploadResult};
pub use http_uploader::*;
mod http_uploader;
/// The duration the uploader thread should sleep, when told to by glean-core.
const THROTTLE_BACKOFF_TIME: Duration = Duration::from_secs(60);
/// A description of a component used to upload pings.
///
/// Implementations must be `Send + Sync`: the upload manager calls
/// `upload` from a background thread (see `UploadManager::trigger_upload`).
pub trait PingUploader: std::fmt::Debug + Send + Sync {
    /// Uploads a ping to a server.
    ///
    /// # Arguments
    ///
    /// * `url` - the URL path to upload the data to.
    /// * `body` - the serialized text data to send.
    /// * `headers` - a vector of tuples containing the headers to send with
    /// the request, i.e. (Name, Value).
    fn upload(&self, url: String, body: Vec<u8>, headers: Vec<(String, String)>) -> UploadResult;
}
/// The logic for uploading pings: this leaves the actual upload mechanism as
/// a detail of the user-provided object implementing `PingUploader`.
pub(crate) struct UploadManager {
    // Shared with the spawned uploader thread via `Arc::clone`.
    inner: Arc<Inner>,
}

struct Inner {
    // Prefix prepended to each request's path to build the full upload URL.
    server_endpoint: String,
    // The user-provided transport actually performing the upload.
    uploader: Box<dyn PingUploader + 'static>,
    // Whether an uploader thread is currently active; used to avoid
    // spawning more than one at a time.
    thread_running: AtomicBool,
}
impl UploadManager {
    /// Create a new instance of the upload manager.
    ///
    /// # Arguments
    ///
    /// * `server_endpoint` - the server pings are sent to.
    /// * `new_uploader` - the instance of the uploader used to send pings.
    pub(crate) fn new(
        server_endpoint: String,
        new_uploader: Box<dyn PingUploader + 'static>,
    ) -> Self {
        Self {
            inner: Arc::new(Inner {
                server_endpoint,
                uploader: new_uploader,
                thread_running: AtomicBool::new(false),
            }),
        }
    }

    /// Signals Glean to upload pings at the next best opportunity.
    pub(crate) fn trigger_upload(&self) {
        // Atomically claim the "running" flag *before* spawning.
        //
        // The previous load-then-spawn sequence (with the `store(true)`
        // happening inside the new thread) was racy: two near-simultaneous
        // callers could both observe `false` and spawn two concurrent
        // uploader threads. `swap` makes check-and-set a single atomic step.
        if self.inner.thread_running.swap(true, Ordering::SeqCst) {
            log::debug!("The upload task is already running.");
            return;
        }
        let inner = Arc::clone(&self.inner);
        thread::spawn(move || {
            loop {
                let incoming_task = with_glean(|glean| glean.get_upload_task());
                match incoming_task {
                    PingUploadTask::Upload(request) => {
                        let doc_id = request.document_id.clone();
                        let upload_url = format!("{}{}", inner.server_endpoint, request.path);
                        let headers: Vec<(String, String)> = request.headers.into_iter().collect();
                        let result = inner.uploader.upload(upload_url, request.body, headers);
                        // Process the upload response.
                        with_glean(|glean| glean.process_ping_upload_response(&doc_id, result));
                    }
                    PingUploadTask::Wait => {
                        // glean-core asked us to back off; sleep, then poll again.
                        thread::sleep(THROTTLE_BACKOFF_TIME);
                    }
                    PingUploadTask::Done => {
                        // Nothing left to upload: clear the running flag and
                        // let the thread terminate.
                        inner.thread_running.store(false, Ordering::SeqCst);
                        return;
                    }
                }
            }
        });
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! Handling the Glean upload logic.
//!
//! This doesn't perform the actual upload but rather handles
//! retries, upload limitations and error tracking.
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
use std::thread;
use std::time::Duration;
use crate::with_glean;
use glean_core::upload::PingUploadTask;
pub use glean_core::upload::{PingRequest, UploadResult};
pub use http_uploader::*;
mod http_uploader;
/// The duration the uploader thread should sleep, when told to by glean-core.
const THROTTLE_BACKOFF_TIME: Duration = Duration::from_secs(60);
/// A description of a component used to upload pings.
///
/// `Send + Sync` is required because `upload` is invoked from the
/// background thread spawned by `UploadManager::trigger_upload`.
pub trait PingUploader: std::fmt::Debug + Send + Sync {
    /// Uploads a ping to a server.
    ///
    /// # Arguments
    ///
    /// * `url` - the URL path to upload the data to.
    /// * `body` - the serialized text data to send.
    /// * `headers` - a vector of tuples containing the headers to send with
    /// the request, i.e. (Name, Value).
    fn upload(&self, url: String, body: Vec<u8>, headers: Vec<(String, String)>) -> UploadResult;
}
/// The logic for uploading pings: this leaves the actual upload mechanism as
/// a detail of the user-provided object implementing `PingUploader`.
pub(crate) struct UploadManager {
    // Shared state, cloned into the uploader thread.
    inner: Arc<Inner>,
}

struct Inner {
    // Base URL the ping request paths are appended to.
    server_endpoint: String,
    // User-provided transport used to send each ping.
    uploader: Box<dyn PingUploader + 'static>,
    // Best-effort guard so only one uploader thread runs at a time.
    thread_running: AtomicBool,
}
impl UploadManager {
    /// Create a new instance of the upload manager.
    ///
    /// # Arguments
    ///
    /// * `server_endpoint` - the server pings are sent to.
    /// * `new_uploader` - the instance of the uploader used to send pings.
    pub(crate) fn new(
        server_endpoint: String,
        new_uploader: Box<dyn PingUploader + 'static>,
    ) -> Self {
        Self {
            inner: Arc::new(Inner {
                server_endpoint,
                uploader: new_uploader,
                thread_running: AtomicBool::new(false),
            }),
        }
    }

    /// Signals Glean to upload pings at the next best opportunity.
    ///
    /// NOTE(review): the `load` below and the `store(true)` inside the
    /// spawned thread are not one atomic step; two near-simultaneous callers
    /// can both observe `false` and spawn two uploader threads. Consider
    /// `swap(true, Ordering::SeqCst)` before spawning instead.
    pub(crate) fn trigger_upload(&self) {
        if self.inner.thread_running.load(Ordering::SeqCst) {
            log::debug!("The upload task is already running.");
            return;
        }
        let inner = Arc::clone(&self.inner);
        thread::spawn(move || {
            // Mark the uploader as running.
            inner.thread_running.store(true, Ordering::SeqCst);
            loop {
                // Poll glean-core for the next piece of upload work.
                let incoming_task = with_glean(|glean| glean.get_upload_task());
                match incoming_task {
                    PingUploadTask::Upload(request) => {
                        let doc_id = request.document_id.clone();
                        let upload_url = format!("{}{}", inner.server_endpoint, request.path);
                        let headers: Vec<(String, String)> = request.headers.into_iter().collect();
                        let result = inner.uploader.upload(upload_url, request.body, headers);
                        // Process the upload response.
                        with_glean(|glean| glean.process_ping_upload_response(&doc_id, result));
                    }
                    PingUploadTask::Wait => {
                        // Throttled: sleep before polling again.
                        thread::sleep(THROTTLE_BACKOFF_TIME);
                    }
                    PingUploadTask::Done => {
                        // Nothing to do here, break out of the loop and clear the
                        // running flag.
                        inner.thread_running.store(false, Ordering::SeqCst);
                        return;
                    }
                }
            }
        });
    }
}

114
third_party/rust/glean/src/pings.rs поставляемый
Просмотреть файл

@ -1,57 +1,57 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// ** IMPORTANT **
//
// This file is *temporary*, it will be generated by 'glean-parser'
// from the SDK registry files in the long run.
pub mod pings {
    use crate::private::PingType;
    use once_cell::sync::Lazy;

    /// The built-in `baseline` ping: includes the client id (`true`) and is
    /// submitted even when empty (`true`).
    #[allow(non_upper_case_globals)]
    pub static baseline: Lazy<PingType> = Lazy::new(|| {
        PingType::new(
            "baseline",
            true,
            true,
            vec![
                "background".to_string(),
                "dirty_startup".to_string(),
                "foreground".to_string(),
            ],
        )
    });

    /// The built-in `metrics` ping.
    #[allow(non_upper_case_globals)]
    pub static metrics: Lazy<PingType> = Lazy::new(|| {
        PingType::new(
            "metrics",
            true,
            false,
            vec![
                "overdue".to_string(),
                "reschedule".to_string(),
                "today".to_string(),
                "tomorrow".to_string(),
                "upgrade".to_string(),
            ],
        )
    });

    /// The built-in `events` ping.
    #[allow(non_upper_case_globals)]
    pub static events: Lazy<PingType> = Lazy::new(|| {
        PingType::new(
            // BUG FIX: this ping was mistakenly registered under the name
            // "metrics", colliding with the ping above; its name is "events".
            "events",
            true,
            false,
            vec![
                "background".to_string(),
                "max_capacity".to_string(),
                "startup".to_string(),
            ],
        )
    });
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
// ** IMPORTANT **
//
// This file is *temporary*, it will be generated by 'glean-parser'
// from the SDK registry files in the long run.
pub mod pings {
    use crate::private::PingType;
    use once_cell::sync::Lazy;

    /// The built-in `baseline` ping: includes the client id and is sent
    /// even if empty.
    #[allow(non_upper_case_globals)]
    pub static baseline: Lazy<PingType> = Lazy::new(|| {
        PingType::new(
            "baseline",
            true,
            true,
            vec![
                "background".to_string(),
                "dirty_startup".to_string(),
                "foreground".to_string(),
            ],
        )
    });

    /// The built-in `metrics` ping.
    #[allow(non_upper_case_globals)]
    pub static metrics: Lazy<PingType> = Lazy::new(|| {
        PingType::new(
            "metrics",
            true,
            false,
            vec![
                "overdue".to_string(),
                "reschedule".to_string(),
                "today".to_string(),
                "tomorrow".to_string(),
                "upgrade".to_string(),
            ],
        )
    });

    /// The built-in `events` ping.
    #[allow(non_upper_case_globals)]
    pub static events: Lazy<PingType> = Lazy::new(|| {
        PingType::new(
            // Fixed a copy-paste error: the events ping was registered as
            // "metrics", which collides with the static above.
            "events",
            true,
            false,
            vec![
                "background".to_string(),
                "max_capacity".to_string(),
                "startup".to_string(),
            ],
        )
    });
}

128
third_party/rust/glean/src/private/boolean.rs поставляемый
Просмотреть файл

@ -1,64 +1,64 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use inherent::inherent;
use std::sync::Arc;
use glean_core::metrics::MetricType;
use crate::dispatcher;
// We need to wrap the glean-core type: otherwise if we try to implement
// the trait for the metric in `glean_core::metrics` we hit error[E0117]:
// only traits defined in the current crate can be implemented for arbitrary
// types.
/// This implements the developer facing API for recording boolean metrics.
///
/// Instances of this class type are automatically generated by the parsers
/// at build time, allowing developers to record values that were previously
/// registered in the metrics.yaml file.
#[derive(Clone)]
pub struct BooleanMetric(pub(crate) Arc<glean_core::metrics::BooleanMetric>);

impl BooleanMetric {
    /// The public constructor used by automatically generated metrics.
    ///
    /// Wraps the `glean_core` metric in an `Arc` so recording can be moved
    /// onto the dispatcher queue while callers keep a cheap handle.
    pub fn new(meta: glean_core::CommonMetricData) -> Self {
        Self(Arc::new(glean_core::metrics::BooleanMetric::new(meta)))
    }
}
#[inherent(pub)]
impl glean_core::traits::Boolean for BooleanMetric {
    /// Sets to the specified boolean value.
    ///
    /// # Arguments
    ///
    /// * `value` - the value to set.
    fn set(&self, value: bool) {
        let metric = Arc::clone(&self.0);
        dispatcher::launch(move || crate::with_glean(|glean| metric.set(glean, value)));
    }

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a boolean.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    ///   metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<bool> {
        // Drain the dispatcher first so pending `set` calls are applied.
        dispatcher::block_on_queue();
        let queried_ping_name = ping_name
            .into()
            .unwrap_or_else(|| &self.0.meta().send_in_pings[0]);
        crate::with_glean(|glean| self.0.test_get_value(glean, queried_ping_name))
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use inherent::inherent;
use std::sync::Arc;
use glean_core::metrics::MetricType;
use crate::dispatcher;
// We need to wrap the glean-core type: otherwise if we try to implement
// the trait for the metric in `glean_core::metrics` we hit error[E0117]:
// only traits defined in the current crate can be implemented for arbitrary
// types.
/// This implements the developer facing API for recording boolean metrics.
///
/// Instances of this class type are automatically generated by the parsers
/// at build time, allowing developers to record values that were previously
/// registered in the metrics.yaml file.
#[derive(Clone)]
pub struct BooleanMetric(pub(crate) Arc<glean_core::metrics::BooleanMetric>);

impl BooleanMetric {
    /// The public constructor used by automatically generated metrics.
    ///
    /// The inner `glean_core` metric is reference-counted so it can be
    /// shared with tasks launched on the dispatcher.
    pub fn new(meta: glean_core::CommonMetricData) -> Self {
        Self(Arc::new(glean_core::metrics::BooleanMetric::new(meta)))
    }
}
#[inherent(pub)]
impl glean_core::traits::Boolean for BooleanMetric {
    /// Sets to the specified boolean value.
    ///
    /// # Arguments
    ///
    /// * `value` - the value to set.
    fn set(&self, value: bool) {
        // Recording is asynchronous: the clone of the Arc moves into the task.
        let metric = Arc::clone(&self.0);
        dispatcher::launch(move || crate::with_glean(|glean| metric.set(glean, value)));
    }
    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as a boolean.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<bool> {
        // Flush pending `set` calls before reading.
        dispatcher::block_on_queue();
        let queried_ping_name = match ping_name.into() {
            Some(name) => name,
            // assumes `send_in_pings` is non-empty — panics otherwise.
            None => self.0.meta().send_in_pings.first().unwrap(),
        };
        crate::with_glean(|glean| self.0.test_get_value(glean, queried_ping_name))
    }
}

186
third_party/rust/glean/src/private/counter.rs поставляемый
Просмотреть файл

@ -1,94 +1,92 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use inherent::inherent;
use std::sync::Arc;
use glean_core::metrics::MetricType;
use glean_core::ErrorType;
use crate::dispatcher;
// We need to wrap the glean-core type: otherwise if we try to implement
// the trait for the metric in `glean_core::metrics` we hit error[E0117]:
// only traits defined in the current crate can be implemented for arbitrary
// types.
/// This implements the developer facing API for recording counter metrics.
///
/// Instances of this class type are automatically generated by the parsers
/// at build time, allowing developers to record values that were previously
/// registered in the metrics.yaml file.
#[derive(Clone)]
pub struct CounterMetric(pub(crate) Arc<glean_core::metrics::CounterMetric>);

impl CounterMetric {
    /// The public constructor used by automatically generated metrics.
    ///
    /// Wraps the `glean_core` metric in an `Arc` so it can be shared with
    /// tasks launched on the dispatcher.
    pub fn new(meta: glean_core::CommonMetricData) -> Self {
        Self(Arc::new(glean_core::metrics::CounterMetric::new(meta)))
    }
}
#[inherent(pub)]
impl glean_core::traits::Counter for CounterMetric {
    /// Increases the counter by `amount`.
    ///
    /// # Arguments
    ///
    /// * `amount` - The amount to increase by. Should be positive.
    ///
    /// ## Notes
    ///
    /// Logs an error if the `amount` is 0 or negative.
    fn add(&self, amount: i32) {
        // Recording happens asynchronously on the dispatcher queue.
        let metric = Arc::clone(&self.0);
        dispatcher::launch(move || crate::with_glean(|glean| metric.add(glean, amount)));
    }
    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as an integer.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<i32> {
        // Drain pending `add` calls before reading.
        dispatcher::block_on_queue();
        let queried_ping_name = ping_name
            .into()
            .unwrap_or_else(|| &self.0.meta().send_in_pings[0]);
        crate::with_glean(|glean| self.0.test_get_value(glean, queried_ping_name))
    }
    /// **Exported for test purposes.**
    ///
    /// Gets the number of recorded errors for the given metric and error type.
    ///
    /// # Arguments
    ///
    /// * `error` - The type of error
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    ///
    /// # Returns
    ///
    /// The number of errors reported.
    fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
        &self,
        error: ErrorType,
        ping_name: S,
    ) -> i32 {
        // Flush queued recordings so their errors are visible.
        dispatcher::block_on_queue();
        crate::with_glean_mut(|glean| {
            glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
                .unwrap_or(0)
        })
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use inherent::inherent;
use std::sync::Arc;
use glean_core::metrics::MetricType;
use glean_core::ErrorType;
use crate::dispatcher;
// We need to wrap the glean-core type: otherwise if we try to implement
// the trait for the metric in `glean_core::metrics` we hit error[E0117]:
// only traits defined in the current crate can be implemented for arbitrary
// types.
/// This implements the developer facing API for recording counter metrics.
///
/// Instances of this class type are automatically generated by the parsers
/// at build time, allowing developers to record values that were previously
/// registered in the metrics.yaml file.
#[derive(Clone)]
pub struct CounterMetric(pub(crate) Arc<glean_core::metrics::CounterMetric>);

impl CounterMetric {
    /// The public constructor used by automatically generated metrics.
    ///
    /// The inner `glean_core` metric is reference-counted so recording can
    /// be dispatched to the background queue.
    pub fn new(meta: glean_core::CommonMetricData) -> Self {
        Self(Arc::new(glean_core::metrics::CounterMetric::new(meta)))
    }
}
#[inherent(pub)]
impl glean_core::traits::Counter for CounterMetric {
    /// Increases the counter by `amount`.
    ///
    /// # Arguments
    ///
    /// * `amount` - The amount to increase by. Should be positive.
    ///
    /// ## Notes
    ///
    /// Logs an error if the `amount` is 0 or negative.
    fn add(&self, amount: i32) {
        // Recording happens asynchronously on the dispatcher queue.
        let metric = Arc::clone(&self.0);
        dispatcher::launch(move || crate::with_glean(|glean| metric.add(glean, amount)));
    }

    /// **Exported for test purposes.**
    ///
    /// Gets the currently stored value as an integer.
    ///
    /// This doesn't clear the stored value.
    ///
    /// # Arguments
    ///
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<i32> {
        // Drain pending `add` calls before reading.
        dispatcher::block_on_queue();
        let queried_ping_name = ping_name
            .into()
            .unwrap_or_else(|| &self.0.meta().send_in_pings[0]);
        crate::with_glean(|glean| self.0.test_get_value(glean, queried_ping_name))
    }

    /// **Exported for test purposes.**
    ///
    /// Gets the number of recorded errors for the given metric and error type.
    ///
    /// # Arguments
    ///
    /// * `error` - The type of error
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    ///
    /// # Returns
    ///
    /// The number of errors reported.
    fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
        &self,
        error: ErrorType,
        ping_name: S,
    ) -> i32 {
        // Flush queued recordings first so their errors are visible; this
        // matches `test_get_value` above and the other metric types, which
        // all block on the dispatcher queue before reading test state.
        dispatcher::block_on_queue();
        crate::with_glean_mut(|glean| {
            glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
                .unwrap_or(0)
        })
    }
}

381
third_party/rust/glean/src/private/labeled.rs поставляемый
Просмотреть файл

@ -1,381 +0,0 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use inherent::inherent;
use std::sync::Arc;
use glean_core::metrics::MetricType;
use glean_core::ErrorType;
use crate::dispatcher;
/// Sealed traits protect against downstream implementations.
///
/// We wrap it in a private module that is inaccessible outside of this module.
mod private {
    use crate::{
        private::BooleanMetric, private::CounterMetric, private::StringMetric, CommonMetricData,
    };
    use std::sync::Arc;
    /// The sealed labeled trait.
    ///
    /// This also allows us to hide methods, that are only used internally
    /// and should not be visible to users of the object implementing the
    /// `Labeled<T>` trait.
    pub trait Sealed {
        /// The `glean_core` metric type representing the labeled metric.
        type Inner: glean_core::metrics::MetricType + Clone;
        /// Create a new metric object implementing this trait from the inner type.
        fn from_inner(metric: Self::Inner) -> Self;
        /// Create a new `glean_core` metric from the metadata.
        fn new_inner(meta: crate::CommonMetricData) -> Self::Inner;
    }
    // `LabeledMetric<BooleanMetric>` is possible.
    //
    // See [Labeled Booleans](https://mozilla.github.io/glean/book/user/metrics/labeled_booleans.html).
    impl Sealed for BooleanMetric {
        type Inner = glean_core::metrics::BooleanMetric;
        fn from_inner(metric: Self::Inner) -> Self {
            BooleanMetric(Arc::new(metric))
        }
        fn new_inner(meta: CommonMetricData) -> Self::Inner {
            glean_core::metrics::BooleanMetric::new(meta)
        }
    }
    // `LabeledMetric<StringMetric>` is possible.
    //
    // See [Labeled Strings](https://mozilla.github.io/glean/book/user/metrics/labeled_strings.html).
    impl Sealed for StringMetric {
        type Inner = glean_core::metrics::StringMetric;
        fn from_inner(metric: Self::Inner) -> Self {
            StringMetric(Arc::new(metric))
        }
        fn new_inner(meta: CommonMetricData) -> Self::Inner {
            glean_core::metrics::StringMetric::new(meta)
        }
    }
    // `LabeledMetric<CounterMetric>` is possible.
    //
    // See [Labeled Counters](https://mozilla.github.io/glean/book/user/metrics/labeled_counters.html).
    impl Sealed for CounterMetric {
        type Inner = glean_core::metrics::CounterMetric;
        fn from_inner(metric: Self::Inner) -> Self {
            CounterMetric(Arc::new(metric))
        }
        fn new_inner(meta: CommonMetricData) -> Self::Inner {
            glean_core::metrics::CounterMetric::new(meta)
        }
    }
}
/// Marker trait for metrics that can be nested inside a labeled metric.
///
/// This trait is sealed and cannot be implemented for types outside this crate.
pub trait AllowLabeled: private::Sealed {}
// Implement the trait for everything we marked as allowed.
// The blanket impl means `AllowLabeled` is exactly the set of `Sealed` types.
impl<T> AllowLabeled for T where T: private::Sealed {}
// We need to wrap the glean-core type: otherwise if we try to implement
// the trait for the metric in `glean_core::metrics` we hit error[E0117]:
// only traits defined in the current crate can be implemented for arbitrary
// types.
/// This implements the specific facing API for recording labeled metrics.
///
/// Instances of this type are automatically generated by the parser
/// at build time, allowing developers to record values that were previously
/// registered in the metrics.yaml file.
/// Unlike most metric types, `LabeledMetric` does not have its own corresponding
/// storage, but records metrics for the underlying metric type `T` in the storage
/// for that type.
#[derive(Clone)]
pub struct LabeledMetric<T: AllowLabeled>(
    // The glean-core labeled wrapper around the inner metric type.
    pub(crate) Arc<glean_core::metrics::LabeledMetric<T::Inner>>,
);
impl<T> LabeledMetric<T>
where
    T: AllowLabeled,
{
    /// The public constructor used by automatically generated metrics.
    ///
    /// # Arguments
    ///
    /// * `meta` - the common metric metadata for the submetric.
    /// * `labels` - the set of acceptable labels, or `None` to allow
    ///   dynamic labels.
    pub fn new(meta: glean_core::CommonMetricData, labels: Option<Vec<String>>) -> Self {
        let submetric = T::new_inner(meta);
        let core = glean_core::metrics::LabeledMetric::new(submetric, labels);
        Self(Arc::new(core))
    }
}
#[inherent(pub)]
impl<T> glean_core::traits::Labeled<T> for LabeledMetric<T>
where
    T: AllowLabeled + Clone,
{
    /// Gets a specific metric for a given label.
    ///
    /// If a set of acceptable labels were specified in the `metrics.yaml` file,
    /// and the given label is not in the set, it will be recorded under the special `OTHER_LABEL` label.
    ///
    /// If a set of acceptable labels was not specified in the `metrics.yaml` file,
    /// only the first 16 unique labels will be used.
    /// After that, any additional labels will be recorded under the special `OTHER_LABEL` label.
    ///
    /// Labels must be `snake_case` and less than 30 characters.
    /// If an invalid label is used, the metric will be recorded in the special `OTHER_LABEL` label.
    fn get(&self, label: &str) -> T {
        // Re-wrap the glean-core submetric in the crate-level wrapper type.
        let inner = self.0.get(label);
        T::from_inner(inner)
    }
    /// **Exported for test purposes.**
    ///
    /// Gets the number of recorded errors for the given metric and error type.
    ///
    /// # Arguments
    ///
    /// * `error` - The type of error
    /// * `ping_name` - represents the optional name of the ping to retrieve the
    /// metric for. Defaults to the first value in `send_in_pings`.
    ///
    /// # Returns
    ///
    /// The number of errors reported.
    fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
        &self,
        error: ErrorType,
        ping_name: S,
    ) -> i32 {
        // Flush queued recordings first so their errors are visible.
        dispatcher::block_on_queue();
        crate::with_glean_mut(|glean| {
            glean_core::test_get_num_recorded_errors(
                &glean,
                self.0.get_submetric().meta(),
                error,
                ping_name.into(),
            )
            .unwrap_or(0)
        })
    }
}
#[cfg(test)]
mod test {
    use super::ErrorType;
    use crate::common_test::{lock_test, new_glean};
    use crate::destroy_glean;
    use crate::private::{BooleanMetric, CounterMetric, LabeledMetric, StringMetric};
    use crate::CommonMetricData;

    // Labeled counters record into per-label storage independently.
    #[test]
    fn test_labeled_counter_type() {
        let _lock = lock_test();
        let _t = new_glean(None, true);
        let metric: LabeledMetric<CounterMetric> = LabeledMetric::new(
            CommonMetricData {
                name: "labeled_counter".into(),
                category: "labeled".into(),
                send_in_pings: vec!["test1".into()],
                ..Default::default()
            },
            None,
        );
        metric.get("label1").add(1);
        metric.get("label2").add(2);
        assert_eq!(1, metric.get("label1").test_get_value("test1").unwrap());
        assert_eq!(2, metric.get("label2").test_get_value("test1").unwrap());
    }

    // With a predefined label set, unknown labels collapse into `__other__`.
    #[test]
    fn test_other_label_with_predefined_labels() {
        let _lock = lock_test();
        let _t = new_glean(None, true);
        let metric: LabeledMetric<CounterMetric> = LabeledMetric::new(
            CommonMetricData {
                name: "labeled_counter".into(),
                category: "labeled".into(),
                send_in_pings: vec!["test1".into()],
                ..Default::default()
            },
            Some(vec!["foo".into(), "bar".into(), "baz".into()]),
        );
        metric.get("foo").add(1);
        metric.get("foo").add(2);
        metric.get("bar").add(1);
        metric.get("not_there").add(1);
        metric.get("also_not_there").add(1);
        metric.get("not_me").add(1);
        assert_eq!(3, metric.get("foo").test_get_value(None).unwrap());
        assert_eq!(1, metric.get("bar").test_get_value(None).unwrap());
        assert!(metric.get("baz").test_get_value(None).is_none());
        // The rest all lands in the __other__ bucket.
        assert_eq!(3, metric.get("__other__").test_get_value(None).unwrap());
    }

    // Without predefined labels, only the first 16 dynamic labels stick;
    // the rest overflow into `__other__`.
    #[test]
    fn test_other_label_without_predefined_labels() {
        let _lock = lock_test();
        let _t = new_glean(None, true);
        let metric: LabeledMetric<CounterMetric> = LabeledMetric::new(
            CommonMetricData {
                name: "labeled_counter".into(),
                category: "labeled".into(),
                send_in_pings: vec!["test1".into()],
                ..Default::default()
            },
            None,
        );
        // Record in 20 labels: it will go over the maximum number of supported
        // dynamic labels.
        for i in 0..=20 {
            metric.get(format!("label_{}", i).as_str()).add(1);
        }
        // Record in a label once again.
        metric.get("label_0").add(1);
        assert_eq!(2, metric.get("label_0").test_get_value(None).unwrap());
        for i in 1..15 {
            assert_eq!(
                1,
                metric
                    .get(format!("label_{}", i).as_str())
                    .test_get_value(None)
                    .unwrap()
            );
        }
        assert_eq!(5, metric.get("__other__").test_get_value(None).unwrap());
    }

    // Same as above, but recordings happen before Glean is initialized and
    // must be replayed correctly after init.
    #[test]
    fn test_other_label_without_predefined_labels_before_glean_init() {
        let _lock = lock_test();
        // We explicitly want Glean to not be initialized.
        destroy_glean(true);
        let metric: LabeledMetric<CounterMetric> = LabeledMetric::new(
            CommonMetricData {
                name: "labeled_counter".into(),
                category: "labeled".into(),
                send_in_pings: vec!["test1".into()],
                ..Default::default()
            },
            None,
        );
        // Record in 20 labels: it will go over the maximum number of supported
        // dynamic labels.
        for i in 0..=20 {
            metric.get(format!("label_{}", i).as_str()).add(1);
        }
        // Record in a label once again.
        metric.get("label_0").add(1);
        // Initialize Glean.
        let _t = new_glean(None, false);
        assert_eq!(2, metric.get("label_0").test_get_value(None).unwrap());
        for i in 1..15 {
            assert_eq!(
                1,
                metric
                    .get(format!("label_{}", i).as_str())
                    .test_get_value(None)
                    .unwrap()
            );
        }
        assert_eq!(5, metric.get("__other__").test_get_value(None).unwrap());
    }

    #[test]
    fn test_labeled_string_type() {
        let _lock = lock_test();
        let _t = new_glean(None, true);
        let metric: LabeledMetric<StringMetric> = LabeledMetric::new(
            CommonMetricData {
                name: "labeled_string".into(),
                category: "labeled".into(),
                send_in_pings: vec!["test1".into()],
                ..Default::default()
            },
            None,
        );
        metric.get("label1").set("foo");
        metric.get("label2").set("bar");
        assert_eq!("foo", metric.get("label1").test_get_value("test1").unwrap());
        assert_eq!("bar", metric.get("label2").test_get_value("test1").unwrap());
    }

    #[test]
    fn test_labeled_boolean_type() {
        let _lock = lock_test();
        let _t = new_glean(None, true);
        let metric: LabeledMetric<BooleanMetric> = LabeledMetric::new(
            CommonMetricData {
                name: "labeled_boolean".into(),
                category: "labeled".into(),
                send_in_pings: vec!["test1".into()],
                ..Default::default()
            },
            None,
        );
        metric.get("label1").set(false);
        metric.get("label2").set(true);
        assert!(!metric.get("label1").test_get_value("test1").unwrap());
        assert!(metric.get("label2").test_get_value("test1").unwrap());
    }

    // Invalid label characters route the recording to `__other__` and bump
    // the InvalidLabel error count.
    #[test]
    fn test_invalid_labels_record_errors() {
        let _lock = lock_test();
        let _t = new_glean(None, true);
        let metric: LabeledMetric<BooleanMetric> = LabeledMetric::new(
            CommonMetricData {
                name: "labeled_boolean".into(),
                category: "labeled".into(),
                send_in_pings: vec!["test1".into()],
                ..Default::default()
            },
            None,
        );
        let invalid_label = "!#I'm invalid#--_";
        metric.get(invalid_label).set(true);
        assert_eq!(true, metric.get("__other__").test_get_value(None).unwrap());
        assert_eq!(
            1,
            metric.test_get_num_recorded_errors(ErrorType::InvalidLabel, None)
        );
    }
}

40
third_party/rust/glean/src/private/mod.rs поставляемый
Просмотреть файл

@ -1,23 +1,17 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! The different metric types supported by the Glean SDK to handle data.
mod boolean;
mod counter;
mod labeled;
mod ping;
mod quantity;
mod recorded_experiment_data;
mod string;
mod uuid;
pub use self::uuid::UuidMetric;
pub use boolean::BooleanMetric;
pub use counter::CounterMetric;
pub use labeled::{AllowLabeled, LabeledMetric};
pub use ping::PingType;
pub use quantity::QuantityMetric;
pub use recorded_experiment_data::RecordedExperimentData;
pub use string::StringMetric;
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! The different metric types supported by the Glean SDK to handle data.
mod boolean;
mod counter;
mod ping;
mod recorded_experiment_data;
mod string;
pub use boolean::BooleanMetric;
pub use counter::CounterMetric;
pub use ping::PingType;
pub use recorded_experiment_data::RecordedExperimentData;
pub use string::StringMetric;

129
third_party/rust/glean/src/private/ping.rs поставляемый
Просмотреть файл

@ -1,67 +1,62 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use inherent::inherent;
/// A Glean ping.
#[derive(Clone, Debug)]
pub struct PingType {
pub(crate) name: String,
pub(crate) ping_type: glean_core::metrics::PingType,
}
impl PingType {
/// Creates a new ping type.
///
/// # Arguments
///
/// * `name` - The name of the ping.
/// * `include_client_id` - Whether to include the client ID in the assembled ping when.
/// * `send_if_empty` - Whether the ping should be sent empty or not.
/// * `reason_codes` - The valid reason codes for this ping.
pub fn new<A: Into<String>>(
name: A,
include_client_id: bool,
send_if_empty: bool,
reason_codes: Vec<String>,
) -> Self {
let name = name.into();
let ping_type = glean_core::metrics::PingType::new(
name.clone(),
include_client_id,
send_if_empty,
reason_codes,
);
let me = Self { name, ping_type };
crate::register_ping_type(&me);
me
}
}
#[inherent(pub)]
impl glean_core::traits::Ping for PingType {
/// Collect and submit the ping for eventual upload.
///
/// This will collect all stored data to be included in the ping.
/// Data with lifetime `ping` will then be reset.
///
/// If the ping is configured with `send_if_empty = false`
/// and the ping currently contains no content,
/// it will not be queued for upload.
/// If the ping is configured with `send_if_empty = true`
/// it will be queued for upload even if otherwise empty.
///
/// Pings always contain the `ping_info` and `client_info` sections.
/// See [ping sections](https://mozilla.github.io/glean/book/user/pings/index.html#ping-sections)
/// for details.
///
/// # Arguments
///
/// * `reason` - The reason the ping is being submitted.
/// Must be one of the configured `reason_codes`.
fn submit(&self, reason: Option<&str>) {
crate::submit_ping(self, reason)
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
/// A Glean ping.
#[derive(Clone, Debug)]
pub struct PingType {
pub(crate) name: String,
pub(crate) ping_type: glean_core::metrics::PingType,
}
impl PingType {
/// Creates a new ping type.
///
/// # Arguments
///
/// * `name` - The name of the ping.
/// * `include_client_id` - Whether to include the client ID in the assembled ping when.
/// * `send_if_empty` - Whether the ping should be sent empty or not.
/// * `reason_codes` - The valid reason codes for this ping.
pub fn new<A: Into<String>>(
name: A,
include_client_id: bool,
send_if_empty: bool,
reason_codes: Vec<String>,
) -> Self {
let name = name.into();
let ping_type = glean_core::metrics::PingType::new(
name.clone(),
include_client_id,
send_if_empty,
reason_codes,
);
let me = Self { name, ping_type };
crate::register_ping_type(&me);
me
}
/// Collect and submit the ping for eventual upload.
///
/// This will collect all stored data to be included in the ping.
/// Data with lifetime `ping` will then be reset.
///
/// If the ping is configured with `send_if_empty = false`
/// and the ping currently contains no content,
/// it will not be queued for upload.
/// If the ping is configured with `send_if_empty = true`
/// it will be queued for upload even if otherwise empty.
///
/// Pings always contain the `ping_info` and `client_info` sections.
/// See [ping sections](https://mozilla.github.io/glean/book/user/pings/index.html#ping-sections)
/// for details.
///
/// # Arguments
///
/// * `reason` - The reason the ping is being submitted.
/// Must be one of the configured `reason_codes`.
pub fn submit(&self, reason: Option<&str>) {
crate::submit_ping(self, reason)
}
}

Просмотреть файл

@ -1,91 +0,0 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use inherent::inherent;
use std::sync::Arc;
use glean_core::metrics::MetricType;
use glean_core::ErrorType;
use crate::dispatcher;
// We need to wrap the glean-core type, otherwise if we try to implement
// the trait for the metric in `glean_core::metrics` we hit error[E0117]:
// only traits defined in the current crate can be implemented for arbitrary
// types.
/// This implements the developer facing API for recording Quantity metrics.
///
/// Instances of this class type are automatically generated by the parsers
/// at build time, allowing developers to record values that were previously
/// registered in the metrics.yaml file.
#[derive(Clone)]
pub struct QuantityMetric(pub(crate) Arc<glean_core::metrics::QuantityMetric>);
impl QuantityMetric {
/// The public constructor used by automatically generated metrics.
pub fn new(meta: glean_core::CommonMetricData) -> Self {
Self(Arc::new(glean_core::metrics::QuantityMetric::new(meta)))
}
}
#[inherent(pub)]
impl glean_core::traits::Quantity for QuantityMetric {
/// Sets to the specified value. Must be non-negative.
///
/// # Arguments
///
/// * `value` - The Quantity to set the metric to.
fn set(&self, value: i64) {
let metric = Arc::clone(&self.0);
dispatcher::launch(move || crate::with_glean(|glean| metric.set(glean, value)));
}
/// **Exported for test purposes.**
///
/// Gets the currently stored value.
///
/// This doesn't clear the stored value.
///
/// # Arguments
///
/// * `ping_name` - represents the optional name of the ping to retrieve the
/// metric for. Defaults to the first value in `send_in_pings`.
fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<i64> {
dispatcher::block_on_queue();
let queried_ping_name = ping_name
.into()
.unwrap_or_else(|| &self.0.meta().send_in_pings[0]);
crate::with_glean(|glean| self.0.test_get_value(glean, queried_ping_name))
}
/// **Exported for test purposes.**
///
/// Gets the number of recorded errors for the given metric and error type.
///
/// # Arguments
///
/// * `error` - The type of error
/// * `ping_name` - represents the optional name of the ping to retrieve the
/// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
/// The number of errors reported.
#[allow(dead_code)] // Remove after mozilla/glean#1328
fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
&self,
error: ErrorType,
ping_name: S,
) -> i32 {
dispatcher::block_on_queue();
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
.unwrap_or(0)
})
}
}

Просмотреть файл

@ -1,15 +1,15 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::HashMap;
use serde::Deserialize;
/// Deserialized experiment data.
#[derive(Clone, Deserialize, Debug)]
pub struct RecordedExperimentData {
/// The experiment's branch as set through `set_experiment_active`.
pub branch: String,
/// Any extra data associated with this experiment through `set_experiment_active`.
pub extra: Option<HashMap<String, String>>,
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::HashMap;
use serde::Deserialize;
/// Deserialized experiment data.
#[derive(Clone, Deserialize, Debug)]
pub struct RecordedExperimentData {
/// The experiment's branch as set through `set_experiment_active`.
pub branch: String,
/// Any extra data associated with this experiment through `set_experiment_active`.
pub extra: Option<HashMap<String, String>>,
}

194
third_party/rust/glean/src/private/string.rs поставляемый
Просмотреть файл

@ -1,98 +1,96 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use inherent::inherent;
use std::sync::Arc;
use glean_core::metrics::MetricType;
use glean_core::ErrorType;
use crate::dispatcher;
// We need to wrap the glean-core type, otherwise if we try to implement
// the trait for the metric in `glean_core::metrics` we hit error[E0117]:
// only traits defined in the current crate can be implemented for arbitrary
// types.
/// This implements the developer facing API for recording string metrics.
///
/// Instances of this class type are automatically generated by the parsers
/// at build time, allowing developers to record values that were previously
/// registered in the metrics.yaml file.
#[derive(Clone)]
pub struct StringMetric(pub(crate) Arc<glean_core::metrics::StringMetric>);
impl StringMetric {
/// The public constructor used by automatically generated metrics.
pub fn new(meta: glean_core::CommonMetricData) -> Self {
Self(Arc::new(glean_core::metrics::StringMetric::new(meta)))
}
}
#[inherent(pub)]
impl glean_core::traits::String for StringMetric {
/// Sets to the specified value.
///
/// # Arguments
///
/// * `value` - The string to set the metric to.
///
/// ## Notes
///
/// Truncates the value if it is longer than `MAX_STRING_LENGTH` bytes and logs an error.
fn set<S: Into<std::string::String>>(&self, value: S) {
let metric = Arc::clone(&self.0);
let new_value = value.into();
dispatcher::launch(move || crate::with_glean(|glean| metric.set(glean, new_value)));
}
/// **Exported for test purposes.**
///
/// Gets the currently stored value as a string.
///
/// This doesn't clear the stored value.
///
/// # Arguments
///
/// * `ping_name` - represents the optional name of the ping to retrieve the
/// metric for. Defaults to the first value in `send_in_pings`.
fn test_get_value<'a, S: Into<Option<&'a str>>>(
&self,
ping_name: S,
) -> Option<std::string::String> {
dispatcher::block_on_queue();
let queried_ping_name = ping_name
.into()
.unwrap_or_else(|| &self.0.meta().send_in_pings[0]);
crate::with_glean(|glean| self.0.test_get_value(glean, queried_ping_name))
}
/// **Exported for test purposes.**
///
/// Gets the number of recorded errors for the given metric and error type.
///
/// # Arguments
///
/// * `error` - The type of error
/// * `ping_name` - represents the optional name of the ping to retrieve the
/// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
/// The number of errors reported.
fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
&self,
error: ErrorType,
ping_name: S,
) -> i32 {
dispatcher::block_on_queue();
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
.unwrap_or(0)
})
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use inherent::inherent;
use std::sync::Arc;
use glean_core::metrics::MetricType;
use glean_core::ErrorType;
use crate::dispatcher;
// We need to wrap the glean-core type, otherwise if we try to implement
// the trait for the metric in `glean_core::metrics` we hit error[E0117]:
// only traits defined in the current crate can be implemented for arbitrary
// types.
/// This implements the developer facing API for recording string metrics.
///
/// Instances of this class type are automatically generated by the parsers
/// at build time, allowing developers to record values that were previously
/// registered in the metrics.yaml file.
#[derive(Clone)]
pub struct StringMetric(pub(crate) Arc<glean_core::metrics::StringMetric>);
impl StringMetric {
/// The public constructor used by automatically generated metrics.
pub fn new(meta: glean_core::CommonMetricData) -> Self {
Self(Arc::new(glean_core::metrics::StringMetric::new(meta)))
}
}
#[inherent(pub)]
impl glean_core::traits::String for StringMetric {
/// Sets to the specified value.
///
/// # Arguments
///
/// * `value` - The string to set the metric to.
///
/// ## Notes
///
/// Truncates the value if it is longer than `MAX_STRING_LENGTH` bytes and logs an error.
fn set<S: Into<std::string::String>>(&self, value: S) {
let metric = Arc::clone(&self.0);
let new_value = value.into();
dispatcher::launch(move || crate::with_glean(|glean| metric.set(glean, new_value)));
}
/// **Exported for test purposes.**
///
/// Gets the currently stored value as a string.
///
/// This doesn't clear the stored value.
///
/// # Arguments
///
/// * `ping_name` - represents the optional name of the ping to retrieve the
/// metric for. Defaults to the first value in `send_in_pings`.
fn test_get_value<'a, S: Into<Option<&'a str>>>(
&self,
ping_name: S,
) -> Option<std::string::String> {
dispatcher::block_on_queue();
let queried_ping_name = ping_name
.into()
.unwrap_or_else(|| &self.0.meta().send_in_pings[0]);
crate::with_glean(|glean| self.0.test_get_value(glean, queried_ping_name))
}
/// **Exported for test purposes.**
///
/// Gets the number of recorded errors for the given metric and error type.
///
/// # Arguments
///
/// * `error` - The type of error
/// * `ping_name` - represents the optional name of the ping to retrieve the
/// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
/// The number of errors reported.
fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
&self,
error: ErrorType,
ping_name: S,
) -> i32 {
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
.unwrap_or(0)
})
}
}

96
third_party/rust/glean/src/private/uuid.rs поставляемый
Просмотреть файл

@ -1,96 +0,0 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use inherent::inherent;
use std::sync::Arc;
use glean_core::metrics::MetricType;
use glean_core::ErrorType;
use crate::dispatcher;
// We need to wrap the glean-core type, otherwise if we try to implement
// the trait for the metric in `glean_core::metrics` we hit error[E0117]:
// only traits defined in the current crate can be implemented for arbitrary
// types.
/// This implements the developer facing API for recording UUID metrics.
///
/// Instances of this class type are automatically generated by the parsers
/// at build time, allowing developers to record values that were previously
/// registered in the metrics.yaml file.
#[derive(Clone)]
pub struct UuidMetric(pub(crate) Arc<glean_core::metrics::UuidMetric>);
impl UuidMetric {
/// The public constructor used by automatically generated metrics.
pub fn new(meta: glean_core::CommonMetricData) -> Self {
Self(Arc::new(glean_core::metrics::UuidMetric::new(meta)))
}
}
#[inherent(pub)]
impl glean_core::traits::Uuid for UuidMetric {
/// Sets to the specified value.
///
/// # Arguments
///
/// * `value` - The UUID to set the metric to.
fn set(&self, value: uuid::Uuid) {
let metric = Arc::clone(&self.0);
dispatcher::launch(move || crate::with_glean(|glean| metric.set(glean, value)));
}
/// Generates a new random UUID and sets the metric to it.
fn generate_and_set(&self) -> uuid::Uuid {
// TODO: We can use glean-core's generate_and_set after bug 1673017.
let uuid = uuid::Uuid::new_v4();
self.set(uuid);
uuid
}
/// **Exported for test purposes.**
///
/// Gets the currently stored value.
///
/// This doesn't clear the stored value.
///
/// # Arguments
///
/// * `ping_name` - represents the optional name of the ping to retrieve the
/// metric for. Defaults to the first value in `send_in_pings`.
fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<uuid::Uuid> {
dispatcher::block_on_queue();
let queried_ping_name = ping_name
.into()
.unwrap_or_else(|| &self.0.meta().send_in_pings[0]);
crate::with_glean(|glean| self.0.test_get_value(glean, queried_ping_name))
}
/// **Exported for test purposes.**
///
/// Gets the number of recorded errors for the given metric and error type.
///
/// # Arguments
///
/// * `error` - The type of error
/// * `ping_name` - represents the optional name of the ping to retrieve the
/// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
/// The number of errors reported.
fn test_get_num_recorded_errors<'a, S: Into<Option<&'a str>>>(
&self,
error: ErrorType,
ping_name: S,
) -> i32 {
crate::with_glean_mut(|glean| {
glean_core::test_get_num_recorded_errors(&glean, self.0.meta(), error, ping_name.into())
.unwrap_or(0)
})
}
}

110
third_party/rust/glean/src/system.rs поставляемый
Просмотреть файл

@ -1,55 +1,55 @@
// Copyright (c) 2017 The Rust Project Developers
// Licensed under the MIT License.
// Original license:
// https://github.com/RustSec/platforms-crate/blob/ebbd3403243067ba3096f31684557285e352b639/LICENSE-MIT
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Detect and expose `target_arch` as a constant
#[cfg(target_arch = "aarch64")]
/// `target_arch` when building this crate: `aarch64`
pub const ARCH: &str = "aarch64";
#[cfg(target_arch = "arm")]
/// `target_arch` when building this crate: `arm`
pub const ARCH: &str = "arm";
#[cfg(target_arch = "x86")]
/// `target_arch` when building this crate: `x86`
pub const ARCH: &str = "x86";
#[cfg(target_arch = "x86_64")]
/// `target_arch` when building this crate: `x86_64`
pub const ARCH: &str = "x86_64";
#[cfg(not(any(
target_arch = "aarch64",
target_arch = "arm",
target_arch = "x86",
target_arch = "x86_64"
)))]
/// `target_arch` when building this crate: unknown!
pub const ARCH: &str = "unknown";
// Copyright (c) 2017 The Rust Project Developers
// Licensed under the MIT License.
// Original license:
// https://github.com/RustSec/platforms-crate/blob/ebbd3403243067ba3096f31684557285e352b639/LICENSE-MIT
//
// Permission is hereby granted, free of charge, to any
// person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the
// Software without restriction, including without
// limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice
// shall be included in all copies or substantial portions
// of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Detect and expose `target_arch` as a constant
#[cfg(target_arch = "aarch64")]
/// `target_arch` when building this crate: `aarch64`
pub const ARCH: &str = "aarch64";
#[cfg(target_arch = "arm")]
/// `target_arch` when building this crate: `arm`
pub const ARCH: &str = "arm";
#[cfg(target_arch = "x86")]
/// `target_arch` when building this crate: `x86`
pub const ARCH: &str = "x86";
#[cfg(target_arch = "x86_64")]
/// `target_arch` when building this crate: `x86_64`
pub const ARCH: &str = "x86_64";
#[cfg(not(any(
target_arch = "aarch64",
target_arch = "arm",
target_arch = "x86",
target_arch = "x86_64"
)))]
/// `target_arch` when building this crate: unknown!
pub const ARCH: &str = "unknown";

886
third_party/rust/glean/src/test.rs поставляемый
Просмотреть файл

@ -1,483 +1,403 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::private::{BooleanMetric, CounterMetric};
use std::path::PathBuf;
use super::*;
use crate::common_test::{lock_test, new_glean, GLOBAL_APPLICATION_ID};
#[test]
fn send_a_ping() {
let _lock = lock_test();
let (s, r) = crossbeam_channel::bounded::<String>(1);
// Define a fake uploader that reports back the submission URL
// using a crossbeam channel.
#[derive(Debug)]
pub struct FakeUploader {
sender: crossbeam_channel::Sender<String>,
};
impl net::PingUploader for FakeUploader {
fn upload(
&self,
url: String,
_body: Vec<u8>,
_headers: Vec<(String, String)>,
) -> net::UploadResult {
self.sender.send(url).unwrap();
net::UploadResult::HttpStatus(200)
}
}
// Create a custom configuration to use a fake uploader.
let dir = tempfile::tempdir().unwrap();
let tmpname = dir.path().display().to_string();
let cfg = Configuration {
data_path: tmpname,
application_id: GLOBAL_APPLICATION_ID.into(),
upload_enabled: true,
max_events: None,
delay_ping_lifetime_io: false,
channel: Some("testing".into()),
server_endpoint: Some("invalid-test-host".into()),
uploader: Some(Box::new(FakeUploader { sender: s })),
};
let _t = new_glean(Some(cfg), true);
crate::dispatcher::block_on_queue();
// Define a new ping and submit it.
const PING_NAME: &str = "test-ping";
let custom_ping = private::PingType::new(PING_NAME, true, true, vec![]);
custom_ping.submit(None);
// Wait for the ping to arrive.
let url = r.recv().unwrap();
assert_eq!(url.contains(PING_NAME), true);
}
#[test]
fn disabling_upload_disables_metrics_recording() {
let _lock = lock_test();
let _t = new_glean(None, true);
crate::dispatcher::block_on_queue();
let metric = BooleanMetric::new(CommonMetricData {
name: "bool_metric".into(),
category: "test".into(),
send_in_pings: vec!["store1".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
});
crate::set_upload_enabled(false);
assert!(metric.test_get_value("store1").is_none())
}
#[test]
fn test_experiments_recording() {
let _lock = lock_test();
let _t = new_glean(None, true);
set_experiment_active("experiment_test".to_string(), "branch_a".to_string(), None);
let mut extra = HashMap::new();
extra.insert("test_key".to_string(), "value".to_string());
set_experiment_active(
"experiment_api".to_string(),
"branch_b".to_string(),
Some(extra),
);
assert!(test_is_experiment_active("experiment_test".to_string()));
assert!(test_is_experiment_active("experiment_api".to_string()));
set_experiment_inactive("experiment_test".to_string());
assert!(!test_is_experiment_active("experiment_test".to_string()));
assert!(test_is_experiment_active("experiment_api".to_string()));
let stored_data = test_get_experiment_data("experiment_api".to_string());
assert_eq!("branch_b", stored_data.branch);
assert_eq!("value", stored_data.extra.unwrap()["test_key"]);
}
#[test]
fn test_experiments_recording_before_glean_inits() {
let _lock = lock_test();
// Destroy the existing glean instance from glean-core so that we
// can test the pre-init queueing of the experiment api commands.
// This is doing the exact same thing that `reset_glean` is doing
// but without calling `initialize`.
if was_initialize_called() {
// We need to check if the Glean object (from glean-core) is
// initialized, otherwise this will crash on the first test
// due to bug 1675215 (this check can be removed once that
// bug is fixed).
if global_glean().is_some() {
with_glean_mut(|glean| {
glean.test_clear_all_stores();
glean.destroy_db();
});
}
// Allow us to go through initialization again.
INITIALIZE_CALLED.store(false, Ordering::SeqCst);
// Reset the dispatcher.
dispatcher::reset_dispatcher();
}
set_experiment_active(
"experiment_set_preinit".to_string(),
"branch_a".to_string(),
None,
);
set_experiment_active(
"experiment_preinit_disabled".to_string(),
"branch_a".to_string(),
None,
);
set_experiment_inactive("experiment_preinit_disabled".to_string());
let dir = tempfile::tempdir().unwrap();
let tmpname = dir.path().display().to_string();
test_reset_glean(
Configuration {
data_path: tmpname,
application_id: GLOBAL_APPLICATION_ID.into(),
upload_enabled: true,
max_events: None,
delay_ping_lifetime_io: false,
channel: Some("testing".into()),
server_endpoint: Some("invalid-test-host".into()),
uploader: None,
},
ClientInfoMetrics::unknown(),
false,
);
crate::dispatcher::block_on_queue();
assert!(test_is_experiment_active(
"experiment_set_preinit".to_string()
));
assert!(!test_is_experiment_active(
"experiment_preinit_disabled".to_string()
));
}
#[test]
#[ignore] // TODO: To be done in bug 1673645.
fn test_sending_of_foreground_background_pings() {
todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1672958.
fn test_sending_of_startup_baseline_ping() {
todo!()
}
#[test]
fn initialize_must_not_crash_if_data_dir_is_messed_up() {
let _lock = lock_test();
let dir = tempfile::tempdir().unwrap();
let tmpdirname = dir.path().display().to_string();
// Create a file in the temporary dir and use that as the
// name of the Glean data dir.
let file_path = PathBuf::from(tmpdirname).join("notadir");
std::fs::write(file_path.clone(), "test").expect("The test Glean dir file must be created");
let cfg = Configuration {
data_path: file_path.to_string_lossy().to_string(),
application_id: GLOBAL_APPLICATION_ID.into(),
upload_enabled: true,
max_events: None,
delay_ping_lifetime_io: false,
channel: Some("testing".into()),
server_endpoint: Some("invalid-test-host".into()),
uploader: None,
};
test_reset_glean(cfg, ClientInfoMetrics::unknown(), false);
// TODO(bug 1675215): ensure initialize runs through dispatcher.
// Glean init is async and, for this test, it bails out early due to
// an caused by not being able to create the data dir: we can do nothing
// but wait. Tests in other bindings use the dispatcher's test mode, which
// runs tasks sequentially on the main thread, so no sleep is required,
// because we're guaranteed that, once we reach this point, the full
// init potentially ran.
std::thread::sleep(std::time::Duration::from_secs(3));
}
#[test]
fn queued_recorded_metrics_correctly_record_during_init() {
let _lock = lock_test();
destroy_glean(true);
let metric = CounterMetric::new(CommonMetricData {
name: "counter_metric".into(),
category: "test".into(),
send_in_pings: vec!["store1".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
});
// This will queue 3 tasks that will add to the metric value once Glean is initialized
for _ in 0..3 {
metric.add(1);
}
// TODO: To be fixed in bug 1677150.
// Ensure that no value has been stored yet since the tasks have only been queued
// and not executed yet
// Calling `new_glean` here will cause Glean to be initialized and should cause the queued
// tasks recording metrics to execute
let _t = new_glean(None, false);
// Verify that the callback was executed by testing for the correct value
assert!(metric.test_get_value(None).is_some(), "Value must exist");
assert_eq!(3, metric.test_get_value(None).unwrap(), "Value must match");
}
#[test]
fn initializing_twice_is_a_noop() {
let _lock = lock_test();
let dir = tempfile::tempdir().unwrap();
let tmpname = dir.path().display().to_string();
test_reset_glean(
Configuration {
data_path: tmpname.clone(),
application_id: GLOBAL_APPLICATION_ID.into(),
upload_enabled: true,
max_events: None,
delay_ping_lifetime_io: false,
channel: Some("testing".into()),
server_endpoint: Some("invalid-test-host".into()),
uploader: None,
},
ClientInfoMetrics::unknown(),
true,
);
dispatcher::block_on_queue();
test_reset_glean(
Configuration {
data_path: tmpname,
application_id: GLOBAL_APPLICATION_ID.into(),
upload_enabled: true,
max_events: None,
delay_ping_lifetime_io: false,
channel: Some("testing".into()),
server_endpoint: Some("invalid-test-host".into()),
uploader: None,
},
ClientInfoMetrics::unknown(),
false,
);
// TODO(bug 1675215): ensure initialize runs through dispatcher.
// Glean init is async and, for this test, it bails out early due to
// being initialized: we can do nothing but wait. Tests in other bindings use
// the dispatcher's test mode, which runs tasks sequentially on the main
// thread, so no sleep is required. Bug 1675215 might fix this, as well.
std::thread::sleep(std::time::Duration::from_secs(3));
}
#[test]
#[ignore] // TODO: To be done in bug 1673668.
fn dont_handle_events_when_uninitialized() {
todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn the_app_channel_must_be_correctly_set_if_requested() {
todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn ping_collection_must_happen_after_concurrently_scheduled_metrics_recordings() {
todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn basic_metrics_should_be_cleared_when_disabling_uploading() {
todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn core_metrics_should_be_cleared_and_restored_when_disabling_and_enabling_uploading() {
todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn overflowing_the_task_queue_records_telemetry() {
todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn sending_deletion_ping_if_disabled_outside_of_run() {
todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn no_sending_of_deletion_ping_if_unchanged_outside_of_run() {
todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn test_sending_of_startup_baseline_ping_with_application_lifetime_metric() {
todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn test_dirty_flag_is_reset_to_false() {
todo!()
}
/// Setting a debug view tag before Glean is initialized must be queued
/// (not crash) and be applied to pings once Glean starts up.
#[test]
fn setting_debug_view_tag_before_initialization_should_not_crash() {
    let _lock = lock_test();
    destroy_glean(true);
    assert!(!was_initialize_called());

    // Define a fake uploader that reports back the submission headers
    // using a crossbeam channel.
    let (s, r) = crossbeam_channel::bounded::<Vec<(String, String)>>(1);

    #[derive(Debug)]
    pub struct FakeUploader {
        sender: crossbeam_channel::Sender<Vec<(String, String)>>,
    }
    impl net::PingUploader for FakeUploader {
        fn upload(
            &self,
            _url: String,
            _body: Vec<u8>,
            headers: Vec<(String, String)>,
        ) -> net::UploadResult {
            self.sender.send(headers).unwrap();
            net::UploadResult::HttpStatus(200)
        }
    }

    // Attempt to set a debug view tag before Glean is initialized.
    set_debug_view_tag("valid-tag");

    // Create a custom configuration to use a fake uploader.
    let dir = tempfile::tempdir().unwrap();
    let tmpname = dir.path().display().to_string();
    let cfg = Configuration {
        data_path: tmpname,
        application_id: GLOBAL_APPLICATION_ID.into(),
        upload_enabled: true,
        max_events: None,
        delay_ping_lifetime_io: false,
        channel: Some("testing".into()),
        server_endpoint: Some("invalid-test-host".into()),
        uploader: Some(Box::new(FakeUploader { sender: s })),
    };
    let _t = new_glean(Some(cfg), true);
    crate::dispatcher::block_on_queue();

    // Submit a baseline ping so the fake uploader runs.
    submit_ping_by_name("baseline", Some("background"));

    // Wait for the ping to arrive and check the pre-init tag was attached.
    let headers = r.recv().unwrap();
    assert_eq!(
        "valid-tag",
        headers.iter().find(|&kv| kv.0 == "X-Debug-ID").unwrap().1
    );
}
/// Setting source tags before Glean is initialized must be queued
/// (not crash) and be applied to pings once Glean starts up.
#[test]
fn setting_source_tags_before_initialization_should_not_crash() {
    let _lock = lock_test();
    destroy_glean(true);
    assert!(!was_initialize_called());

    // Define a fake uploader that reports back the submission headers
    // using a crossbeam channel.
    let (s, r) = crossbeam_channel::bounded::<Vec<(String, String)>>(1);

    #[derive(Debug)]
    pub struct FakeUploader {
        sender: crossbeam_channel::Sender<Vec<(String, String)>>,
    }
    impl net::PingUploader for FakeUploader {
        fn upload(
            &self,
            _url: String,
            _body: Vec<u8>,
            headers: Vec<(String, String)>,
        ) -> net::UploadResult {
            self.sender.send(headers).unwrap();
            net::UploadResult::HttpStatus(200)
        }
    }

    // Attempt to set source tags before Glean is initialized.
    set_source_tags(vec!["valid-tag1".to_string(), "valid-tag2".to_string()]);

    // Create a custom configuration to use a fake uploader.
    let dir = tempfile::tempdir().unwrap();
    let tmpname = dir.path().display().to_string();
    let cfg = Configuration {
        data_path: tmpname,
        application_id: GLOBAL_APPLICATION_ID.into(),
        upload_enabled: true,
        max_events: None,
        delay_ping_lifetime_io: false,
        channel: Some("testing".into()),
        server_endpoint: Some("invalid-test-host".into()),
        uploader: Some(Box::new(FakeUploader { sender: s })),
    };
    let _t = new_glean(Some(cfg), true);
    crate::dispatcher::block_on_queue();

    // Submit a baseline ping so the fake uploader runs.
    submit_ping_by_name("baseline", Some("background"));

    // Wait for the ping to arrive and check the pre-init tags were attached.
    let headers = r.recv().unwrap();
    assert_eq!(
        "valid-tag1,valid-tag2",
        headers
            .iter()
            .find(|&kv| kv.0 == "X-Source-Tags")
            .unwrap()
            .1
    );
}
// Placeholder: `#[ignore]` keeps this out of normal runs; `todo!()` would
// panic if executed. Implementation tracked in the referenced bug.
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn flipping_upload_enabled_respects_order_of_events() {
    todo!()
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::private::{BooleanMetric, CounterMetric};
use once_cell::sync::Lazy;
use std::path::PathBuf;
use std::sync::Mutex;
use super::*;
// Because glean_preview is a global-singleton, we need to run the tests one-by-one to avoid different tests stomping over each other.
// This is only an issue because we're resetting Glean, this cannot happen in normal use of the
// RLB.
//
// We use a global lock to force synchronization of all tests, even if run multi-threaded.
// This allows us to run without `--test-threads 1`.
static GLOBAL_LOCK: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));
// Application id shared by every test configuration in this file.
const GLOBAL_APPLICATION_ID: &str = "org.mozilla.rlb.test";
/// Create a new instance of Glean backed by a temporary directory.
///
/// The returned `TempDir` must be kept alive by the caller so that the
/// directory is not deleted while Glean is still using it.
fn new_glean(configuration: Option<Configuration>, clear_stores: bool) -> tempfile::TempDir {
    let dir = tempfile::tempdir().unwrap();
    let data_path = dir.path().display().to_string();

    // Use the provided configuration, or fall back to a default one
    // pointed at the fresh temporary directory.
    let cfg = configuration.unwrap_or_else(|| Configuration {
        data_path,
        application_id: GLOBAL_APPLICATION_ID.into(),
        upload_enabled: true,
        max_events: None,
        delay_ping_lifetime_io: false,
        channel: Some("testing".into()),
        server_endpoint: Some("invalid-test-host".into()),
        uploader: None,
    });

    crate::reset_glean(cfg, ClientInfoMetrics::unknown(), clear_stores);
    dir
}
/// Submitting a custom ping must hand it to the uploader, with the
/// ping name appearing in the submission URL.
#[test]
fn send_a_ping() {
    let _lock = GLOBAL_LOCK.lock().unwrap();
    env_logger::try_init().ok();

    let (s, r) = crossbeam_channel::bounded::<String>(1);

    // Define a fake uploader that reports back the submission URL
    // using a crossbeam channel.
    #[derive(Debug)]
    pub struct FakeUploader {
        sender: crossbeam_channel::Sender<String>,
    }
    impl net::PingUploader for FakeUploader {
        fn upload(
            &self,
            url: String,
            _body: Vec<u8>,
            _headers: Vec<(String, String)>,
        ) -> net::UploadResult {
            self.sender.send(url).unwrap();
            net::UploadResult::HttpStatus(200)
        }
    }

    // Create a custom configuration to use a fake uploader.
    let dir = tempfile::tempdir().unwrap();
    let tmpname = dir.path().display().to_string();
    let cfg = Configuration {
        data_path: tmpname,
        application_id: GLOBAL_APPLICATION_ID.into(),
        upload_enabled: true,
        max_events: None,
        delay_ping_lifetime_io: false,
        channel: Some("testing".into()),
        server_endpoint: Some("invalid-test-host".into()),
        uploader: Some(Box::new(FakeUploader { sender: s })),
    };
    let _t = new_glean(Some(cfg), true);
    crate::dispatcher::block_on_queue();

    // Define a new ping and submit it.
    const PING_NAME: &str = "test-ping";
    let custom_ping = private::PingType::new(PING_NAME, true, true, vec![]);
    custom_ping.submit(None);

    // Wait for the ping to arrive and check the submission URL.
    let url = r.recv().unwrap();
    assert!(url.contains(PING_NAME), "URL must contain the ping name: {}", url);
}
/// After upload is disabled, metrics must report no stored value.
#[test]
fn disabling_upload_disables_metrics_recording() {
    let _lock = GLOBAL_LOCK.lock().unwrap();
    env_logger::try_init().ok();

    let _t = new_glean(None, true);
    crate::dispatcher::block_on_queue();

    let bool_metric = BooleanMetric::new(CommonMetricData {
        category: "test".into(),
        name: "bool_metric".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::Application,
        dynamic_label: None,
    });

    crate::set_upload_enabled(false);

    assert!(bool_metric.test_get_value("store1").is_none());
}
/// Activating, deactivating and querying experiments through the
/// experiment API must be reflected by the test API.
#[test]
fn test_experiments_recording() {
    // Set up Glean for the test.
    let _lock = GLOBAL_LOCK.lock().unwrap();
    env_logger::try_init().ok();
    let _t = new_glean(None, true);

    // Activate one experiment without extras and one with extras.
    set_experiment_active("experiment_test".to_string(), "branch_a".to_string(), None);
    let mut extra_data = HashMap::new();
    extra_data.insert("test_key".to_string(), "value".to_string());
    set_experiment_active(
        "experiment_api".to_string(),
        "branch_b".to_string(),
        Some(extra_data),
    );
    assert!(test_is_experiment_active("experiment_test".to_string()));
    assert!(test_is_experiment_active("experiment_api".to_string()));

    // Deactivate the first one; only the second must remain active.
    set_experiment_inactive("experiment_test".to_string());
    assert!(!test_is_experiment_active("experiment_test".to_string()));
    assert!(test_is_experiment_active("experiment_api".to_string()));

    // The stored data must carry the branch and the extras.
    let stored = test_get_experiment_data("experiment_api".to_string());
    assert_eq!("branch_b", stored.branch);
    assert_eq!("value", stored.extra.unwrap()["test_key"]);
}
/// Experiment API calls made before Glean initializes must be queued
/// and applied once initialization completes.
#[test]
fn test_experiments_recording_before_glean_inits() {
    let _lock = GLOBAL_LOCK.lock().unwrap();
    env_logger::try_init().ok();
    // Destroy the existing glean instance from glean-core so that we
    // can test the pre-init queueing of the experiment api commands.
    // This is doing the exact same thing that `reset_glean` is doing
    // but without calling `initialize`.
    if was_initialize_called() {
        // We need to check if the Glean object (from glean-core) is
        // initialized, otherwise this will crash on the first test
        // due to bug 1675215 (this check can be removed once that
        // bug is fixed).
        if global_glean().is_some() {
            with_glean_mut(|glean| {
                glean.test_clear_all_stores();
                glean.destroy_db();
            });
        }
        // Allow us to go through initialization again.
        INITIALIZE_CALLED.store(false, Ordering::SeqCst);
        // Reset the dispatcher.
        dispatcher::reset_dispatcher();
    }
    // Queue experiment commands while Glean is torn down: one stays
    // active, one is activated and then deactivated again.
    set_experiment_active(
        "experiment_set_preinit".to_string(),
        "branch_a".to_string(),
        None,
    );
    set_experiment_active(
        "experiment_preinit_disabled".to_string(),
        "branch_a".to_string(),
        None,
    );
    set_experiment_inactive("experiment_preinit_disabled".to_string());
    // Initialize Glean; the queued commands above should now run.
    let dir = tempfile::tempdir().unwrap();
    let tmpname = dir.path().display().to_string();
    reset_glean(
        Configuration {
            data_path: tmpname,
            application_id: GLOBAL_APPLICATION_ID.into(),
            upload_enabled: true,
            max_events: None,
            delay_ping_lifetime_io: false,
            channel: Some("testing".into()),
            server_endpoint: Some("invalid-test-host".into()),
            uploader: None,
        },
        ClientInfoMetrics::unknown(),
        false,
    );
    crate::dispatcher::block_on_queue();
    // Only the experiment left active before init must be reported.
    assert!(test_is_experiment_active(
        "experiment_set_preinit".to_string()
    ));
    assert!(!test_is_experiment_active(
        "experiment_preinit_disabled".to_string()
    ));
}
// Placeholders: `#[ignore]` keeps these out of normal runs; `todo!()`
// would panic if executed. Implementation tracked in the referenced bugs.
#[test]
#[ignore] // TODO: To be done in bug 1673645.
fn test_sending_of_foreground_background_pings() {
    todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1672958.
fn test_sending_of_startup_baseline_ping() {
    todo!()
}
/// Initialization must not crash when the configured data directory
/// path exists but is a file rather than a directory.
#[test]
fn initialize_must_not_crash_if_data_dir_is_messed_up() {
    let _lock = GLOBAL_LOCK.lock().unwrap();
    env_logger::try_init().ok();

    let dir = tempfile::tempdir().unwrap();
    let tmpdirname = dir.path().display().to_string();
    // Create a file in the temporary dir and use that as the
    // name of the Glean data dir.
    let file_path = PathBuf::from(tmpdirname).join("notadir");
    std::fs::write(&file_path, "test").expect("The test Glean dir file must be created");

    let cfg = Configuration {
        data_path: file_path.to_string_lossy().to_string(),
        application_id: GLOBAL_APPLICATION_ID.into(),
        upload_enabled: true,
        max_events: None,
        delay_ping_lifetime_io: false,
        channel: Some("testing".into()),
        server_endpoint: Some("invalid-test-host".into()),
        uploader: None,
    };
    reset_glean(cfg, ClientInfoMetrics::unknown(), false);

    // TODO(bug 1675215): ensure initialize runs through dispatcher.
    // Glean init is async and, for this test, it bails out early due to
    // an error caused by not being able to create the data dir: we can do
    // nothing but wait. Tests in other bindings use the dispatcher's test
    // mode, which runs tasks sequentially on the main thread, so no sleep
    // is required, because we're guaranteed that, once we reach this
    // point, the full init potentially ran.
    std::thread::sleep(std::time::Duration::from_secs(3));
}
/// Metric recordings queued before initialization must be applied once
/// Glean initializes.
#[test]
fn queued_recorded_metrics_correctly_record_during_init() {
    let _lock = GLOBAL_LOCK.lock().unwrap();
    env_logger::try_init().ok();
    destroy_glean(true);

    let counter = CounterMetric::new(CommonMetricData {
        category: "test".into(),
        name: "counter_metric".into(),
        send_in_pings: vec!["store1".into()],
        disabled: false,
        lifetime: Lifetime::Application,
        dynamic_label: None,
    });

    // This queues 3 tasks that will add to the metric value once Glean
    // is initialized.
    (0..3).for_each(|_| counter.add(1));

    // TODO: To be fixed in bug 1677150.
    // Ensure that no value has been stored yet since the tasks have only
    // been queued and not executed yet.

    // Calling `new_glean` here will cause Glean to be initialized and
    // should cause the queued tasks recording metrics to execute.
    let _ = new_glean(None, false);

    // Verify that the queued adds were applied.
    assert!(counter.test_get_value(None).is_some(), "Value must exist");
    assert_eq!(3, counter.test_get_value(None).unwrap(), "Value must match");
}
/// Initializing Glean a second time with the same data directory must
/// be a no-op and must not crash.
#[test]
fn initializing_twice_is_a_noop() {
    let _lock = GLOBAL_LOCK.lock().unwrap();
    env_logger::try_init().ok();

    let dir = tempfile::tempdir().unwrap();
    let tmpname = dir.path().display().to_string();

    // Both initializations use the same data directory.
    let make_config = |data_path: String| Configuration {
        data_path,
        application_id: GLOBAL_APPLICATION_ID.into(),
        upload_enabled: true,
        max_events: None,
        delay_ping_lifetime_io: false,
        channel: Some("testing".into()),
        server_endpoint: Some("invalid-test-host".into()),
        uploader: None,
    };

    reset_glean(make_config(tmpname.clone()), ClientInfoMetrics::unknown(), true);
    dispatcher::block_on_queue();
    reset_glean(make_config(tmpname), ClientInfoMetrics::unknown(), false);

    // TODO(bug 1675215): ensure initialize runs through dispatcher.
    // Glean init is async and, for this test, it bails out early due to
    // being initialized: we can do nothing but wait. Tests in other bindings use
    // the dispatcher's test mode, which runs tasks sequentially on the main
    // thread, so no sleep is required. Bug 1675215 might fix this, as well.
    std::thread::sleep(std::time::Duration::from_secs(3));
}
// The tests below are placeholders: `#[ignore]` keeps them out of normal
// runs, and `todo!()` would panic if they were executed. Implementation is
// tracked in the bug referenced on each stub.
#[test]
#[ignore] // TODO: To be done in bug 1673668.
fn dont_handle_events_when_uninitialized() {
    todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn the_app_channel_must_be_correctly_set_if_requested() {
    todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn ping_collection_must_happen_after_concurrently_scheduled_metrics_recordings() {
    todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn basic_metrics_should_be_cleared_when_disabling_uploading() {
    todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn core_metrics_should_be_cleared_and_restored_when_disabling_and_enabling_uploading() {
    todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn overflowing_the_task_queue_records_telemetry() {
    todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn sending_deletion_ping_if_disabled_outside_of_run() {
    todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn no_sending_of_deletion_ping_if_unchanged_outside_of_run() {
    todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn test_sending_of_startup_baseline_ping_with_application_lifetime_metric() {
    todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn test_dirty_flag_is_reset_to_false() {
    todo!()
}
#[test]
#[ignore] // TODO: To be done in bug 1673672.
fn flipping_upload_enabled_respects_order_of_events() {
    todo!()
}

242
third_party/rust/glean/tests/schema.rs поставляемый
Просмотреть файл

@ -1,121 +1,121 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::io::Read;
use flate2::read::GzDecoder;
use jsonschema_valid::{self, schemas::Draft};
use serde_json::Value;
use glean::{ClientInfoMetrics, Configuration};
// The vendored JSON schema every Glean ping payload must validate against.
const SCHEMA_JSON: &str = include_str!("../../../glean.1.schema.json");
// Parse the vendored ping schema into a JSON value.
fn load_schema() -> Value {
    serde_json::from_str(SCHEMA_JSON).unwrap()
}
const GLOBAL_APPLICATION_ID: &str = "org.mozilla.glean.test.app";
/// Create a new instance of Glean backed by a temporary directory.
///
/// The returned `TempDir` must be kept alive by the caller so that the
/// directory is not deleted while Glean is still using it.
fn new_glean(configuration: Option<Configuration>) -> tempfile::TempDir {
    let dir = tempfile::tempdir().unwrap();
    let data_path = dir.path().display().to_string();

    // Use the provided configuration, or fall back to a default one
    // pointed at the fresh temporary directory.
    let cfg = configuration.unwrap_or_else(|| Configuration {
        data_path,
        application_id: GLOBAL_APPLICATION_ID.into(),
        upload_enabled: true,
        max_events: None,
        delay_ping_lifetime_io: false,
        channel: Some("testing".into()),
        server_endpoint: Some("invalid-test-host".into()),
        uploader: None,
    });

    let client_info = ClientInfoMetrics {
        app_build: env!("CARGO_PKG_VERSION").to_string(),
        app_display_version: env!("CARGO_PKG_VERSION").to_string(),
    };
    glean::initialize(cfg, client_info);
    dir
}
/// Submit a custom ping and validate its JSON payload against the
/// vendored Glean ping schema.
#[test]
fn validate_against_schema() {
    let schema = load_schema();

    let (s, r) = crossbeam_channel::bounded::<Vec<u8>>(1);

    // Define a fake uploader that reports back the submitted payload
    // using a crossbeam channel.
    #[derive(Debug)]
    pub struct ValidatingUploader {
        sender: crossbeam_channel::Sender<Vec<u8>>,
    }
    impl glean::net::PingUploader for ValidatingUploader {
        fn upload(
            &self,
            _url: String,
            body: Vec<u8>,
            _headers: Vec<(String, String)>,
        ) -> glean::net::UploadResult {
            self.sender.send(body).unwrap();
            glean::net::UploadResult::HttpStatus(200)
        }
    }

    // Create a custom configuration to use a validating uploader.
    let dir = tempfile::tempdir().unwrap();
    let tmpname = dir.path().display().to_string();
    let cfg = Configuration {
        data_path: tmpname,
        application_id: GLOBAL_APPLICATION_ID.into(),
        upload_enabled: true,
        max_events: None,
        delay_ping_lifetime_io: false,
        channel: Some("testing".into()),
        server_endpoint: Some("invalid-test-host".into()),
        uploader: Some(Box::new(ValidatingUploader { sender: s })),
    };
    // Keep the returned TempDir alive for the duration of the test;
    // `let _ = ...` would drop (and delete) it immediately.
    let _t = new_glean(Some(cfg));

    // Define a new ping and submit it.
    const PING_NAME: &str = "test-ping";
    let custom_ping = glean::private::PingType::new(PING_NAME, true, true, vec![]);
    custom_ping.submit(None);

    // Wait for the ping to arrive.
    let raw_body = r.recv().unwrap();

    // Decode the body: try gzip first, then fall back to plain UTF-8 JSON.
    let mut gzip_decoder = GzDecoder::new(&raw_body[..]);
    let mut s = String::with_capacity(raw_body.len());
    let data = gzip_decoder
        .read_to_string(&mut s)
        .ok()
        .map(|_| &s[..])
        .or_else(|| std::str::from_utf8(&raw_body).ok())
        .and_then(|payload| serde_json::from_str(payload).ok())
        .unwrap();

    // Now validate against the vendored schema.
    let cfg = jsonschema_valid::Config::from_schema(&schema, Some(Draft::Draft6)).unwrap();
    if let Err(e) = cfg.validate(&data) {
        let errors = e.map(|e| format!("{}", e)).collect::<Vec<_>>();
        panic!("Data: {:#?}\nErrors: {:#?}", data, errors);
    }
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::io::Read;
use flate2::read::GzDecoder;
use jsonschema_valid::{self, schemas::Draft};
use serde_json::Value;
use glean::{ClientInfoMetrics, Configuration};
const SCHEMA_JSON: &str = include_str!("../../../glean.1.schema.json");
// Parse the vendored ping schema into a JSON value.
fn load_schema() -> Value {
    serde_json::from_str(SCHEMA_JSON).unwrap()
}
const GLOBAL_APPLICATION_ID: &str = "org.mozilla.glean.test.app";
/// Create a new instance of Glean backed by a temporary directory.
///
/// The returned `TempDir` must be kept alive by the caller so that the
/// directory is not deleted while Glean is still using it.
fn new_glean(configuration: Option<Configuration>) -> tempfile::TempDir {
    let dir = tempfile::tempdir().unwrap();
    let data_path = dir.path().display().to_string();

    // Use the provided configuration, or fall back to a default one
    // pointed at the fresh temporary directory.
    let cfg = configuration.unwrap_or_else(|| Configuration {
        data_path,
        application_id: GLOBAL_APPLICATION_ID.into(),
        upload_enabled: true,
        max_events: None,
        delay_ping_lifetime_io: false,
        channel: Some("testing".into()),
        server_endpoint: Some("invalid-test-host".into()),
        uploader: None,
    });

    let client_info = ClientInfoMetrics {
        app_build: env!("CARGO_PKG_VERSION").to_string(),
        app_display_version: env!("CARGO_PKG_VERSION").to_string(),
    };
    glean::initialize(cfg, client_info);
    dir
}
/// Submit a custom ping and validate its JSON payload against the
/// vendored Glean ping schema.
#[test]
fn validate_against_schema() {
    let schema = load_schema();

    let (s, r) = crossbeam_channel::bounded::<Vec<u8>>(1);

    // Define a fake uploader that reports back the submitted payload
    // using a crossbeam channel.
    #[derive(Debug)]
    pub struct ValidatingUploader {
        sender: crossbeam_channel::Sender<Vec<u8>>,
    }
    impl glean::net::PingUploader for ValidatingUploader {
        fn upload(
            &self,
            _url: String,
            body: Vec<u8>,
            _headers: Vec<(String, String)>,
        ) -> glean::net::UploadResult {
            self.sender.send(body).unwrap();
            glean::net::UploadResult::HttpStatus(200)
        }
    }

    // Create a custom configuration to use a validating uploader.
    let dir = tempfile::tempdir().unwrap();
    let tmpname = dir.path().display().to_string();
    let cfg = Configuration {
        data_path: tmpname,
        application_id: GLOBAL_APPLICATION_ID.into(),
        upload_enabled: true,
        max_events: None,
        delay_ping_lifetime_io: false,
        channel: Some("testing".into()),
        server_endpoint: Some("invalid-test-host".into()),
        uploader: Some(Box::new(ValidatingUploader { sender: s })),
    };
    // Keep the returned TempDir alive for the duration of the test;
    // `let _ = ...` would drop (and delete) it immediately.
    let _t = new_glean(Some(cfg));

    // Define a new ping and submit it.
    const PING_NAME: &str = "test-ping";
    let custom_ping = glean::private::PingType::new(PING_NAME, true, true, vec![]);
    custom_ping.submit(None);

    // Wait for the ping to arrive.
    let raw_body = r.recv().unwrap();

    // Decode the body: try gzip first, then fall back to plain UTF-8 JSON.
    let mut gzip_decoder = GzDecoder::new(&raw_body[..]);
    let mut s = String::with_capacity(raw_body.len());
    let data = gzip_decoder
        .read_to_string(&mut s)
        .ok()
        .map(|_| &s[..])
        .or_else(|| std::str::from_utf8(&raw_body).ok())
        .and_then(|payload| serde_json::from_str(payload).ok())
        .unwrap();

    // Now validate against the vendored schema.
    let cfg = jsonschema_valid::Config::from_schema(&schema, Some(Draft::Draft6)).unwrap();
    if let Err(e) = cfg.validate(&data) {
        let errors = e.map(|e| format!("{}", e)).collect::<Vec<_>>();
        panic!("Data: {:#?}\nErrors: {:#?}", data, errors);
    }
}

Просмотреть файл

@ -6,8 +6,8 @@ edition = "2018"
license = "MPL-2.0"
[dependencies]
glean = "33.5.0"
glean-core = { version = "33.5.0", features = ["rkv-safe-mode"] }
glean = "33.4.0"
glean-core = { version = "33.4.0", features = ["rkv-safe-mode"] }
log = "0.4"
nserror = { path = "../../../xpcom/rust/nserror" }
nsstring = { path = "../../../xpcom/rust/nsstring" }

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше