Backed out 3 changesets (bug 1675534) for causing build bustage. CLOSED TREE

Backed out changeset ee45a6abdadf (bug 1675534)
Backed out changeset 2c75ab4daa47 (bug 1675534)
Backed out changeset 35d4d9bddadd (bug 1675534)
Csoregi Natalia 2020-11-09 13:36:09 +02:00
Parent ed50c11121
Commit d57cf574ea
203 changed files: 14511 additions and 23784 deletions

Cargo.lock (generated, 60 lines changed)

@@ -1,11 +1,5 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "adler"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
[[package]]
name = "adler32"
version = "1.0.4"
@@ -268,9 +262,9 @@ checksum = "88ceb0d16c4fd0e42876e298d7d3ce3780dd9ebdcbe4199816a32c77e08597ff"
[[package]]
name = "bincode"
version = "1.3.1"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d"
checksum = "5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf"
dependencies = [
"byteorder",
"serde",
@@ -1465,11 +1459,11 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d"
[[package]]
name = "flate2"
version = "1.0.19"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7411863d55df97a419aa64cb4d2f167103ea9d767e2c54a1868b7ac3f6b47129"
checksum = "ad3c5233c9a940c8719031b423d7e6c16af66e031cb0420b0896f5245bf181d3"
dependencies = [
"cfg-if 1.0.0",
"cfg-if 0.1.10",
"crc32fast",
"libc",
"miniz_oxide",
@@ -1573,7 +1567,6 @@ dependencies = [
"chrono",
"crossbeam-channel",
"ffi-support",
"glean",
"glean-core",
"log",
"nsstring",
@@ -1598,7 +1591,6 @@ version = "0.1.0"
dependencies = [
"cstr",
"fog",
"glean",
"glean-core",
"log",
"nserror",
@@ -2106,25 +2098,11 @@ dependencies = [
"gl_generator",
]
[[package]]
name = "glean"
version = "33.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b392b31d576ae3b226b692cbc580b1a36700de162ae5570cfe2694f38edbb44"
dependencies = [
"crossbeam-channel",
"glean-core",
"inherent",
"log",
"once_cell",
"thiserror",
]
[[package]]
name = "glean-core"
version = "33.1.2"
version = "31.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30fcf688d2750a66d5a8239b27344459a5a4a4f6fb80380612c4ccc0aac9f5db"
checksum = "c4bade418d78159f19dcd26e380b48ff04f43b8cb457aa64711a15693c09e481"
dependencies = [
"bincode",
"chrono",
@@ -2132,7 +2110,7 @@ dependencies = [
"flate2",
"log",
"once_cell",
"rkv 0.15.0",
"rkv 0.10.4",
"serde",
"serde_json",
"uuid",
@@ -2448,17 +2426,6 @@ dependencies = [
"adler32",
]
[[package]]
name = "inherent"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2db418475edf9eb55fec92b7527be6d9a7b880bff6651cb0c0345af57f1cadf"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "inplace_it"
version = "0.3.2"
@@ -3089,12 +3056,11 @@ dependencies = [
[[package]]
name = "miniz_oxide"
version = "0.4.3"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
checksum = "7108aff85b876d06f22503dcce091e29f76733b2bfdd91eebce81f5e68203a10"
dependencies = [
"adler",
"autocfg 1.0.1",
"adler32",
]
[[package]]
@@ -3637,9 +3603,9 @@ dependencies = [
[[package]]
name = "once_cell"
version = "1.4.1"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad"
checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d"
[[package]]
name = "opaque-debug"


@@ -62,14 +62,14 @@ commands:
jobs:
build-36:
docker:
- image: circleci/python:3.6.12
- image: circleci/python:3.6.9
steps:
- test-start
- test-python-version
build-36-min:
docker:
- image: circleci/python:3.6.12
- image: circleci/python:3.6.9
steps:
- test-start
- test-min-requirements
@@ -77,7 +77,7 @@ jobs:
build-37:
docker:
- image: circleci/python:3.7.9
- image: circleci/python:3.7.5
steps:
- test-start
- test-python-version
@@ -92,26 +92,19 @@ jobs:
build-38:
docker:
- image: circleci/python:3.8.5
- image: circleci/python:3.8.0
steps:
- test-start
- test-python-version
build-38-min:
docker:
- image: circleci/python:3.8.5
- image: circleci/python:3.8.0
steps:
- test-start
- test-min-requirements
- test-python-version
build-39:
docker:
- image: circleci/python:3.9.0rc1
steps:
- test-start
- test-python-version
docs-deploy:
docker:
- image: node:8.10.0
@@ -181,10 +174,6 @@ workflows:
filters:
tags:
only: /.*/
- build-39:
filters:
tags:
only: /.*/
- docs-deploy:
requires:
- build-37


@@ -141,7 +141,7 @@ Get a clean main branch with all of the changes from `upstream`::
- Make sure all your changes are committed.
- Push the changes upstream. (Normally pushing directly without review is frowned upon, but the `main` branch is protected from force pushes and release tagging requires the same permissions as pushing to `main`)::
- Push the changes upstream::
$ git push upstream main
@@ -149,12 +149,8 @@ Get a clean main branch with all of the changes from `upstream`::
- Make the release on GitHub using [this link](https://github.com/mozilla/glean_parser/releases/new)
- Both the tag and the release title should be in the form `vX.Y.Z`.
- Enter the new version in the form `vX.Y.Z`.
- Copy and paste the relevant part of the `HISTORY.rst` file into the description.
- Tagging the release will trigger a CI workflow which will build the distribution of `glean_parser` and publish it to PyPI.
The continuous integration system will then automatically deploy to PyPI.
See also the [instructions for updating the version of `glean_parser` used by the Glean SDK](https://mozilla.github.io/glean/book/dev/upgrading-glean-parser.html).

third_party/python/glean_parser/HISTORY.rst (vendored, 12 lines changed)

@@ -5,18 +5,6 @@ History
Unreleased
----------
1.29.0 (2020-10-07)
-------------------
* **Breaking change:** `glean_parser` will now return an error code when any of the input files do not exist (unless the `--allow-missing-files` flag is passed).
* Generated code now includes a comment next to each metric containing the name of the metric in its original `snake_case` form.
* When metrics don't provide a `unit` parameter, it is not included in the output (as provided by probe-scraper).
1.28.6 (2020-09-24)
-------------------
* BUGFIX: Ensure Kotlin arguments are deterministically ordered
1.28.5 (2020-09-14)
-------------------

third_party/python/glean_parser/Makefile (vendored, 4 lines changed)

@@ -36,7 +36,9 @@ clean-test: ## remove test and coverage artifacts
lint: ## check style with flake8
python3 -m flake8 glean_parser tests
python3 -m black --check glean_parser tests setup.py
if python3 --version | grep 'Python 3\.[678]\..*'; then \
python3 -m black --check glean_parser tests setup.py; \
fi
python3 -m yamllint glean_parser tests
python3 -m mypy glean_parser

third_party/python/glean_parser/PKG-INFO (vendored, 15 lines changed)

@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: glean_parser
Version: 1.29.0
Version: 1.28.5
Summary: Parser tools for Mozilla's Glean telemetry
Home-page: https://github.com/mozilla/glean_parser
Author: Michael Droettboom
@@ -69,18 +69,6 @@ Description: ============
Unreleased
----------
1.29.0 (2020-10-07)
-------------------
* **Breaking change:** `glean_parser` will now return an error code when any of the input files do not exist (unless the `--allow-missing-files` flag is passed).
* Generated code now includes a comment next to each metric containing the name of the metric in its original `snake_case` form.
* When metrics don't provide a `unit` parameter, it is not included in the output (as provided by probe-scraper).
1.28.6 (2020-09-24)
-------------------
* BUGFIX: Ensure Kotlin arguments are deterministically ordered
1.28.5 (2020-09-14)
-------------------
@@ -487,4 +475,3 @@ Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9


@@ -53,12 +53,7 @@ from . import validate_ping
"Should only be set when building the Glean library itself."
),
)
@click.option(
"--allow-missing-files",
is_flag=True,
help=("Do not treat missing input files as an error."),
)
def translate(input, format, output, option, allow_reserved, allow_missing_files):
def translate(input, format, output, option, allow_reserved):
"""
Translate metrics.yaml and pings.yaml files to other formats.
"""
@@ -73,10 +68,7 @@ def translate(input, format, output, option, allow_reserved, allow_missing_files
format,
Path(output),
option_dict,
{
"allow_reserved": allow_reserved,
"allow_missing_files": allow_missing_files,
},
{"allow_reserved": allow_reserved},
)
)
@@ -120,24 +112,11 @@ def check(schema):
"Should only be set when building the Glean library itself."
),
)
@click.option(
"--allow-missing-files",
is_flag=True,
help=("Do not treat missing input files as an error."),
)
def glinter(input, allow_reserved, allow_missing_files):
def glinter(input, allow_reserved):
"""
Runs a linter over the metrics.
"""
sys.exit(
lint.glinter(
[Path(x) for x in input],
{
"allow_reserved": allow_reserved,
"allow_missing_files": allow_missing_files,
},
)
)
sys.exit(lint.glinter([Path(x) for x in input], {"allow_reserved": allow_reserved}))
@click.group()
@@ -152,18 +131,5 @@ main.add_command(check)
main.add_command(glinter)
def main_wrapper(args=None):
"""
A simple wrapper around click's `main` to display the glean_parser version
when there is an error.
"""
try:
main(args=args)
except SystemExit as e:
if e.code != 0:
print(f"ERROR running glean_parser v{glean_parser.__version__}")
raise
if __name__ == "__main__":
main_wrapper() # pragma: no cover
sys.exit(main()) # pragma: no cover


@@ -11,7 +11,7 @@ Outputter to generate C# code for metrics.
import enum
import json
from pathlib import Path
from typing import Any, Dict, List, Optional, Union # noqa
from typing import Any, Dict, List, Union # noqa
from . import metrics
from . import pings
@@ -104,7 +104,7 @@ def class_name(obj_type: str) -> str:
def output_csharp(
objs: metrics.ObjectTree, output_dir: Path, options: Optional[Dict[str, Any]] = None
objs: metrics.ObjectTree, output_dir: Path, options: Dict[str, Any] = {}
) -> None:
"""
Given a tree of objects, output C# code to `output_dir`.
@@ -120,9 +120,6 @@
This is where glean objects will be imported from in the generated
code.
"""
if options is None:
options = {}
template = util.get_jinja2_template(
"csharp.jinja2",
filters=(
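A number of hunks in this and the following files revert `Optional[Dict[str, Any]] = None` signatures back to mutable `= {}` defaults. For context, a minimal standalone sketch of the pitfall the `None`-sentinel idiom guards against; the function names here are illustrative, not from glean_parser:

```python
def collect(item, seen=[]):  # the list is created once, at definition time
    seen.append(item)
    return seen

print(collect("a"))  # ['a']
print(collect("b"))  # ['a', 'b'] -- state leaked from the first call

def collect_safe(item, seen=None):
    if seen is None:  # the sentinel idiom used by the backed-out changesets
        seen = []
    seen.append(item)
    return seen

print(collect_safe("a"))  # ['a']
print(collect_safe("b"))  # ['b'] -- each call gets a fresh list
```

Since the restored functions only read from their default dicts, the shared object is harmless in practice; the sentinel form simply removes the footgun.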


@@ -12,7 +12,7 @@ from collections import OrderedDict
import enum
import json
from pathlib import Path
from typing import Any, Dict, List, Optional, Union # noqa
from typing import Any, Dict, List, Union # noqa
from . import metrics
from . import pings
@@ -102,7 +102,7 @@ def class_name(obj_type: str) -> str:
def output_gecko_lookup(
objs: metrics.ObjectTree, output_dir: Path, options: Optional[Dict[str, Any]] = None
objs: metrics.ObjectTree, output_dir: Path, options: Dict[str, Any] = {}
) -> None:
"""
Given a tree of objects, generate a Kotlin map between Gecko histograms and
@@ -119,9 +119,6 @@
This is where glean objects will be imported from in the generated
code.
"""
if options is None:
options = {}
template = util.get_jinja2_template(
"kotlin.geckoview.jinja2",
filters=(
@@ -200,7 +197,7 @@ def output_gecko_lookup(
def output_kotlin(
objs: metrics.ObjectTree, output_dir: Path, options: Optional[Dict[str, Any]] = None
objs: metrics.ObjectTree, output_dir: Path, options: Dict[str, Any] = {}
) -> None:
"""
Given a tree of objects, output Kotlin code to `output_dir`.
@@ -216,9 +213,6 @@
This is where glean objects will be imported from in the generated
code.
"""
if options is None:
options = {}
template = util.get_jinja2_template(
"kotlin.jinja2",
filters=(


@@ -7,16 +7,7 @@ import enum
from pathlib import Path
import re
import sys
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Iterable,
Optional,
Tuple,
) # noqa
from typing import Any, Callable, Dict, Generator, List, Iterable, Tuple, Union # noqa
from . import metrics
@@ -105,7 +96,7 @@ def check_common_prefix(
def check_unit_in_name(
metric: metrics.Metric, parser_config: Dict[str, Any]
metric: metrics.Metric, parser_config: Dict[str, Any] = {}
) -> LintGenerator:
"""
The metric name ends in a unit.
@@ -198,7 +189,7 @@ def check_category_generic(
def check_bug_number(
metric: metrics.Metric, parser_config: Dict[str, Any]
metric: metrics.Metric, parser_config: Dict[str, Any] = {}
) -> LintGenerator:
number_bugs = [str(bug) for bug in metric.bugs if isinstance(bug, int)]
@@ -211,7 +202,7 @@ def check_bug_number(
def check_valid_in_baseline(
metric: metrics.Metric, parser_config: Dict[str, Any]
metric: metrics.Metric, parser_config: Dict[str, Any] = {}
) -> LintGenerator:
allow_reserved = parser_config.get("allow_reserved", False)
@@ -223,7 +214,7 @@ def check_valid_in_baseline(
def check_misspelled_pings(
metric: metrics.Metric, parser_config: Dict[str, Any]
metric: metrics.Metric, parser_config: Dict[str, Any] = {}
) -> LintGenerator:
for ping in metric.send_in_pings:
for builtin in pings.RESERVED_PING_NAMES:
@@ -233,7 +224,7 @@ def check_misspelled_pings(
def check_user_lifetime_expiration(
metric: metrics.Metric, parser_config: Dict[str, Any]
metric: metrics.Metric, parser_config: Dict[str, Any] = {}
) -> LintGenerator:
if metric.lifetime == metrics.Lifetime.user and metric.expires != "never":
@@ -245,7 +236,7 @@ def check_user_lifetime_expiration(
def check_expired_date(
metric: metrics.Metric, parser_config: Dict[str, Any]
metric: metrics.Metric, parser_config: Dict[str, Any] = {}
) -> LintGenerator:
try:
metric.validate_expires()
@@ -254,7 +245,7 @@ def check_expired_date(
def check_expired_metric(
metric: metrics.Metric, parser_config: Dict[str, Any]
metric: metrics.Metric, parser_config: Dict[str, Any] = {}
) -> LintGenerator:
if metric.is_expired():
yield ("Metric has expired. Please consider removing it.")
@@ -300,9 +291,7 @@ class GlinterNit:
def lint_metrics(
objs: metrics.ObjectTree,
parser_config: Optional[Dict[str, Any]] = None,
file=sys.stderr,
objs: metrics.ObjectTree, parser_config: Dict[str, Any] = {}, file=sys.stderr
) -> List[GlinterNit]:
"""
Performs glinter checks on a set of metrics objects.
@@ -311,9 +300,6 @@ def lint_metrics(
:param file: The stream to write errors to.
:returns: List of nits.
"""
if parser_config is None:
parser_config = {}
nits: List[GlinterNit] = []
for (category_name, category) in sorted(list(objs.items())):
if category_name == "pings":
@@ -336,7 +322,7 @@ def lint_metrics(
for msg in cat_check_func(category_name, category_metrics.values())
)
for (_metric_name, metric) in sorted(list(category_metrics.items())):
for (metric_name, metric) in sorted(list(category_metrics.items())):
for (check_name, (check_func, check_type)) in INDIVIDUAL_CHECKS.items():
new_nits = list(check_func(metric, parser_config))
if len(new_nits):
@@ -368,11 +354,7 @@ def lint_metrics(
return nits
def lint_yaml_files(
input_filepaths: Iterable[Path],
file=sys.stderr,
parser_config: Dict[str, Any] = None,
) -> List:
def lint_yaml_files(input_filepaths: Iterable[Path], file=sys.stderr) -> List:
"""
Performs glinter YAML lint on a set of files.
@@ -381,16 +363,10 @@ def lint_yaml_files(
:returns: List of nits.
"""
if parser_config is None:
parser_config = {}
# Generic type since the actual type comes from yamllint, which we don't
# control.
nits: List = []
for path in input_filepaths:
if not path.is_file() and parser_config.get("allow_missing_files", False):
continue
# yamllint needs both the file content and the path.
file_content = None
with path.open("r", encoding="utf-8") as fd:
@@ -410,9 +386,7 @@
def glinter(
input_filepaths: Iterable[Path],
parser_config: Optional[Dict[str, Any]] = None,
file=sys.stderr,
input_filepaths: Iterable[Path], parser_config: Dict[str, Any] = {}, file=sys.stderr
) -> int:
"""
Commandline helper for glinter.
@@ -423,10 +397,7 @@ def glinter(
:param file: The stream to write the errors to.
:return: Non-zero if there were any glinter errors.
"""
if parser_config is None:
parser_config = {}
if lint_yaml_files(input_filepaths, file=file, parser_config=parser_config):
if lint_yaml_files(input_filepaths, file=file):
return 1
objs = parser.parse_objects(input_filepaths, parser_config)


@@ -42,9 +42,7 @@ def extra_info(obj: Union[metrics.Metric, pings.Ping]) -> List[Tuple[str, str]]:
return extra_info
def ping_desc(
ping_name: str, custom_pings_cache: Optional[Dict[str, pings.Ping]] = None
) -> str:
def ping_desc(ping_name: str, custom_pings_cache: Dict[str, pings.Ping] = {}) -> str:
"""
Return a text description of the ping. If a custom_pings_cache
is available, look in there for non-reserved ping names description.
@@ -58,7 +56,7 @@ def ping_desc(
)
elif ping_name == "all-pings":
desc = "These metrics are sent in every ping."
elif custom_pings_cache is not None and ping_name in custom_pings_cache:
elif ping_name in custom_pings_cache:
desc = custom_pings_cache[ping_name].description
return desc
@@ -89,10 +87,8 @@ def ping_docs(ping_name: str) -> str:
return f"https://mozilla.github.io/glean/book/user/pings/{ping_name}.html"
def if_empty(
ping_name: str, custom_pings_cache: Optional[Dict[str, pings.Ping]] = None
) -> bool:
if custom_pings_cache is not None and ping_name in custom_pings_cache:
def if_empty(ping_name: str, custom_pings_cache: Dict[str, pings.Ping] = {}) -> bool:
if ping_name in custom_pings_cache:
return custom_pings_cache[ping_name].send_if_empty
else:
return False
@@ -113,27 +109,27 @@ def ping_reasons(
def ping_data_reviews(
ping_name: str, custom_pings_cache: Optional[Dict[str, pings.Ping]] = None
ping_name: str, custom_pings_cache: Dict[str, pings.Ping] = {}
) -> Optional[List[str]]:
if custom_pings_cache is not None and ping_name in custom_pings_cache:
if ping_name in custom_pings_cache:
return custom_pings_cache[ping_name].data_reviews
else:
return None
def ping_bugs(
ping_name: str, custom_pings_cache: Optional[Dict[str, pings.Ping]] = None
ping_name: str, custom_pings_cache: Dict[str, pings.Ping] = {}
) -> Optional[List[str]]:
if custom_pings_cache is not None and ping_name in custom_pings_cache:
if ping_name in custom_pings_cache:
return custom_pings_cache[ping_name].bugs
else:
return None
def ping_include_client_id(
ping_name: str, custom_pings_cache: Optional[Dict[str, pings.Ping]] = None
ping_name: str, custom_pings_cache: Dict[str, pings.Ping] = {}
) -> bool:
if custom_pings_cache is not None and ping_name in custom_pings_cache:
if ping_name in custom_pings_cache:
return custom_pings_cache[ping_name].include_client_id
else:
return False
@@ -149,7 +145,7 @@ def data_sensitivity_numbers(
def output_markdown(
objs: metrics.ObjectTree, output_dir: Path, options: Optional[Dict[str, Any]] = None
objs: metrics.ObjectTree, output_dir: Path, options: Dict[str, Any] = {}
) -> None:
"""
Given a tree of objects, output Markdown docs to `output_dir`.
@@ -163,8 +159,6 @@ def output_markdown(
:param options: options dictionary, with the following optional key:
- `project_title`: The project's title.
"""
if options is None:
options = {}
# Build a dictionary that associates pings with their metrics.
#
@@ -183,7 +177,7 @@
# This also builds a dictionary of custom pings, if available.
custom_pings_cache: Dict[str, pings.Ping] = defaultdict()
metrics_by_pings: Dict[str, List[metrics.Metric]] = defaultdict(list)
for _category_key, category_val in objs.items():
for category_key, category_val in objs.items():
for obj in category_val.values():
# Filter out custom pings. We will need them for extracting
# the description


@@ -52,7 +52,7 @@ class Metric:
disabled: bool = False,
lifetime: str = "ping",
send_in_pings: Optional[List[str]] = None,
unit: Optional[str] = None,
unit: str = "",
gecko_datapoint: str = "",
no_lint: Optional[List[str]] = None,
data_sensitivity: Optional[List[str]] = None,
@@ -78,8 +78,7 @@ class Metric:
if send_in_pings is None:
send_in_pings = ["default"]
self.send_in_pings = send_in_pings
if unit is not None:
self.unit = unit
self.unit = unit
self.gecko_datapoint = gecko_datapoint
if no_lint is None:
no_lint = []
@@ -121,7 +120,7 @@ class Metric:
category: str,
name: str,
metric_info: Dict[str, util.JSONType],
config: Optional[Dict[str, Any]] = None,
config: Dict[str, Any] = {},
validated: bool = False,
):
"""
@@ -137,9 +136,6 @@
jsonschema validation
:return: A new Metric instance.
"""
if config is None:
config = {}
metric_type = metric_info["type"]
if not isinstance(metric_type, str):
raise TypeError(f"Unknown metric type {metric_type}")
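The `unit` hunk above changes behavior as well as typing: with the `Optional[str] = None` form being backed out, the attribute is only set when a unit is provided, which lets dict-based serialization omit it entirely (the "not included in the output" item in the 1.29.0 HISTORY entry). A toy sketch of that distinction, not the vendored class itself:

```python
class Metric:
    def __init__(self, unit=None):
        # Only create the attribute when a unit was actually given, so
        # serializing __dict__ omits the key instead of emitting "".
        if unit is not None:
            self.unit = unit

print(Metric().__dict__)           # {} -- no 'unit' key at all
print(Metric(unit="ms").__dict__)  # {'unit': 'ms'}
```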


@@ -53,21 +53,13 @@ def _update_validator(validator):
def _load_file(
filepath: Path, parser_config: Dict[str, Any]
filepath: Path,
) -> Generator[str, None, Tuple[Dict[str, util.JSONType], Optional[str]]]:
"""
Load a metrics.yaml or pings.yaml format file.
If the `filepath` does not exist, raises `FileNotFoundError`, unless
`parser_config["allow_missing_files"]` is `True`.
"""
try:
content = util.load_yaml_or_json(filepath, ordered_dict=True)
except FileNotFoundError:
if not parser_config.get("allow_missing_files", False):
raise
else:
return {}, None
except Exception as e:
yield util.format_error(filepath, "", textwrap.fill(str(e)))
return {}, None
@@ -321,7 +313,7 @@ def _preprocess_objects(objs: ObjectTree, config: Dict[str, Any]) -> ObjectTree:
@util.keep_value
def parse_objects(
filepaths: Iterable[Path], config: Optional[Dict[str, Any]] = None
filepaths: Iterable[Path], config: Dict[str, Any] = {}
) -> Generator[str, None, ObjectTree]:
"""
Parse one or more metrics.yaml and/or pings.yaml files, returning a tree of
@@ -350,17 +342,12 @@
This is useful when you want to retain the original "disabled"
value from the `metrics.yaml`, rather than having it overridden when
the metric expires.
- `allow_missing_files`: Do not raise a `FileNotFoundError` if any of
the input `filepaths` do not exist.
"""
if config is None:
config = {}
all_objects: ObjectTree = OrderedDict()
sources: Dict[Any, Path] = {}
filepaths = util.ensure_list(filepaths)
for filepath in filepaths:
content, filetype = yield from _load_file(filepath, config)
content, filetype = yield from _load_file(filepath)
if filetype == "metrics":
yield from _instantiate_metrics(
all_objects, sources, content, filepath, config
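For reference, a self-contained sketch of the `allow_missing_files` behavior this hunk removes, simplified to plain JSON loading (the real `_load_file` goes through `util.load_yaml_or_json` and yields formatted errors instead of returning them):

```python
import json
from pathlib import Path

def load_file(filepath: Path, parser_config: dict) -> dict:
    """Load one input file; tolerate a missing file only when configured."""
    try:
        with filepath.open("r", encoding="utf-8") as fd:
            return json.load(fd)
    except FileNotFoundError:
        if not parser_config.get("allow_missing_files", False):
            raise  # default behavior: a missing input file is an error
        return {}  # opted in: treat the missing file as empty

# load_file(Path("missing.json"), {})                             -> raises
# load_file(Path("missing.json"), {"allow_missing_files": True})  -> {}
```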


@@ -73,9 +73,3 @@ class Ping:
d = self.__dict__.copy()
del d["name"]
return d
def identifier(self) -> str:
"""
Used for the "generated from ..." comment in the output.
"""
return self.name


@@ -11,7 +11,7 @@ Outputter to generate Swift code for metrics.
import enum
import json
from pathlib import Path
from typing import Any, Dict, Optional, Union
from typing import Any, Dict, Union
from . import metrics
from . import pings
@@ -117,7 +117,7 @@ class Category:
def output_swift(
objs: metrics.ObjectTree, output_dir: Path, options: Optional[Dict[str, Any]] = None
objs: metrics.ObjectTree, output_dir: Path, options: Dict[str, Any] = {}
) -> None:
"""
Given a tree of objects, output Swift code to `output_dir`.
@@ -130,9 +130,6 @@
- glean_namespace: The namespace to import Glean from
- allow_reserved: When True, this is a Glean-internal build
"""
if options is None:
options = {}
template = util.get_jinja2_template(
"swift.jinja2",
filters=(


@@ -11,7 +11,7 @@ Jinja2 template is not. Please file bugs! #}
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
{% macro obj_declaration(obj, suffix='', access='', lazy=False) %}
{{ access }} {% if lazy %} Lazy<{{ obj|type_name }}>{%- else %} {{ obj|type_name }}{% endif %} {{ obj.name|camelize }}{{ suffix }}
{%- if lazy %} = new Lazy<{{ obj|type_name }}>(() => {%- else %} = // generated from {{ obj.identifier() }}{% endif %}
{%- if lazy %} = new Lazy<{{ obj|type_name }}>(() => {%- else %} ={% endif %}
new {{ obj|type_name }}(
{% for arg_name in extra_args if obj[arg_name] is defined %}
@@ -57,7 +57,7 @@ namespace {{ namespace }}
{% for obj in objs.values() %}
{% if obj.labeled %}
{{ obj_declaration(obj, 'Label', 'private ') }}
private readonly Lazy<LabeledMetricType<{{ obj|type_name }}>> {{ obj.name|camelize }}Lazy = new Lazy<LabeledMetricType<{{ obj|type_name }}>>(() => new LabeledMetricType<{{ obj|type_name }}>( // generated from {{ obj.identifier() }}
private readonly Lazy<LabeledMetricType<{{ obj|type_name }}>> {{ obj.name|camelize }}Lazy = new Lazy<LabeledMetricType<{{ obj|type_name }}>>(() => new LabeledMetricType<{{ obj|type_name }}>(
category: {{ obj.category|csharp }},
name: {{ obj.name|csharp }},
submetric: {{ category_name|Camelize }}.{{ obj.name|camelize }}Label,
@@ -82,7 +82,7 @@ namespace {{ namespace }}
/// <summary>
/// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
/// </summary>
internal {{ obj|type_name }} {{ obj.name|camelize }} => {{ obj.name|camelize }}Lazy.Value; // generated from {{ obj.identifier() }}
internal {{ obj|type_name }} {{ obj.name|camelize }} => {{ obj.name|camelize }}Lazy.Value;
{% else %}
{# Finally handle pings. #}


@@ -13,7 +13,7 @@ Jinja2 template is not. Please file bugs! #}
{% if (access != "private ") -%}
@get:JvmName("{{ obj.name|camelize }}{{ suffix }}")
{% endif -%}
{{ access }}val {{ obj.name|camelize }}{{ suffix }}: {{ obj|type_name }}{% if lazy %} by lazy { {%- else %} ={% endif %} // generated from {{ obj.identifier() }}
{{ access }}val {{ obj.name|camelize }}{{ suffix }}: {{ obj|type_name }}{% if lazy %} by lazy { {%- else %} ={% endif %}
{{ obj|type_name }}(
{% for arg_name in extra_args if obj[arg_name] is defined %}
@@ -60,7 +60,7 @@ internal object {{ category_name|Camelize }} {
/**
* {{ obj.description|wordwrap() | replace('\n', '\n * ') }}
*/
val {{ obj.name|camelize }}: LabeledMetricType<{{ obj|type_name }}> by lazy { // generated from {{ obj.identifier() }}
val {{ obj.name|camelize }}: LabeledMetricType<{{ obj|type_name }}> by lazy {
LabeledMetricType(
category = {{ obj.category|kotlin }},
name = {{ obj.name|kotlin }},


@@ -8,7 +8,7 @@ Jinja2 template is not. Please file bugs! #}
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
{% macro obj_declaration(obj, suffix='', access='') %}
{{ access }}static let {{ obj.name|camelize|variable_name }}{{ suffix }} = {{ obj|type_name }}( // generated from {{ obj.identifier() }}
{{ access }}static let {{ obj.name|camelize|variable_name }}{{ suffix }} = {{ obj|type_name }}(
{% for arg_name in extra_args if obj[arg_name] is defined %}
{{ arg_name|camelize }}: {{ obj[arg_name]|swift }}{{ "," if not loop.last }}
{% endfor %}
@@ -86,7 +86,7 @@ extension {{ namespace }} {
{% if obj.labeled %}
{{ obj_declaration(obj, 'Label', 'private ') }}
/// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
static let {{ obj.name|camelize|variable_name }} = try! LabeledMetricType<{{ obj|type_name }}>( // generated from {{ obj.identifier() }}
static let {{ obj.name|camelize|variable_name }} = try! LabeledMetricType<{{ obj|type_name }}>(
category: {{ obj.category|swift }},
name: {{ obj.name|swift }},
sendInPings: {{ obj.send_in_pings|swift }},


@@ -12,7 +12,7 @@ from pathlib import Path
import os
import shutil
import tempfile
from typing import Any, Callable, Dict, Iterable, List, Optional
from typing import Any, Callable, Dict, Iterable, List
from . import lint
from . import parser
@@ -40,11 +40,8 @@ class Outputter:
def __init__(
self,
output_func: Callable[[metrics.ObjectTree, Path, Dict[str, Any]], None],
clear_patterns: Optional[List[str]] = None,
clear_patterns: List[str] = [],
):
if clear_patterns is None:
clear_patterns = []
self.output_func = output_func
self.clear_patterns = clear_patterns
@@ -52,7 +49,7 @@
OUTPUTTERS = {
"csharp": Outputter(csharp.output_csharp, ["*.cs"]),
"kotlin": Outputter(kotlin.output_kotlin, ["*.kt"]),
"markdown": Outputter(markdown.output_markdown, []),
"markdown": Outputter(markdown.output_markdown),
"swift": Outputter(swift.output_swift, ["*.swift"]),
}
@@ -61,9 +58,9 @@ def translate_metrics(
input_filepaths: Iterable[Path],
output_dir: Path,
translation_func: Callable[[metrics.ObjectTree, Path, Dict[str, Any]], None],
clear_patterns: Optional[List[str]] = None,
options: Optional[Dict[str, Any]] = None,
parser_config: Optional[Dict[str, Any]] = None,
clear_patterns: List[str] = [],
options: Dict[str, Any] = {},
parser_config: Dict[str, Any] = {},
):
"""
Translate the files in `input_filepaths` by running the metrics through a
@@ -88,15 +85,6 @@
:param parser_config: A dictionary of options that change parsing behavior.
See `parser.parse_metrics` for more info.
"""
if clear_patterns is None:
clear_patterns = []
if options is None:
options = {}
if parser_config is None:
parser_config = {}
input_filepaths = util.ensure_list(input_filepaths)
if lint.glinter(input_filepaths, parser_config):
@@ -140,8 +128,8 @@ def translate(
input_filepaths: Iterable[Path],
output_format: str,
output_dir: Path,
options: Optional[Dict[str, Any]] = None,
parser_config: Optional[Dict[str, Any]] = None,
options: Dict[str, Any] = {},
parser_config: Dict[str, Any] = {},
):
"""
Translate the files in `input_filepaths` to the given `output_format` and
@@ -155,12 +143,6 @@
:param parser_config: A dictionary of options that change parsing behavior.
See `parser.parse_metrics` for more info.
"""
if options is None:
options = {}
if parser_config is None:
parser_config = {}
format_desc = OUTPUTTERS.get(output_format, None)
if format_desc is None:


@@ -114,12 +114,14 @@ def load_yaml_or_json(path: Path, ordered_dict: bool = False):
:param path: `pathlib.Path` object
:rtype object: The tree of objects as a result of parsing the file.
:raises ValueError: The file is neither a .json, .yml or .yaml file.
:raises FileNotFoundError: The file does not exist.
"""
# If in py.test, support bits of literal JSON/YAML content
if TESTING_MODE and isinstance(path, dict):
return path
if not path.is_file():
return {}
if path.suffix == ".json":
with path.open("r", encoding="utf-8") as fd:
return json.load(fd)
@@ -432,7 +434,5 @@
]
# Names of parameters to pass to both metric and ping constructors (no duplicates).
extra_args = extra_metric_args + [
v for v in extra_ping_args if v not in extra_metric_args
]
# Names of parameters to pass to both metric and ping constructors.
extra_args = list(set(extra_metric_args) | set(extra_ping_args))
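The two `extra_args` variants above differ in iteration order, not contents: concatenation with a membership filter preserves declaration order, while a set union's order depends on string hashing, which Python randomizes between runs. A small illustration with made-up argument names (deterministic ordering is the same concern behind the 1.28.6 "Kotlin arguments are deterministically ordered" HISTORY entry):

```python
metric_args = ["bugs", "description", "expires"]
ping_args = ["description", "include_client_id"]

# Order-preserving de-duplication (the form being backed out):
stable = metric_args + [v for v in ping_args if v not in metric_args]
print(stable)  # always ['bugs', 'description', 'expires', 'include_client_id']

# Set union (the form restored here): same members, unspecified order.
unstable = list(set(metric_args) | set(ping_args))
print(sorted(unstable) == sorted(stable))  # True -- contents match
```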


@@ -1,14 +1,13 @@
black==20.8b1
coverage==5.3
flake8==3.8.4
flake8-bugbear==20.1.4
coverage==5.2.1
flake8==3.8.3
m2r==0.2.1
mypy==0.782
pip
pytest-runner==5.2
pytest==6.1.1
pytest==6.0.1
Sphinx==3.2.1
twine==3.2.0
watchdog==0.10.3
wheel
yamllint==1.25.0
yamllint==1.24.2

third_party/python/glean_parser/setup.py (vendored, 3 lines changed)

@@ -51,12 +51,11 @@ setup(
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
description="Parser tools for Mozilla's Glean telemetry",
entry_points={
"console_scripts": [
"glean_parser=glean_parser.__main__:main_wrapper",
"glean_parser=glean_parser.__main__:main",
],
},
install_requires=requirements,

third_party/python/requirements.in (vendored, 2 lines changed)

@@ -26,7 +26,7 @@ ecdsa==0.15
esprima==4.0.1
fluent.migrate==0.10
fluent.syntax==0.18.1
glean_parser==1.29.0
glean_parser==1.28.5
jsmin==2.1.0
json-e==2.7.0
mozilla-version==0.3.4

third_party/python/requirements.txt (vendored, 6 lines changed)

@@ -87,9 +87,9 @@ fluent.syntax==0.18.1 \
--hash=sha256:0e63679fa4f1b3042565220a5127b4bab842424f07d6a13c12299e3b3835486a \
--hash=sha256:3a55f5e605d1b029a65cc8b6492c86ec4608e15447e73db1495de11fd46c104f \
# via -r requirements-mach-vendor-python.in, compare-locales, fluent.migrate
glean_parser==1.29.0 \
--hash=sha256:7cf1b02ef87fad57bf0f6b9711a98c1fd8f89c9df702245d16c09bf1b042a255 \
--hash=sha256:df7436e164148594176ec55f7d7c3c5c944daca67c3cc30428514628625b214b \
glean_parser==1.28.5 \
--hash=sha256:29ac33298898e0fd607163b704d68f598c1d118c5056852246d621ec26f973bb \
--hash=sha256:330e045fd8410f661e8e4a67edc8ab4a125996381e5519c40ca98b34b9dc5ec8 \
# via -r requirements-mach-vendor-python.in
jinja2==2.11.2 \
--hash=sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0 \

third_party/rust/adler/.cargo-checksum.json (vendored, 1 line changed)

@@ -1 +0,0 @@
{"files":{"CHANGELOG.md":"042ed3158af7000c88a6617d775f11456bd30f6c7c8b5b586978faa1e11b1e24","Cargo.toml":"107d13689eecfa82a8b5ae35bf835b9d2775337226630e4bdb35f22d0dd52e18","LICENSE-0BSD":"861399f8c21c042b110517e76dc6b63a2b334276c8cf17412fc3c8908ca8dc17","LICENSE-APACHE":"8ada45cd9f843acf64e4722ae262c622a2b3b3007c7310ef36ac1061a30f6adb","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"fa83fd5ee10b61827de382e496bf66296a526e3d2c3b2aa5ad672aa15e8d2d7f","RELEASE_PROCESS.md":"a86cd10fc70f167f8d00e9e4ce0c6b4ebdfa1865058390dffd1e0ad4d3e68d9d","benches/bench.rs":"c07ce370e3680c602e415f8d1ec4e543ea2163ab22a09b6b82d93e8a30adca82","src/algo.rs":"b664b131f724a809591394a10b9023f40ab5963e32a83fa3163c2668e59c8b66","src/lib.rs":"67f3ca5b6333e22745b178b70f472514162cea2890344724f0f66995fcf19806"},"package":"ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"}

third_party/rust/adler/CHANGELOG.md (vendored, 33 lines changed)

@@ -1,33 +0,0 @@
# Changelog
## Unreleased
No changes.
## [0.2.3 - 2020-07-11](https://github.com/jonas-schievink/adler/releases/tag/v0.2.3)
- Process 4 Bytes at a time, improving performance by up to 50% ([#2]).
## [0.2.2 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.2)
- Bump MSRV to 1.31.0.
## [0.2.1 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.1)
- Add a few `#[inline]` annotations to small functions.
- Fix CI badge.
- Allow integration into libstd.
## [0.2.0 - 2020-06-27](https://github.com/jonas-schievink/adler/releases/tag/v0.2.0)
- Support `#![no_std]` when using `default-features = false`.
- Improve performance by around 7x.
- Support Rust 1.8.0.
- Improve API naming.
## [0.1.0 - 2020-06-26](https://github.com/jonas-schievink/adler/releases/tag/v0.1.0)
Initial release.
[#2]: https://github.com/jonas-schievink/adler/pull/2

third_party/rust/adler/Cargo.toml (vendored, 69 lines changed)

@@ -1,69 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "adler"
version = "0.2.3"
authors = ["Jonas Schievink <jonasschievink@gmail.com>"]
description = "A simple clean-room implementation of the Adler-32 checksum"
documentation = "https://docs.rs/adler/"
readme = "README.md"
keywords = ["checksum", "integrity", "hash", "adler32"]
categories = ["algorithms"]
license = "0BSD OR MIT OR Apache-2.0"
repository = "https://github.com/jonas-schievink/adler.git"
[package.metadata.docs.rs]
rustdoc-args = ["--cfg docsrs"]
[package.metadata.release]
no-dev-version = true
pre-release-commit-message = "Release {{version}}"
tag-message = "{{version}}"
[[package.metadata.release.pre-release-replacements]]
file = "CHANGELOG.md"
replace = "## Unreleased\n\nNo changes.\n\n## [{{version}} - {{date}}](https://github.com/jonas-schievink/adler/releases/tag/v{{version}})\n"
search = "## Unreleased\n"
[[package.metadata.release.pre-release-replacements]]
file = "README.md"
replace = "adler = \"{{version}}\""
search = "adler = \"[a-z0-9\\\\.-]+\""
[[package.metadata.release.pre-release-replacements]]
file = "src/lib.rs"
replace = "https://docs.rs/adler/{{version}}"
search = "https://docs.rs/adler/[a-z0-9\\.-]+"
[[bench]]
name = "bench"
harness = false
[dependencies.compiler_builtins]
version = "0.1.2"
optional = true
[dependencies.core]
version = "1.0.0"
optional = true
package = "rustc-std-workspace-core"
[dev-dependencies.criterion]
version = "0.3.2"
[features]
default = ["std"]
rustc-dep-of-std = ["core", "compiler_builtins"]
std = []
[badges.maintenance]
status = "actively-developed"
[badges.travis-ci]
repository = "jonas-schievink/adler"

third_party/rust/adler/LICENSE-0BSD (vendored, 12 lines changed)

@@ -1,12 +0,0 @@
Copyright (C) Jonas Schievink <jonasschievink@gmail.com>
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

third_party/rust/adler/LICENSE-APACHE (vendored, 201 lines changed)

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/LICENSE-2.0
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

third_party/rust/adler/LICENSE-MIT (vendored, 23 lines changed)

@@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

third_party/rust/adler/README.md (vendored, 38 lines changed)

@@ -1,38 +0,0 @@
# Adler-32 checksums for Rust
[![crates.io](https://img.shields.io/crates/v/adler.svg)](https://crates.io/crates/adler)
[![docs.rs](https://docs.rs/adler/badge.svg)](https://docs.rs/adler/)
![CI](https://github.com/jonas-schievink/adler/workflows/CI/badge.svg)
This crate provides a simple implementation of the Adler-32 checksum, used in
zlib, rsync, and other software.
Please refer to the [changelog](CHANGELOG.md) to see what changed in the last
releases.
## Features
- Permissively licensed (0BSD) clean-room implementation.
- Zero dependencies.
- Decent performance (3-4 GB/s).
- Supports `#![no_std]` (with `default-features = false`).
## Usage
Add an entry to your `Cargo.toml`:
```toml
[dependencies]
adler = "0.2.3"
```
Check the [API Documentation](https://docs.rs/adler/) for how to use the
crate's functionality.
## Rust version support
Currently, this crate supports all Rust versions starting at Rust 1.31.0.
Bumping the Minimum Supported Rust Version (MSRV) is *not* considered a breaking
change, but will not be done without good reasons. The latest 3 stable Rust
versions will always be supported no matter what.

third_party/rust/adler/RELEASE_PROCESS.md (vendored, 13 lines changed)

@@ -1,13 +0,0 @@
# What to do to publish a new release
1. Ensure all notable changes are in the changelog under "Unreleased".
2. Execute `cargo release <level>` to bump version(s), tag and publish
everything. External subcommand, must be installed with `cargo install
cargo-release`.
`<level>` can be one of `major|minor|patch`. If this is the first release
(`0.1.0`), use `minor`, since the version starts out as `0.0.0`.
3. Go to the GitHub releases, edit the just-pushed tag. Copy the release notes
from the changelog.

third_party/rust/adler/benches/bench.rs (vendored, 109 lines changed)

@@ -1,109 +0,0 @@
extern crate adler;
extern crate criterion;
use adler::{adler32_slice, Adler32};
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
fn simple(c: &mut Criterion) {
{
const SIZE: usize = 100;
let mut group = c.benchmark_group("simple-100b");
group.throughput(Throughput::Bytes(SIZE as u64));
group.bench_function("zeroes-100", |bencher| {
bencher.iter(|| {
adler32_slice(&[0; SIZE]);
});
});
group.bench_function("ones-100", |bencher| {
bencher.iter(|| {
adler32_slice(&[0xff; SIZE]);
});
});
}
{
const SIZE: usize = 1024;
let mut group = c.benchmark_group("simple-1k");
group.throughput(Throughput::Bytes(SIZE as u64));
group.bench_function("zeroes-1k", |bencher| {
bencher.iter(|| {
adler32_slice(&[0; SIZE]);
});
});
group.bench_function("ones-1k", |bencher| {
bencher.iter(|| {
adler32_slice(&[0xff; SIZE]);
});
});
}
{
const SIZE: usize = 1024 * 1024;
let mut group = c.benchmark_group("simple-1m");
group.throughput(Throughput::Bytes(SIZE as u64));
group.bench_function("zeroes-1m", |bencher| {
bencher.iter(|| {
adler32_slice(&[0; SIZE]);
});
});
group.bench_function("ones-1m", |bencher| {
bencher.iter(|| {
adler32_slice(&[0xff; SIZE]);
});
});
}
}
fn chunked(c: &mut Criterion) {
const SIZE: usize = 16 * 1024 * 1024;
let data = vec![0xAB; SIZE];
let mut group = c.benchmark_group("chunked-16m");
group.throughput(Throughput::Bytes(SIZE as u64));
group.bench_function("5552", |bencher| {
bencher.iter(|| {
let mut h = Adler32::new();
for chunk in data.chunks(5552) {
h.write_slice(chunk);
}
h.checksum()
});
});
group.bench_function("8k", |bencher| {
bencher.iter(|| {
let mut h = Adler32::new();
for chunk in data.chunks(8 * 1024) {
h.write_slice(chunk);
}
h.checksum()
});
});
group.bench_function("64k", |bencher| {
bencher.iter(|| {
let mut h = Adler32::new();
for chunk in data.chunks(64 * 1024) {
h.write_slice(chunk);
}
h.checksum()
});
});
group.bench_function("1m", |bencher| {
bencher.iter(|| {
let mut h = Adler32::new();
for chunk in data.chunks(1024 * 1024) {
h.write_slice(chunk);
}
h.checksum()
});
});
}
criterion_group!(benches, simple, chunked);
criterion_main!(benches);

third_party/rust/adler/src/algo.rs (vendored, 146 lines changed)

@@ -1,146 +0,0 @@
use crate::Adler32;
use std::ops::{AddAssign, MulAssign, RemAssign};
impl Adler32 {
pub(crate) fn compute(&mut self, bytes: &[u8]) {
// The basic algorithm is, for every byte:
// a = (a + byte) % MOD
// b = (b + a) % MOD
// where MOD = 65521.
//
// For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows:
// - Between calls to `write`, we ensure that a and b are always in range 0..MOD.
// - We use 32-bit arithmetic in this function.
// - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD`
// operation.
//
// According to Wikipedia, b is calculated as follows for non-incremental checksumming:
// b = n×D1 + (n-1)×D2 + (n-2)×D3 + ... + Dn + n*1 (mod 65521)
// Where n is the number of bytes and Di is the i-th Byte. We need to change this to account
// for the previous values of a and b, as well as treat every input Byte as being 255:
// b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520
// Or in other words:
// b_inc = n*65520 + n(n+1)/2*255
// The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521.
// 2^32-65521 = n*65520 + n(n+1)/2*255
// Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552.
//
// On top of the optimization outlined above, the algorithm can also be parallelized with a
// bit more work:
//
// Note that b is a linear combination of a vector of input bytes (D1, ..., Dn).
//
// If we fix some value k<N and rewrite indices 1, ..., N as
//
// 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k,
//
// then we can express a and b in terms of sums of smaller sequences kb and ka:
//
// ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k
// kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k
//
// a = ka(1) + ka(2) + ... + ka(k) + 1
// b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N
//
// We use this insight to unroll the main loop and process k=4 bytes at a time.
// The resulting code is highly amenable to SIMD acceleration, although the immediate speedups
// stem from increased pipeline parallelism rather than auto-vectorization.
//
// This technique is described in-depth (here:)[https://software.intel.com/content/www/us/\
// en/develop/articles/fast-computation-of-fletcher-checksums.html]
const MOD: u32 = 65521;
const CHUNK_SIZE: usize = 5552 * 4;
let mut a = u32::from(self.a);
let mut b = u32::from(self.b);
let mut a_vec = U32X4([0; 4]);
let mut b_vec = a_vec;
let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4);
// iterate over 4 bytes at a time
let chunk_iter = bytes.chunks_exact(CHUNK_SIZE);
let remainder_chunk = chunk_iter.remainder();
for chunk in chunk_iter {
for byte_vec in chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += CHUNK_SIZE as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
}
// special-case the final chunk because it may be shorter than the rest
for byte_vec in remainder_chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += remainder_chunk.len() as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
// combine the sub-sum results into the main sum
b_vec *= 4;
b_vec.0[1] += MOD - a_vec.0[1];
b_vec.0[2] += (MOD - a_vec.0[2]) * 2;
b_vec.0[3] += (MOD - a_vec.0[3]) * 3;
for &av in a_vec.0.iter() {
a += av;
}
for &bv in b_vec.0.iter() {
b += bv;
}
// iterate over the remaining few bytes in serial
for &byte in remainder.iter() {
a += u32::from(byte);
b += a;
}
self.a = (a % MOD) as u16;
self.b = (b % MOD) as u16;
}
}
#[derive(Copy, Clone)]
struct U32X4([u32; 4]);
impl U32X4 {
fn from(bytes: &[u8]) -> Self {
U32X4([
u32::from(bytes[0]),
u32::from(bytes[1]),
u32::from(bytes[2]),
u32::from(bytes[3]),
])
}
}
impl AddAssign<Self> for U32X4 {
fn add_assign(&mut self, other: Self) {
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
*s += o;
}
}
}
impl RemAssign<u32> for U32X4 {
fn rem_assign(&mut self, quotient: u32) {
for s in self.0.iter_mut() {
*s %= quotient;
}
}
}
impl MulAssign<u32> for U32X4 {
fn mul_assign(&mut self, rhs: u32) {
for s in self.0.iter_mut() {
*s *= rhs;
}
}
}

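For reference, a minimal byte-at-a-time sketch of the definition the unrolled loop above must agree with (not part of the vendored crate; `adler32_naive` is an illustrative name), checked against the same value the crate's `wiki` test uses:

fn adler32_naive(bytes: &[u8]) -> u32 {
    const MOD: u32 = 65521;
    let mut a: u32 = 1;
    let mut b: u32 = 0;
    for &byte in bytes {
        // The unoptimized recurrence the comments above start from.
        a = (a + u32::from(byte)) % MOD;
        b = (b + a) % MOD;
    }
    (b << 16) | a
}

fn main() {
    assert_eq!(adler32_naive(b"Wikipedia"), 0x11E60398);
}
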
215
third_party/rust/adler/src/lib.rs vendored

@@ -1,215 +0,0 @@
//! Adler-32 checksum implementation.
//!
//! This implementation features:
//!
//! - Permissively licensed (0BSD) clean-room implementation.
//! - Zero dependencies.
//! - Decent performance (3-4 GB/s).
//! - `#![no_std]` support (with `default-features = false`).
#![doc(html_root_url = "https://docs.rs/adler/0.2.3")]
// Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default
#![doc(test(attr(deny(unused_imports, unused_must_use))))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(missing_debug_implementations)]
#![forbid(unsafe_code)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "std"))]
extern crate core as std;
mod algo;
use std::hash::Hasher;
#[cfg(feature = "std")]
use std::io::{self, BufRead};
/// Adler-32 checksum calculator.
///
/// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default
/// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via
/// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`].
///
/// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any
/// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although
/// that is not recommended (while every checksum is a hash, they are not necessarily good at being
/// one).
///
/// [`new`]: #method.new
/// [`from_checksum`]: #method.from_checksum
/// [`checksum`]: #method.checksum
#[derive(Debug, Copy, Clone)]
pub struct Adler32 {
a: u16,
b: u16,
}
impl Adler32 {
/// Creates a new Adler-32 instance with default state.
#[inline]
pub fn new() -> Self {
Self::default()
}
/// Creates an `Adler32` instance from a precomputed Adler-32 checksum.
///
/// This allows resuming checksum calculation without having to keep the `Adler32` instance
/// around.
///
/// # Example
///
/// ```
/// # use adler::Adler32;
/// let parts = [
/// "rust",
/// "acean",
/// ];
/// let whole = adler::adler32_slice(b"rustacean");
///
/// let mut sum = Adler32::new();
/// sum.write_slice(parts[0].as_bytes());
/// let partial = sum.checksum();
///
/// // ...later
///
/// let mut sum = Adler32::from_checksum(partial);
/// sum.write_slice(parts[1].as_bytes());
/// assert_eq!(sum.checksum(), whole);
/// ```
#[inline]
pub fn from_checksum(sum: u32) -> Self {
Adler32 {
a: sum as u16,
b: (sum >> 16) as u16,
}
}
/// Returns the calculated checksum at this point in time.
#[inline]
pub fn checksum(&self) -> u32 {
(u32::from(self.b) << 16) | u32::from(self.a)
}
/// Adds `bytes` to the checksum calculation.
///
/// If efficiency matters, this should be called with byte slices that contain at least a few
/// thousand bytes.
pub fn write_slice(&mut self, bytes: &[u8]) {
self.compute(bytes);
}
}
impl Default for Adler32 {
#[inline]
fn default() -> Self {
Adler32 { a: 1, b: 0 }
}
}
impl Hasher for Adler32 {
#[inline]
fn finish(&self) -> u64 {
u64::from(self.checksum())
}
fn write(&mut self, bytes: &[u8]) {
self.write_slice(bytes);
}
}
/// Calculates the Adler-32 checksum of a byte slice.
pub fn adler32_slice(data: &[u8]) -> u32 {
let mut h = Adler32::new();
h.write_slice(data);
h.checksum()
}
/// Calculates the Adler-32 checksum of a `BufRead`'s contents.
///
/// The passed `BufRead` implementor will be read until it reaches EOF.
///
/// If you only have a `Read` implementor, wrap it in `std::io::BufReader`.
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub fn adler32_reader<R: BufRead>(reader: &mut R) -> io::Result<u32> {
let mut h = Adler32::new();
loop {
let len = {
let buf = reader.fill_buf()?;
if buf.is_empty() {
return Ok(h.checksum());
}
h.write_slice(buf);
buf.len()
};
reader.consume(len);
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::BufReader;
#[test]
fn zeroes() {
assert_eq!(adler32_slice(&[]), 1);
assert_eq!(adler32_slice(&[0]), 1 | 1 << 16);
assert_eq!(adler32_slice(&[0, 0]), 1 | 2 << 16);
assert_eq!(adler32_slice(&[0; 100]), 0x00640001);
assert_eq!(adler32_slice(&[0; 1024]), 0x04000001);
assert_eq!(adler32_slice(&[0; 1024 * 1024]), 0x00f00001);
}
#[test]
fn ones() {
assert_eq!(adler32_slice(&[0xff; 1024]), 0x79a6fc2e);
assert_eq!(adler32_slice(&[0xff; 1024 * 1024]), 0x8e88ef11);
}
#[test]
fn mixed() {
assert_eq!(adler32_slice(&[1]), 2 | 2 << 16);
assert_eq!(adler32_slice(&[40]), 41 | 41 << 16);
assert_eq!(adler32_slice(&[0xA5; 1024 * 1024]), 0xd5009ab1);
}
/// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
#[test]
fn wiki() {
assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398);
}
#[test]
fn resume() {
let mut adler = Adler32::new();
adler.write_slice(&[0xff; 1024]);
let partial = adler.checksum();
assert_eq!(partial, 0x79a6fc2e); // from above
adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
assert_eq!(adler.checksum(), 0x8e88ef11); // from above
// Make sure that we can resume computing from the partial checksum via `from_checksum`.
let mut adler = Adler32::from_checksum(partial);
adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
assert_eq!(adler.checksum(), 0x8e88ef11); // from above
}
#[test]
fn bufread() {
fn test(data: &[u8], checksum: u32) {
// `BufReader` uses an 8 KB buffer, so this will test buffer refilling.
let mut buf = BufReader::new(data);
let real_sum = adler32_reader(&mut buf).unwrap();
assert_eq!(checksum, real_sum);
}
test(&[], 1);
test(&[0; 1024], 0x04000001);
test(&[0; 1024 * 1024], 0x00f00001);
test(&[0xA5; 1024 * 1024], 0xd5009ab1);
}
}

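Beyond the doctests above, the `Hasher` impl means any `Hash` type can feed the checksum; a short usage sketch, assuming `adler = "0.2"` as a dependency:

use adler::Adler32;
use std::hash::{Hash, Hasher};

fn main() {
    // Drive the checksum through the `Hasher` impl shown above.
    let mut sum = Adler32::new();
    "rustacean".hash(&mut sum);
    println!("hash = {:#x}", sum.finish());

    // The convenience function, checked against the crate's `wiki` test value.
    assert_eq!(adler::adler32_slice(b"Wikipedia"), 0x11E60398);
}
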
third_party/rust/bincode/.cargo-checksum.json vendored

@@ -1 +1 @@
{"files":{"Cargo.toml":"eea47538c620fc4426eba15cfe1fc2d1c416076a377f7dbf70c0ad0e839023f8","LICENSE.md":"237710d4c9dd8eb7fead2c9c433be493a878f547d1093812feff43d18abe5896","readme.md":"33b56c15054e8a2eaae44674bb1d237ec3ca1ff242a076d0aa2fd42e93c228b6","src/config/endian.rs":"a492654e13b0e044a0551a97bae1a5634d28e038c6a9a9c54f17a1b81ae8b997","src/config/int.rs":"66065835fc56a515df57f2c19df6bfc8295a87266d05cf6dac17f0380cae2af6","src/config/legacy.rs":"db8dca430bb718058289b99528cd8e62502fdaf93d652ca6772026de8a784e40","src/config/limit.rs":"e5883d901a4b216e622f092503767ace121c396b645a24e28dcc528eab54d8e3","src/config/mod.rs":"7ccc4952b1624a5a6d5f28c4607cd8edb38f8eab4bbce00edc1671b175f91dac","src/config/trailing.rs":"dec18dbbd847e87456bcaa93413c377d922efb4239f0600879c7440540a07a3e","src/de/mod.rs":"b91fe450aedbc9332f59f189dd438fdabfb4d7482f951aa40db18b49fa91b942","src/de/read.rs":"bb32d12d8dee1b0df426e3cc48288664152468c48b1df2448edd18bf1df921be","src/error.rs":"f1154e228fd0d2dfc73bed9efb410875a8b84a77b40c2dfbe37d3dd244ed0741","src/internal.rs":"ac343424a43899f4f9d44855484bc84cd86a07fafc9ae721d92a2095afeb8b05","src/lib.rs":"1e713a7ba519dfe764c66f661ac27bde3a20038bc9c6ace97e5730a39a73e0d0","src/ser/mod.rs":"a9cbf0b6c24c73d6f51994ebdb4c3b8ecd410011b083b7a882d823a7271d2221","tests/test.rs":"056e1e9fa640587224f3c7fea869b1828db441368fb5935fecd689e610b0299a"},"package":"f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d"}
{"files":{"Cargo.toml":"16c6c5374dd14773571dfab3254557ca0e3f7810e1fb27df6e27c2112e16c605","LICENSE.md":"90d7e062634054e6866d3c81e6a2b3058a840e6af733e98e80bdfe1a7dec6912","readme.md":"8d9ee7f575a20798e09471169ff3b5e3dea6ab74c50d2128cfc77f4074e97149","src/config.rs":"e3e6e264cdc736c442b9299d7ad39475457f0c69d2ea8fa0de14f8120c5f3023","src/de/mod.rs":"c431445d27366eaa05553fe1eb5dee320e87b7145b26fe50a56568fc83ccfe95","src/de/read.rs":"e188e291aef8c4ce41552390a28caacb26188c796e25c912d9730ad411a4abeb","src/error.rs":"ce6617bf8523392e6fc8b853b7768899a229b6b78dabc2918c0e2dd3f846aa01","src/internal.rs":"55a69c335cf15038eb76f7ba71b0828b20ee1d16adbc5e10e2087efbb74c55ea","src/lib.rs":"41258f970098e3b0421daf9fbaff34efa716039632f5d1b6409e22fe473c5775","src/ser/mod.rs":"323ca31c66188ba952faf6de111c91fe551a27ebc522c10a3cfe2e5348a74390"},"package":"5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf"}

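The `.cargo-checksum.json` blobs above map each vendored file to a hex SHA-256 digest, plus the checksum of the upstream `.crate` archive. A hypothetical verifier sketch — `verify_vendored` is illustrative, and it assumes the `serde` (with `derive`), `serde_json`, and `sha2` crates, none of which this commit touches:

use std::collections::BTreeMap;
use std::fs;

use serde::Deserialize;
use sha2::{Digest, Sha256};

#[derive(Deserialize)]
#[allow(dead_code)]
struct CargoChecksum {
    // file path -> hex-encoded SHA-256 of the file's contents
    files: BTreeMap<String, String>,
    // checksum of the upstream .crate archive, when vendored from a registry
    package: Option<String>,
}

fn verify_vendored(dir: &str) -> std::io::Result<()> {
    let raw = fs::read_to_string(format!("{}/.cargo-checksum.json", dir))?;
    let sums: CargoChecksum = serde_json::from_str(&raw).expect("well-formed json");
    for (path, expected) in &sums.files {
        let data = fs::read(format!("{}/{}", dir, path))?;
        let actual = format!("{:x}", Sha256::digest(&data));
        assert_eq!(&actual, expected, "checksum mismatch for {}", path);
    }
    Ok(())
}

fn main() {
    verify_vendored("third_party/rust/bincode").unwrap();
}
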
6
third_party/rust/bincode/Cargo.toml vendored

@@ -12,9 +12,9 @@
[package]
name = "bincode"
version = "1.3.1"
authors = ["Ty Overby <ty@pre-alpha.com>", "Francesco Mazzoli <f@mazzo.li>", "David Tolnay <dtolnay@gmail.com>", "Zoey Riordan <zoey@dos.cafe>"]
exclude = ["logo.png", "examples/*", ".gitignore", ".travis.yml"]
version = "1.2.1"
authors = ["Ty Overby <ty@pre-alpha.com>", "Francesco Mazzoli <f@mazzo.li>", "David Tolnay <dtolnay@gmail.com>", "Daniel Griffen"]
exclude = ["logo.png", "tests/*", "examples/*", ".gitignore", ".travis.yml"]
publish = true
description = "A binary serialization / deserialization strategy that uses Serde for transforming structs into bytes and vice versa!"
documentation = "https://docs.rs/bincode"

42
third_party/rust/bincode/LICENSE.md vendored

@@ -1,21 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Ty Overby
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
The MIT License (MIT)
Copyright (c) 2014 Ty Overby
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

140
third_party/rust/bincode/readme.md vendored

@@ -1,70 +1,70 @@
# Bincode
<img align="right" src="./logo.png" />
![CI](https://github.com/servo/bincode/workflows/CI/badge.svg)
[![](https://meritbadge.herokuapp.com/bincode)](https://crates.io/crates/bincode)
[![](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT)
A compact encoder / decoder pair that uses a binary zero-fluff encoding scheme.
The size of the encoded object will be the same or smaller than the size that
the object takes up in memory in a running Rust program.
In addition to exposing two simple functions
(one that encodes to `Vec<u8>`, and one that decodes from `&[u8]`),
binary-encode exposes a Reader/Writer API that makes it work
perfectly with other stream-based APIs such as Rust files, network streams,
and the [flate2-rs](https://github.com/alexcrichton/flate2-rs) compression
library.
## [API Documentation](https://docs.rs/bincode/)
## Bincode in the wild
* [google/tarpc](https://github.com/google/tarpc): Bincode is used to serialize and deserialize networked RPC messages.
* [servo/webrender](https://github.com/servo/webrender): Bincode records webrender API calls for record/replay-style graphics debugging.
* [servo/ipc-channel](https://github.com/servo/ipc-channel): IPC-Channel uses Bincode to send structs between processes using a channel-like API.
## Example
```rust
use serde::{Serialize, Deserialize};
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Entity {
x: f32,
y: f32,
}
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct World(Vec<Entity>);
fn main() {
let world = World(vec![Entity { x: 0.0, y: 4.0 }, Entity { x: 10.0, y: 20.5 }]);
let encoded: Vec<u8> = bincode::serialize(&world).unwrap();
// 8 bytes for the length of the vector, 4 bytes per float.
assert_eq!(encoded.len(), 8 + 4 * 4);
let decoded: World = bincode::deserialize(&encoded[..]).unwrap();
assert_eq!(world, decoded);
}
```
## Details
The encoding (and thus decoding) proceeds unsurprisingly -- primitive
types are encoded according to the underlying `Writer`, tuples and
structs are encoded by encoding their fields one-by-one, and enums are
encoded by first writing out the tag representing the variant and
then the contents.
However, there are some implementation details to be aware of:
* `isize`/`usize` are encoded as `i64`/`u64`, for portability.
* enum variants are encoded as a `u32` instead of a `usize`.
`u32` is enough for all practical uses.
* `str` is encoded as `(u64, &[u8])`, where the `u64` is the number of
bytes contained in the encoded string.
# Bincode
<img align="right" src="./logo.png" />
[![Build Status](https://travis-ci.com/servo/bincode.svg)](https://travis-ci.com/servo/bincode)
[![](https://meritbadge.herokuapp.com/bincode)](https://crates.io/crates/bincode)
[![](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT)
A compact encoder / decoder pair that uses a binary zero-fluff encoding scheme.
The size of the encoded object will be the same or smaller than the size that
the object takes up in memory in a running Rust program.
In addition to exposing two simple functions
(one that encodes to `Vec<u8>`, and one that decodes from `&[u8]`),
binary-encode exposes a Reader/Writer API that makes it work
perfectly with other stream-based APIs such as Rust files, network streams,
and the [flate2-rs](https://github.com/alexcrichton/flate2-rs) compression
library.
## [API Documentation](https://docs.rs/bincode/)
## Bincode in the wild
* [google/tarpc](https://github.com/google/tarpc): Bincode is used to serialize and deserialize networked RPC messages.
* [servo/webrender](https://github.com/servo/webrender): Bincode records webrender API calls for record/replay-style graphics debugging.
* [servo/ipc-channel](https://github.com/servo/ipc-channel): IPC-Channel uses Bincode to send structs between processes using a channel-like API.
## Example
```rust
use serde::{Serialize, Deserialize};
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Entity {
x: f32,
y: f32,
}
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct World(Vec<Entity>);
fn main() {
let world = World(vec![Entity { x: 0.0, y: 4.0 }, Entity { x: 10.0, y: 20.5 }]);
let encoded: Vec<u8> = bincode::serialize(&world).unwrap();
// 8 bytes for the length of the vector, 4 bytes per float.
assert_eq!(encoded.len(), 8 + 4 * 4);
let decoded: World = bincode::deserialize(&encoded[..]).unwrap();
assert_eq!(world, decoded);
}
```
## Details
The encoding (and thus decoding) proceeds unsurprisingly -- primitive
types are encoded according to the underlying `Writer`, tuples and
structs are encoded by encoding their fields one-by-one, and enums are
encoded by first writing out the tag representing the variant and
then the contents.
However, there are some implementation details to be aware of:
* `isize`/`usize` are encoded as `i64`/`u64`, for portability.
* enum variants are encoded as a `u32` instead of a `usize`.
`u32` is enough for all practical uses.
* `str` is encoded as `(u64, &[u8])`, where the `u64` is the number of
bytes contained in the encoded string.

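A quick check of the "Details" bullets above, assuming `bincode = "1"` and `serde` with the `derive` feature (the plain `bincode::serialize` entry point uses fixed-width integers):

use serde::Serialize;

#[derive(Serialize)]
enum Shape {
    Circle(f32),
    Square(f32),
}

fn main() {
    // `str` is a u64 byte count followed by the raw bytes: 8 + 3 here.
    assert_eq!(bincode::serialize("abc").unwrap().len(), 8 + 3);

    // `usize` is widened to `u64` for portability: always 8 bytes.
    assert_eq!(bincode::serialize(&1usize).unwrap().len(), 8);

    // Enum variants carry a `u32` tag before the payload: 4 + 4 here.
    assert_eq!(bincode::serialize(&Shape::Square(1.0)).unwrap().len(), 4 + 4);
    let _ = Shape::Circle(0.0); // keep the other variant constructed
}
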
third_party/rust/bincode/src/config.rs vendored

@@ -1,253 +1,364 @@
use std::io::{Read, Write};
use self::EndianOption::*;
use self::LimitOption::*;
use super::{DefaultOptions, Options};
use de::read::BincodeRead;
use error::Result;
use serde;
/// A configuration builder whose options Bincode will use
/// while serializing and deserializing.
///
/// ### Options
/// Endianness: The endianness with which multi-byte integers will be read/written. *default: little endian*
/// Limit: The maximum number of bytes that will be read/written in a bincode serialize/deserialize. *default: unlimited*
///
/// ### Byte Limit Details
/// The purpose of byte-limiting is to prevent Denial-Of-Service attacks whereby malicious attackers get bincode
/// deserialization to crash your process by allocating too much memory or keeping a connection open for too long.
///
/// When a byte limit is set, bincode will return `Err` on any deserialization that goes over the limit, or any
/// serialization that goes over the limit.
#[derive(Clone, Debug)]
#[deprecated(
since = "1.3.0",
note = "please use the `DefaultOptions`/`Options` system instead"
)]
pub struct Config {
limit: LimitOption,
endian: EndianOption,
}
#[derive(Clone, Copy, Debug)]
enum LimitOption {
Unlimited,
Limited(u64),
}
#[derive(Clone, Copy, Debug)]
enum EndianOption {
Big,
Little,
Native,
}
macro_rules! config_map {
($self:expr, $opts:ident => $call:expr) => {
match ($self.limit, $self.endian) {
(Unlimited, Little) => {
let $opts = DefaultOptions::new()
.with_fixint_encoding()
.allow_trailing_bytes()
.with_no_limit()
.with_little_endian();
$call
}
(Unlimited, Big) => {
let $opts = DefaultOptions::new()
.with_fixint_encoding()
.allow_trailing_bytes()
.with_no_limit()
.with_big_endian();
$call
}
(Unlimited, Native) => {
let $opts = DefaultOptions::new()
.with_fixint_encoding()
.allow_trailing_bytes()
.with_no_limit()
.with_native_endian();
$call
}
(Limited(l), Little) => {
let $opts = DefaultOptions::new()
.with_fixint_encoding()
.allow_trailing_bytes()
.with_limit(l)
.with_little_endian();
$call
}
(Limited(l), Big) => {
let $opts = DefaultOptions::new()
.with_fixint_encoding()
.allow_trailing_bytes()
.with_limit(l)
.with_big_endian();
$call
}
(Limited(l), Native) => {
let $opts = DefaultOptions::new()
.with_fixint_encoding()
.allow_trailing_bytes()
.with_limit(l)
.with_native_endian();
$call
}
}
};
}
impl Config {
#[inline(always)]
pub(crate) fn new() -> Config {
Config {
limit: LimitOption::Unlimited,
endian: EndianOption::Little,
}
}
/// Sets the byte limit to be unlimited.
/// This is the default.
#[inline(always)]
pub fn no_limit(&mut self) -> &mut Self {
self.limit = LimitOption::Unlimited;
self
}
/// Sets the byte limit to `limit`.
#[inline(always)]
pub fn limit(&mut self, limit: u64) -> &mut Self {
self.limit = LimitOption::Limited(limit);
self
}
/// Sets the endianness to little-endian
/// This is the default.
#[inline(always)]
pub fn little_endian(&mut self) -> &mut Self {
self.endian = EndianOption::Little;
self
}
/// Sets the endianness to big-endian
#[inline(always)]
pub fn big_endian(&mut self) -> &mut Self {
self.endian = EndianOption::Big;
self
}
/// Sets the endianness to the machine-native endianness
#[inline(always)]
pub fn native_endian(&mut self) -> &mut Self {
self.endian = EndianOption::Native;
self
}
/// Serializes a serializable object into a `Vec` of bytes using this configuration
#[inline(always)]
pub fn serialize<T: ?Sized + serde::Serialize>(&self, t: &T) -> Result<Vec<u8>> {
config_map!(self, opts => ::internal::serialize(t, opts))
}
/// Returns the size that an object would be if serialized using Bincode with this configuration
#[inline(always)]
pub fn serialized_size<T: ?Sized + serde::Serialize>(&self, t: &T) -> Result<u64> {
config_map!(self, opts => ::internal::serialized_size(t, opts))
}
/// Serializes an object directly into a `Writer` using this configuration
///
/// If the serialization would take more bytes than allowed by the size limit, an error
/// is returned and *no bytes* will be written into the `Writer`
#[inline(always)]
pub fn serialize_into<W: Write, T: ?Sized + serde::Serialize>(
&self,
w: W,
t: &T,
) -> Result<()> {
config_map!(self, opts => ::internal::serialize_into(w, t, opts))
}
/// Deserializes a slice of bytes into an instance of `T` using this configuration
#[inline(always)]
pub fn deserialize<'a, T: serde::Deserialize<'a>>(&self, bytes: &'a [u8]) -> Result<T> {
config_map!(self, opts => ::internal::deserialize(bytes, opts))
}
/// TODO: document
#[doc(hidden)]
#[inline(always)]
pub fn deserialize_in_place<'a, R, T>(&self, reader: R, place: &mut T) -> Result<()>
where
R: BincodeRead<'a>,
T: serde::de::Deserialize<'a>,
{
config_map!(self, opts => ::internal::deserialize_in_place(reader, opts, place))
}
/// Deserializes a slice of bytes with state `seed` using this configuration.
#[inline(always)]
pub fn deserialize_seed<'a, T: serde::de::DeserializeSeed<'a>>(
&self,
seed: T,
bytes: &'a [u8],
) -> Result<T::Value> {
config_map!(self, opts => ::internal::deserialize_seed(seed, bytes, opts))
}
/// Deserializes an object directly from a `Read`er using this configuration
///
/// If this returns an `Error`, `reader` may be in an invalid state.
#[inline(always)]
pub fn deserialize_from<R: Read, T: serde::de::DeserializeOwned>(
&self,
reader: R,
) -> Result<T> {
config_map!(self, opts => ::internal::deserialize_from(reader, opts))
}
/// Deserializes an object directly from a `Read`er with state `seed` using this configuration
///
/// If this returns an `Error`, `reader` may be in an invalid state.
#[inline(always)]
pub fn deserialize_from_seed<'a, R: Read, T: serde::de::DeserializeSeed<'a>>(
&self,
seed: T,
reader: R,
) -> Result<T::Value> {
config_map!(self, opts => ::internal::deserialize_from_seed(seed, reader, opts))
}
/// Deserializes an object from a custom `BincodeRead`er using this configuration.
/// It is highly recommended to use `deserialize_from` unless you need to implement
/// `BincodeRead` for performance reasons.
///
/// If this returns an `Error`, `reader` may be in an invalid state.
#[inline(always)]
pub fn deserialize_from_custom<'a, R: BincodeRead<'a>, T: serde::de::DeserializeOwned>(
&self,
reader: R,
) -> Result<T> {
config_map!(self, opts => ::internal::deserialize_from_custom(reader, opts))
}
/// Deserializes an object from a custom `BincodeRead`er with state `seed` using this
/// configuration. It is highly recommended to use `deserialize_from` unless you need to
/// implement `BincodeRead` for performance reasons.
///
/// If this returns an `Error`, `reader` may be in an invalid state.
#[inline(always)]
pub fn deserialize_from_custom_seed<
'a,
R: BincodeRead<'a>,
T: serde::de::DeserializeSeed<'a>,
>(
&self,
seed: T,
reader: R,
) -> Result<T::Value> {
config_map!(self, opts => ::internal::deserialize_from_custom_seed(seed, reader, opts))
}
}
use super::internal::{Bounded, Infinite, SizeLimit};
use byteorder::{BigEndian, ByteOrder, LittleEndian, NativeEndian};
use de::read::BincodeRead;
use error::Result;
use serde;
use std::io::{Read, Write};
use std::marker::PhantomData;
use {DeserializerAcceptor, SerializerAcceptor};
use self::EndianOption::*;
use self::LimitOption::*;
struct DefaultOptions(Infinite);
pub(crate) trait Options {
type Limit: SizeLimit + 'static;
type Endian: ByteOrder + 'static;
fn limit(&mut self) -> &mut Self::Limit;
}
pub(crate) trait OptionsExt: Options + Sized {
fn with_no_limit(self) -> WithOtherLimit<Self, Infinite> {
WithOtherLimit::new(self, Infinite)
}
fn with_limit(self, limit: u64) -> WithOtherLimit<Self, Bounded> {
WithOtherLimit::new(self, Bounded(limit))
}
fn with_little_endian(self) -> WithOtherEndian<Self, LittleEndian> {
WithOtherEndian::new(self)
}
fn with_big_endian(self) -> WithOtherEndian<Self, BigEndian> {
WithOtherEndian::new(self)
}
fn with_native_endian(self) -> WithOtherEndian<Self, NativeEndian> {
WithOtherEndian::new(self)
}
}
impl<'a, O: Options> Options for &'a mut O {
type Limit = O::Limit;
type Endian = O::Endian;
#[inline(always)]
fn limit(&mut self) -> &mut Self::Limit {
(*self).limit()
}
}
impl<T: Options> OptionsExt for T {}
impl DefaultOptions {
fn new() -> DefaultOptions {
DefaultOptions(Infinite)
}
}
impl Options for DefaultOptions {
type Limit = Infinite;
type Endian = LittleEndian;
#[inline(always)]
fn limit(&mut self) -> &mut Infinite {
&mut self.0
}
}
#[derive(Clone, Copy)]
enum LimitOption {
Unlimited,
Limited(u64),
}
#[derive(Clone, Copy)]
enum EndianOption {
Big,
Little,
Native,
}
/// A configuration builder whose options Bincode will use
/// while serializing and deserializing.
///
/// ### Options
/// Endianness: The endianness with which multi-byte integers will be read/written. *default: little endian*
/// Limit: The maximum number of bytes that will be read/written in a bincode serialize/deserialize. *default: unlimited*
///
/// ### Byte Limit Details
/// The purpose of byte-limiting is to prevent Denial-Of-Service attacks whereby malicious attackers get bincode
/// deserialization to crash your process by allocating too much memory or keeping a connection open for too long.
///
/// When a byte limit is set, bincode will return `Err` on any deserialization that goes over the limit, or any
/// serialization that goes over the limit.
#[derive(Clone)]
pub struct Config {
limit: LimitOption,
endian: EndianOption,
}
pub(crate) struct WithOtherLimit<O: Options, L: SizeLimit> {
_options: O,
pub(crate) new_limit: L,
}
pub(crate) struct WithOtherEndian<O: Options, E: ByteOrder> {
options: O,
_endian: PhantomData<E>,
}
impl<O: Options, L: SizeLimit> WithOtherLimit<O, L> {
#[inline(always)]
pub(crate) fn new(options: O, limit: L) -> WithOtherLimit<O, L> {
WithOtherLimit {
_options: options,
new_limit: limit,
}
}
}
impl<O: Options, E: ByteOrder> WithOtherEndian<O, E> {
#[inline(always)]
pub(crate) fn new(options: O) -> WithOtherEndian<O, E> {
WithOtherEndian {
options: options,
_endian: PhantomData,
}
}
}
impl<O: Options, E: ByteOrder + 'static> Options for WithOtherEndian<O, E> {
type Limit = O::Limit;
type Endian = E;
#[inline(always)]
fn limit(&mut self) -> &mut O::Limit {
self.options.limit()
}
}
impl<O: Options, L: SizeLimit + 'static> Options for WithOtherLimit<O, L> {
type Limit = L;
type Endian = O::Endian;
fn limit(&mut self) -> &mut L {
&mut self.new_limit
}
}
macro_rules! config_map {
($self:expr, $opts:ident => $call:expr) => {
match ($self.limit, $self.endian) {
(Unlimited, Little) => {
let $opts = DefaultOptions::new().with_no_limit().with_little_endian();
$call
}
(Unlimited, Big) => {
let $opts = DefaultOptions::new().with_no_limit().with_big_endian();
$call
}
(Unlimited, Native) => {
let $opts = DefaultOptions::new().with_no_limit().with_native_endian();
$call
}
(Limited(l), Little) => {
let $opts = DefaultOptions::new().with_limit(l).with_little_endian();
$call
}
(Limited(l), Big) => {
let $opts = DefaultOptions::new().with_limit(l).with_big_endian();
$call
}
(Limited(l), Native) => {
let $opts = DefaultOptions::new().with_limit(l).with_native_endian();
$call
}
}
};
}
impl Config {
#[inline(always)]
pub(crate) fn new() -> Config {
Config {
limit: LimitOption::Unlimited,
endian: EndianOption::Little,
}
}
/// Sets the byte limit to be unlimited.
/// This is the default.
#[inline(always)]
pub fn no_limit(&mut self) -> &mut Self {
self.limit = LimitOption::Unlimited;
self
}
/// Sets the byte limit to `limit`.
#[inline(always)]
pub fn limit(&mut self, limit: u64) -> &mut Self {
self.limit = LimitOption::Limited(limit);
self
}
/// Sets the endianness to little-endian
/// This is the default.
#[inline(always)]
pub fn little_endian(&mut self) -> &mut Self {
self.endian = EndianOption::Little;
self
}
/// Sets the endianness to big-endian
#[inline(always)]
pub fn big_endian(&mut self) -> &mut Self {
self.endian = EndianOption::Big;
self
}
/// Sets the endianness to the machine-native endianness
#[inline(always)]
pub fn native_endian(&mut self) -> &mut Self {
self.endian = EndianOption::Native;
self
}
/// Serializes a serializable object into a `Vec` of bytes using this configuration
#[inline(always)]
pub fn serialize<T: ?Sized + serde::Serialize>(&self, t: &T) -> Result<Vec<u8>> {
config_map!(self, opts => ::internal::serialize(t, opts))
}
/// Returns the size that an object would be if serialized using Bincode with this configuration
#[inline(always)]
pub fn serialized_size<T: ?Sized + serde::Serialize>(&self, t: &T) -> Result<u64> {
config_map!(self, opts => ::internal::serialized_size(t, opts))
}
/// Serializes an object directly into a `Writer` using this configuration
///
/// If the serialization would take more bytes than allowed by the size limit, an error
/// is returned and *no bytes* will be written into the `Writer`
#[inline(always)]
pub fn serialize_into<W: Write, T: ?Sized + serde::Serialize>(
&self,
w: W,
t: &T,
) -> Result<()> {
config_map!(self, opts => ::internal::serialize_into(w, t, opts))
}
/// Deserializes a slice of bytes into an instance of `T` using this configuration
#[inline(always)]
pub fn deserialize<'a, T: serde::Deserialize<'a>>(&self, bytes: &'a [u8]) -> Result<T> {
config_map!(self, opts => ::internal::deserialize(bytes, opts))
}
/// TODO: document
#[doc(hidden)]
#[inline(always)]
pub fn deserialize_in_place<'a, R, T>(&self, reader: R, place: &mut T) -> Result<()>
where
R: BincodeRead<'a>,
T: serde::de::Deserialize<'a>,
{
config_map!(self, opts => ::internal::deserialize_in_place(reader, opts, place))
}
/// Deserializes a slice of bytes with state `seed` using this configuration.
#[inline(always)]
pub fn deserialize_seed<'a, T: serde::de::DeserializeSeed<'a>>(
&self,
seed: T,
bytes: &'a [u8],
) -> Result<T::Value> {
config_map!(self, opts => ::internal::deserialize_seed(seed, bytes, opts))
}
/// Deserializes an object directly from a `Read`er using this configuration
///
/// If this returns an `Error`, `reader` may be in an invalid state.
#[inline(always)]
pub fn deserialize_from<R: Read, T: serde::de::DeserializeOwned>(
&self,
reader: R,
) -> Result<T> {
config_map!(self, opts => ::internal::deserialize_from(reader, opts))
}
/// Deserializes an object directly from a `Read`er with state `seed` using this configuration
///
/// If this returns an `Error`, `reader` may be in an invalid state.
#[inline(always)]
pub fn deserialize_from_seed<'a, R: Read, T: serde::de::DeserializeSeed<'a>>(
&self,
seed: T,
reader: R,
) -> Result<T::Value> {
config_map!(self, opts => ::internal::deserialize_from_seed(seed, reader, opts))
}
/// Deserializes an object from a custom `BincodeRead`er using this configuration.
/// It is highly recommended to use `deserialize_from` unless you need to implement
/// `BincodeRead` for performance reasons.
///
/// If this returns an `Error`, `reader` may be in an invalid state.
#[inline(always)]
pub fn deserialize_from_custom<'a, R: BincodeRead<'a>, T: serde::de::DeserializeOwned>(
&self,
reader: R,
) -> Result<T> {
config_map!(self, opts => ::internal::deserialize_from_custom(reader, opts))
}
/// Deserializes an object from a custom `BincodeRead`er with state `seed` using this
/// configuration. It is highly recommended to use `deserialize_from` unless you need to
/// implement `BincodeRead` for performance reasons.
///
/// If this returns an `Error`, `reader` may be in an invalid state.
#[inline(always)]
pub fn deserialize_from_custom_seed<
'a,
R: BincodeRead<'a>,
T: serde::de::DeserializeSeed<'a>,
>(
&self,
seed: T,
reader: R,
) -> Result<T::Value> {
config_map!(self, opts => ::internal::deserialize_from_custom_seed(seed, reader, opts))
}
/// Executes the acceptor with a serde::Deserializer instance.
/// NOT A PART OF THE STABLE PUBLIC API
#[doc(hidden)]
pub fn with_deserializer<'a, A, R>(&self, reader: R, acceptor: A) -> A::Output
where
A: DeserializerAcceptor<'a>,
R: BincodeRead<'a>,
{
config_map!(self, opts => {
let mut deserializer = ::de::Deserializer::new(reader, opts);
acceptor.accept(&mut deserializer)
})
}
/// Executes the acceptor with a serde::Serializer instance.
/// NOT A PART OF THE STABLE PUBLIC API
#[doc(hidden)]
pub fn with_serializer<A, W>(&self, writer: W, acceptor: A) -> A::Output
where
A: SerializerAcceptor,
W: Write,
{
config_map!(self, opts => {
let mut serializer = ::ser::Serializer::new(writer, opts);
acceptor.accept(&mut serializer)
})
}
}

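Both halves of this diff expose the same mutate-then-serialize `Config` surface; a usage sketch, assuming `bincode = "1"` (the entry point is deprecated in favor of `Options` as of 1.3, hence the `allow`):

#[allow(deprecated)] // `Config` is deprecated in favor of `Options` as of bincode 1.3
fn main() {
    let mut cfg = bincode::config();
    // Builders mutate in place and return `&mut Self` for chaining.
    cfg.limit(1024).big_endian();

    let bytes = cfg.serialize(&0xDEAD_BEEFu32).unwrap();
    let back: u32 = cfg.deserialize(&bytes).unwrap();
    assert_eq!(back, 0xDEAD_BEEF);
}
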
29
third_party/rust/bincode/src/config/endian.rs vendored

@@ -1,29 +0,0 @@
use byteorder::{self, ByteOrder};
pub trait BincodeByteOrder {
type Endian: ByteOrder + 'static;
}
/// Little-endian byte ordering.
#[derive(Copy, Clone)]
pub struct LittleEndian;
/// Big-endian byte ordering.
#[derive(Copy, Clone)]
pub struct BigEndian;
/// The native byte ordering of the current system.
#[derive(Copy, Clone)]
pub struct NativeEndian;
impl BincodeByteOrder for LittleEndian {
type Endian = byteorder::LittleEndian;
}
impl BincodeByteOrder for BigEndian {
type Endian = byteorder::BigEndian;
}
impl BincodeByteOrder for NativeEndian {
type Endian = byteorder::NativeEndian;
}

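The three marker types above only select a `byteorder` implementation; the observable difference, sketched with the 1.3-style `Options` builder and fixed-width integers:

use bincode::Options;

fn main() {
    let n = 0x0102_0304u32;
    let le = bincode::DefaultOptions::new()
        .with_fixint_encoding()
        .with_little_endian()
        .serialize(&n)
        .unwrap();
    let be = bincode::DefaultOptions::new()
        .with_fixint_encoding()
        .with_big_endian()
        .serialize(&n)
        .unwrap();
    assert_eq!(le, [0x04, 0x03, 0x02, 0x01]); // little-endian, the default
    assert_eq!(be, [0x01, 0x02, 0x03, 0x04]);
}
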
682
third_party/rust/bincode/src/config/int.rs vendored

@@ -1,682 +0,0 @@
use std::io::Write;
use std::mem::size_of;
use super::Options;
use de::read::BincodeRead;
use error::{ErrorKind, Result};
pub trait IntEncoding {
/// Gets the size (in bytes) that a value would be serialized to.
fn u16_size(n: u16) -> u64;
/// Gets the size (in bytes) that a value would be serialized to.
fn u32_size(n: u32) -> u64;
/// Gets the size (in bytes) that a value would be serialized to.
fn u64_size(n: u64) -> u64;
/// Gets the size (in bytes) that a value would be serialized to.
fn i16_size(n: i16) -> u64;
/// Gets the size (in bytes) that a value would be serialized to.
fn i32_size(n: i32) -> u64;
/// Gets the size (in bytes) that a value would be serialized to.
fn i64_size(n: i64) -> u64;
#[inline(always)]
fn len_size(len: usize) -> u64 {
Self::u64_size(len as u64)
}
/// Serializes a sequence length.
#[inline(always)]
fn serialize_len<W: Write, O: Options>(
ser: &mut ::ser::Serializer<W, O>,
len: usize,
) -> Result<()> {
Self::serialize_u64(ser, len as u64)
}
fn serialize_u16<W: Write, O: Options>(
ser: &mut ::ser::Serializer<W, O>,
val: u16,
) -> Result<()>;
fn serialize_u32<W: Write, O: Options>(
ser: &mut ::ser::Serializer<W, O>,
val: u32,
) -> Result<()>;
fn serialize_u64<W: Write, O: Options>(
ser: &mut ::ser::Serializer<W, O>,
val: u64,
) -> Result<()>;
fn serialize_i16<W: Write, O: Options>(
ser: &mut ::ser::Serializer<W, O>,
val: i16,
) -> Result<()>;
fn serialize_i32<W: Write, O: Options>(
ser: &mut ::ser::Serializer<W, O>,
val: i32,
) -> Result<()>;
fn serialize_i64<W: Write, O: Options>(
ser: &mut ::ser::Serializer<W, O>,
val: i64,
) -> Result<()>;
/// Deserializes a sequence length.
#[inline(always)]
fn deserialize_len<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::de::Deserializer<R, O>,
) -> Result<usize> {
Self::deserialize_u64(de).and_then(cast_u64_to_usize)
}
fn deserialize_u16<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::de::Deserializer<R, O>,
) -> Result<u16>;
fn deserialize_u32<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::de::Deserializer<R, O>,
) -> Result<u32>;
fn deserialize_u64<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::de::Deserializer<R, O>,
) -> Result<u64>;
fn deserialize_i16<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::de::Deserializer<R, O>,
) -> Result<i16>;
fn deserialize_i32<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::de::Deserializer<R, O>,
) -> Result<i32>;
fn deserialize_i64<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::de::Deserializer<R, O>,
) -> Result<i64>;
serde_if_integer128! {
fn u128_size(v: u128) -> u64;
fn i128_size(v: i128) -> u64;
fn serialize_u128<W: Write, O: Options>(
ser: &mut ::Serializer<W, O>,
val: u128,
) -> Result<()>;
fn deserialize_u128<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<u128>;
fn serialize_i128<W: Write, O: Options>(
ser: &mut ::Serializer<W, O>,
val: i128,
) -> Result<()>;
fn deserialize_i128<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<i128>;
}
}
/// Fixed-size integer encoding.
///
/// * Fixed size integers are encoded directly
/// * Enum discriminants are encoded as u32
/// * Lengths and usize are encoded as u64
#[derive(Copy, Clone)]
pub struct FixintEncoding;
/// Variable-size integer encoding (excepting [ui]8).
///
/// Encoding an unsigned integer u (of any type excepting u8) works as follows:
///
/// 1. If `u < 251`, encode it as a single byte with that value.
/// 2. If `251 <= u < 2**16`, encode it as a literal byte 251, followed by a u16 with value `u`.
/// 3. If `2**16 <= u < 2**32`, encode it as a literal byte 252, followed by a u32 with value `u`.
/// 4. If `2**32 <= u < 2**64`, encode it as a literal byte 253, followed by a u64 with value `u`.
/// 5. If `2**64 <= u < 2**128`, encode it as a literal byte 254, followed by a
/// u128 with value `u`.
///
/// Then, for signed integers, we first convert to unsigned using the zigzag algorithm,
/// and then encode them as we do for unsigned integers generally. The reason we use this
/// algorithm is that it encodes those values which are close to zero in less bytes; the
/// obvious algorithm, where we encode the cast values, gives a very large encoding for all
/// negative values.
///
/// The zigzag algorithm is defined as follows:
///
/// ```ignore
/// fn zigzag(v: Signed) -> Unsigned {
/// match v {
/// 0 => 0,
/// v if v < 0 => |v| * 2 - 1
/// v if v > 0 => v * 2
/// }
/// }
/// ```
///
/// And works such that:
///
/// ```ignore
/// assert_eq!(zigzag(0), 0);
/// assert_eq!(zigzag(-1), 1);
/// assert_eq!(zigzag(1), 2);
/// assert_eq!(zigzag(-2), 3);
/// assert_eq!(zigzag(2), 4);
/// assert_eq!(zigzag(i64::min_value()), u64::max_value());
/// ```
///
/// Note that u256 and the like are unsupported by this format; if and when they are added to the
/// language, they may be supported via the extension point given by the 255 byte.
#[derive(Copy, Clone)]
pub struct VarintEncoding;
const SINGLE_BYTE_MAX: u8 = 250;
const U16_BYTE: u8 = 251;
const U32_BYTE: u8 = 252;
const U64_BYTE: u8 = 253;
const U128_BYTE: u8 = 254;
const DESERIALIZE_EXTENSION_POINT_ERR: &str = r#"
Byte 255 is treated as an extension point; it should not be encoding anything.
Do you have a mismatched bincode version or configuration?
"#;
impl VarintEncoding {
fn varint_size(n: u64) -> u64 {
if n <= SINGLE_BYTE_MAX as u64 {
1
} else if n <= u16::max_value() as u64 {
(1 + size_of::<u16>()) as u64
} else if n <= u32::max_value() as u64 {
(1 + size_of::<u32>()) as u64
} else {
(1 + size_of::<u64>()) as u64
}
}
#[inline(always)]
fn zigzag_encode(n: i64) -> u64 {
if n < 0 {
// let's avoid the edge case of i64::min_value()
// !n is equal to `-n - 1`, so this is:
// !n * 2 + 1 = 2(-n - 1) + 1 = -2n - 2 + 1 = -2n - 1
!(n as u64) * 2 + 1
} else {
(n as u64) * 2
}
}
#[inline(always)]
fn zigzag_decode(n: u64) -> i64 {
if n % 2 == 0 {
// positive number
(n / 2) as i64
} else {
// negative number
// !m * 2 + 1 = n
// !m * 2 = n - 1
// !m = (n - 1) / 2
// m = !((n - 1) / 2)
// since n is odd, floor(n / 2) = floor((n - 1) / 2)
!(n / 2) as i64
}
}
fn serialize_varint<W: Write, O: Options>(
ser: &mut ::ser::Serializer<W, O>,
n: u64,
) -> Result<()> {
if n <= SINGLE_BYTE_MAX as u64 {
ser.serialize_byte(n as u8)
} else if n <= u16::max_value() as u64 {
ser.serialize_byte(U16_BYTE)?;
ser.serialize_literal_u16(n as u16)
} else if n <= u32::max_value() as u64 {
ser.serialize_byte(U32_BYTE)?;
ser.serialize_literal_u32(n as u32)
} else {
ser.serialize_byte(U64_BYTE)?;
ser.serialize_literal_u64(n as u64)
}
}
fn deserialize_varint<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::de::Deserializer<R, O>,
) -> Result<u64> {
#[allow(ellipsis_inclusive_range_patterns)]
match de.deserialize_byte()? {
byte @ 0...SINGLE_BYTE_MAX => Ok(byte as u64),
U16_BYTE => Ok(de.deserialize_literal_u16()? as u64),
U32_BYTE => Ok(de.deserialize_literal_u32()? as u64),
U64_BYTE => de.deserialize_literal_u64(),
U128_BYTE => Err(Box::new(ErrorKind::Custom(
"Invalid value (u128 range): you may have a version or configuration disagreement?"
.to_string(),
))),
_ => Err(Box::new(ErrorKind::Custom(
DESERIALIZE_EXTENSION_POINT_ERR.to_string(),
))),
}
}
serde_if_integer128! {
// see zigzag_encode and zigzag_decode for implementation comments
#[inline(always)]
fn zigzag128_encode(n: i128) -> u128 {
if n < 0 {
!(n as u128) * 2 + 1
} else {
(n as u128) * 2
}
}
#[inline(always)]
fn zigzag128_decode(n: u128) -> i128 {
if n % 2 == 0 {
(n / 2) as i128
} else {
!(n / 2) as i128
}
}
fn varint128_size(n: u128) -> u64 {
if n <= SINGLE_BYTE_MAX as u128 {
1
} else if n <= u16::max_value() as u128 {
(1 + size_of::<u16>()) as u64
} else if n <= u32::max_value() as u128 {
(1 + size_of::<u32>()) as u64
} else if n <= u64::max_value() as u128 {
(1 + size_of::<u64>()) as u64
} else {
(1 + size_of::<u128>()) as u64
}
}
fn serialize_varint128<W: Write, O: Options>(
ser: &mut ::ser::Serializer<W, O>,
n: u128,
) -> Result<()> {
if n <= SINGLE_BYTE_MAX as u128 {
ser.serialize_byte(n as u8)
} else if n <= u16::max_value() as u128 {
ser.serialize_byte(U16_BYTE)?;
ser.serialize_literal_u16(n as u16)
} else if n <= u32::max_value() as u128 {
ser.serialize_byte(U32_BYTE)?;
ser.serialize_literal_u32(n as u32)
} else if n <= u64::max_value() as u128 {
ser.serialize_byte(U64_BYTE)?;
ser.serialize_literal_u64(n as u64)
} else {
ser.serialize_byte(U128_BYTE)?;
ser.serialize_literal_u128(n)
}
}
fn deserialize_varint128<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::de::Deserializer<R, O>,
) -> Result<u128> {
#[allow(ellipsis_inclusive_range_patterns)]
match de.deserialize_byte()? {
byte @ 0...SINGLE_BYTE_MAX => Ok(byte as u128),
U16_BYTE => Ok(de.deserialize_literal_u16()? as u128),
U32_BYTE => Ok(de.deserialize_literal_u32()? as u128),
U64_BYTE => Ok(de.deserialize_literal_u64()? as u128),
U128_BYTE => de.deserialize_literal_u128(),
_ => Err(Box::new(ErrorKind::Custom(DESERIALIZE_EXTENSION_POINT_ERR.to_string()))),
}
}
}
}
impl IntEncoding for FixintEncoding {
#[inline(always)]
fn u16_size(_: u16) -> u64 {
size_of::<u16>() as u64
}
#[inline(always)]
fn u32_size(_: u32) -> u64 {
size_of::<u32>() as u64
}
#[inline(always)]
fn u64_size(_: u64) -> u64 {
size_of::<u64>() as u64
}
#[inline(always)]
fn i16_size(_: i16) -> u64 {
size_of::<i16>() as u64
}
#[inline(always)]
fn i32_size(_: i32) -> u64 {
size_of::<i32>() as u64
}
#[inline(always)]
fn i64_size(_: i64) -> u64 {
size_of::<i64>() as u64
}
#[inline(always)]
fn serialize_u16<W: Write, O: Options>(ser: &mut ::Serializer<W, O>, val: u16) -> Result<()> {
ser.serialize_literal_u16(val)
}
#[inline(always)]
fn serialize_u32<W: Write, O: Options>(ser: &mut ::Serializer<W, O>, val: u32) -> Result<()> {
ser.serialize_literal_u32(val)
}
#[inline(always)]
fn serialize_u64<W: Write, O: Options>(ser: &mut ::Serializer<W, O>, val: u64) -> Result<()> {
ser.serialize_literal_u64(val)
}
#[inline(always)]
fn serialize_i16<W: Write, O: Options>(ser: &mut ::Serializer<W, O>, val: i16) -> Result<()> {
ser.serialize_literal_u16(val as u16)
}
#[inline(always)]
fn serialize_i32<W: Write, O: Options>(ser: &mut ::Serializer<W, O>, val: i32) -> Result<()> {
ser.serialize_literal_u32(val as u32)
}
#[inline(always)]
fn serialize_i64<W: Write, O: Options>(ser: &mut ::Serializer<W, O>, val: i64) -> Result<()> {
ser.serialize_literal_u64(val as u64)
}
#[inline(always)]
fn deserialize_u16<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<u16> {
de.deserialize_literal_u16()
}
#[inline(always)]
fn deserialize_u32<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<u32> {
de.deserialize_literal_u32()
}
#[inline(always)]
fn deserialize_u64<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<u64> {
de.deserialize_literal_u64()
}
#[inline(always)]
fn deserialize_i16<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<i16> {
Ok(de.deserialize_literal_u16()? as i16)
}
#[inline(always)]
fn deserialize_i32<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<i32> {
Ok(de.deserialize_literal_u32()? as i32)
}
#[inline(always)]
fn deserialize_i64<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<i64> {
Ok(de.deserialize_literal_u64()? as i64)
}
serde_if_integer128! {
#[inline(always)]
fn u128_size(_: u128) -> u64{
size_of::<u128>() as u64
}
#[inline(always)]
fn i128_size(_: i128) -> u64{
size_of::<i128>() as u64
}
#[inline(always)]
fn serialize_u128<W: Write, O: Options>(
ser: &mut ::Serializer<W, O>,
val: u128,
) -> Result<()> {
ser.serialize_literal_u128(val)
}
#[inline(always)]
fn serialize_i128<W: Write, O: Options>(
ser: &mut ::Serializer<W, O>,
val: i128,
) -> Result<()> {
ser.serialize_literal_u128(val as u128)
}
#[inline(always)]
fn deserialize_u128<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<u128> {
de.deserialize_literal_u128()
}
#[inline(always)]
fn deserialize_i128<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<i128> {
Ok(de.deserialize_literal_u128()? as i128)
}
}
}
impl IntEncoding for VarintEncoding {
#[inline(always)]
fn u16_size(n: u16) -> u64 {
Self::varint_size(n as u64)
}
#[inline(always)]
fn u32_size(n: u32) -> u64 {
Self::varint_size(n as u64)
}
#[inline(always)]
fn u64_size(n: u64) -> u64 {
Self::varint_size(n)
}
#[inline(always)]
fn i16_size(n: i16) -> u64 {
Self::varint_size(Self::zigzag_encode(n as i64))
}
#[inline(always)]
fn i32_size(n: i32) -> u64 {
Self::varint_size(Self::zigzag_encode(n as i64))
}
#[inline(always)]
fn i64_size(n: i64) -> u64 {
Self::varint_size(Self::zigzag_encode(n))
}
#[inline(always)]
fn serialize_u16<W: Write, O: Options>(ser: &mut ::Serializer<W, O>, val: u16) -> Result<()> {
Self::serialize_varint(ser, val as u64)
}
#[inline(always)]
fn serialize_u32<W: Write, O: Options>(ser: &mut ::Serializer<W, O>, val: u32) -> Result<()> {
Self::serialize_varint(ser, val as u64)
}
#[inline(always)]
fn serialize_u64<W: Write, O: Options>(ser: &mut ::Serializer<W, O>, val: u64) -> Result<()> {
Self::serialize_varint(ser, val)
}
#[inline(always)]
fn serialize_i16<W: Write, O: Options>(ser: &mut ::Serializer<W, O>, val: i16) -> Result<()> {
Self::serialize_varint(ser, Self::zigzag_encode(val as i64))
}
#[inline(always)]
fn serialize_i32<W: Write, O: Options>(ser: &mut ::Serializer<W, O>, val: i32) -> Result<()> {
Self::serialize_varint(ser, Self::zigzag_encode(val as i64))
}
#[inline(always)]
fn serialize_i64<W: Write, O: Options>(ser: &mut ::Serializer<W, O>, val: i64) -> Result<()> {
Self::serialize_varint(ser, Self::zigzag_encode(val))
}
#[inline(always)]
fn deserialize_u16<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<u16> {
Self::deserialize_varint(de).and_then(cast_u64_to_u16)
}
#[inline(always)]
fn deserialize_u32<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<u32> {
Self::deserialize_varint(de).and_then(cast_u64_to_u32)
}
#[inline(always)]
fn deserialize_u64<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<u64> {
Self::deserialize_varint(de)
}
#[inline(always)]
fn deserialize_i16<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<i16> {
Self::deserialize_varint(de)
.map(Self::zigzag_decode)
.and_then(cast_i64_to_i16)
}
#[inline(always)]
fn deserialize_i32<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<i32> {
Self::deserialize_varint(de)
.map(Self::zigzag_decode)
.and_then(cast_i64_to_i32)
}
#[inline(always)]
fn deserialize_i64<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<i64> {
Self::deserialize_varint(de).map(Self::zigzag_decode)
}
serde_if_integer128! {
#[inline(always)]
fn u128_size(n: u128) -> u64 {
Self::varint128_size(n)
}
#[inline(always)]
fn i128_size(n: i128) -> u64 {
Self::varint128_size(Self::zigzag128_encode(n))
}
#[inline(always)]
fn serialize_u128<W: Write, O: Options>(
ser: &mut ::Serializer<W, O>,
val: u128,
) -> Result<()> {
Self::serialize_varint128(ser, val)
}
#[inline(always)]
fn serialize_i128<W: Write, O: Options>(
ser: &mut ::Serializer<W, O>,
val: i128,
) -> Result<()> {
Self::serialize_varint128(ser, Self::zigzag128_encode(val))
}
#[inline(always)]
fn deserialize_u128<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<u128> {
Self::deserialize_varint128(de)
}
#[inline(always)]
fn deserialize_i128<'de, R: BincodeRead<'de>, O: Options>(
de: &mut ::Deserializer<R, O>,
) -> Result<i128> {
Self::deserialize_varint128(de).map(Self::zigzag128_decode)
}
}
}
fn cast_u64_to_usize(n: u64) -> Result<usize> {
if n <= usize::max_value() as u64 {
Ok(n as usize)
} else {
Err(Box::new(ErrorKind::Custom(format!(
"Invalid size {}: sizes must fit in a usize (0 to {})",
n,
usize::max_value()
))))
}
}
fn cast_u64_to_u32(n: u64) -> Result<u32> {
if n <= u32::max_value() as u64 {
Ok(n as u32)
} else {
Err(Box::new(ErrorKind::Custom(format!(
"Invalid u32 {}: you may have a version disagreement?",
n,
))))
}
}
fn cast_u64_to_u16(n: u64) -> Result<u16> {
if n <= u16::max_value() as u64 {
Ok(n as u16)
} else {
Err(Box::new(ErrorKind::Custom(format!(
"Invalid u16 {}: you may have a version disagreement?",
n,
))))
}
}
fn cast_i64_to_i32(n: i64) -> Result<i32> {
if n <= i32::max_value() as i64 && n >= i32::min_value() as i64 {
Ok(n as i32)
} else {
Err(Box::new(ErrorKind::Custom(format!(
"Invalid i32 {}: you may have a version disagreement?",
n,
))))
}
}
fn cast_i64_to_i16(n: i64) -> Result<i16> {
if n <= i16::max_value() as i64 && n >= i16::min_value() as i64 {
Ok(n as i16)
} else {
Err(Box::new(ErrorKind::Custom(format!(
"Invalid i16 {}: you may have a version disagreement?",
n,
))))
}
}
#[cfg(test)]
mod test {
use super::VarintEncoding;
#[test]
fn test_zigzag_encode() {
let zigzag = VarintEncoding::zigzag_encode;
assert_eq!(zigzag(0), 0);
for x in 1..512 {
assert_eq!(zigzag(x), (x as u64) * 2);
assert_eq!(zigzag(-x), (x as u64) * 2 - 1);
}
}
#[test]
fn test_zigzag_decode() {
// zigzag'
let zigzagp = VarintEncoding::zigzag_decode;
for x in (0..512).map(|x| x * 2) {
assert_eq!(zigzagp(x), x as i64 / 2);
assert_eq!(zigzagp(x + 1), -(x as i64) / 2 - 1);
}
}
#[test]
fn test_zigzag_edge_cases() {
let (zigzag, zigzagp) = (VarintEncoding::zigzag_encode, VarintEncoding::zigzag_decode);
assert_eq!(zigzag(i64::max_value()), u64::max_value() - 1);
assert_eq!(zigzag(i64::min_value()), u64::max_value());
assert_eq!(zigzagp(u64::max_value() - 1), i64::max_value());
assert_eq!(zigzagp(u64::max_value()), i64::min_value());
}
}

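The varint thresholds documented above are observable from the encoded lengths; a sketch assuming `bincode = "1.3"`, whose `DefaultOptions` uses this varint/zigzag encoding:

use bincode::Options;

fn main() {
    let opts = bincode::DefaultOptions::new();

    // Values up to SINGLE_BYTE_MAX (250) occupy a single byte.
    assert_eq!(opts.serialize(&250u64).unwrap().len(), 1);
    // 251 takes the U16_BYTE marker plus a literal u16: 3 bytes.
    assert_eq!(opts.serialize(&251u64).unwrap().len(), 3);
    // Past u16::MAX, the U32_BYTE marker is used: 5 bytes.
    assert_eq!(opts.serialize(&65_536u64).unwrap().len(), 5);

    // Zigzag keeps small negatives small: -1 encodes to 1, one byte.
    assert_eq!(opts.serialize(&-1i64).unwrap().len(), 1);
}
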
49
third_party/rust/bincode/src/config/limit.rs vendored

@@ -1,49 +0,0 @@
use error::{ErrorKind, Result};
/// A trait for stopping serialization and deserialization when a certain limit has been reached.
pub trait SizeLimit {
/// Tells the SizeLimit that a certain number of bytes has been
/// read or written. Returns Err if the limit has been exceeded.
fn add(&mut self, n: u64) -> Result<()>;
/// Returns the hard limit (if one exists)
fn limit(&self) -> Option<u64>;
}
/// A SizeLimit that restricts serialized or deserialized messages from
/// exceeding a certain byte length.
#[derive(Copy, Clone)]
pub struct Bounded(pub u64);
/// A SizeLimit without a limit!
/// Use this if you don't care about the size of encoded or decoded messages.
#[derive(Copy, Clone)]
pub struct Infinite;
impl SizeLimit for Bounded {
#[inline(always)]
fn add(&mut self, n: u64) -> Result<()> {
if self.0 >= n {
self.0 -= n;
Ok(())
} else {
Err(Box::new(ErrorKind::SizeLimit))
}
}
#[inline(always)]
fn limit(&self) -> Option<u64> {
Some(self.0)
}
}
impl SizeLimit for Infinite {
#[inline(always)]
fn add(&mut self, _: u64) -> Result<()> {
Ok(())
}
#[inline(always)]
fn limit(&self) -> Option<u64> {
None
}
}

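What `Bounded` buys in practice: once a budget is set, over-limit work fails instead of allocating. A sketch via the `Options` builder that wires a `SizeLimit` in (assuming `bincode = "1.3"`):

use bincode::Options;

fn main() {
    // `with_limit(8)` installs a `Bounded(8)` as defined above.
    let opts = bincode::DefaultOptions::new().with_limit(8);

    // 64 payload bytes plus a length prefix blow the 8-byte budget.
    assert!(opts.serialize(&vec![0u8; 64]).is_err());

    // One byte fits, and deserialization is bounded the same way.
    let bytes = opts.serialize(&7u8).unwrap();
    let n: u8 = opts.deserialize(&bytes).unwrap();
    assert_eq!(n, 7);
}
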
362
third_party/rust/bincode/src/config/mod.rs vendored

@@ -1,362 +0,0 @@
use de::read::BincodeRead;
use error::Result;
use serde;
use std::io::{Read, Write};
use std::marker::PhantomData;
pub(crate) use self::endian::BincodeByteOrder;
pub(crate) use self::int::IntEncoding;
pub(crate) use self::internal::*;
pub(crate) use self::limit::SizeLimit;
pub(crate) use self::trailing::TrailingBytes;
pub use self::endian::{BigEndian, LittleEndian, NativeEndian};
pub use self::int::{FixintEncoding, VarintEncoding};
pub use self::legacy::*;
pub use self::limit::{Bounded, Infinite};
pub use self::trailing::{AllowTrailing, RejectTrailing};
mod endian;
mod int;
mod legacy;
mod limit;
mod trailing;
/// The default options for bincode serialization/deserialization.
///
/// ### Defaults
/// By default bincode will use little-endian encoding for multi-byte integers, and will not
/// limit the number of serialized/deserialized bytes.
#[derive(Copy, Clone)]
pub struct DefaultOptions(Infinite);
impl DefaultOptions {
/// Get a default configuration object.
///
/// ### Default Configuration:
///
/// | Byte limit | Endianness | Int Encoding | Trailing Behavior |
/// |------------|------------|--------------|-------------------|
/// | Unlimited | Little | Varint | Reject |
pub fn new() -> DefaultOptions {
DefaultOptions(Infinite)
}
}
impl Default for DefaultOptions {
fn default() -> Self {
Self::new()
}
}
impl InternalOptions for DefaultOptions {
type Limit = Infinite;
type Endian = LittleEndian;
type IntEncoding = VarintEncoding;
type Trailing = RejectTrailing;
#[inline(always)]
fn limit(&mut self) -> &mut Infinite {
&mut self.0
}
}
/// A configuration builder trait whose options Bincode will use
/// while serializing and deserializing.
///
/// ### Options
/// Endianness: The endianness with which multi-byte integers will be read/written. *default: little endian*
///
/// Limit: The maximum number of bytes that will be read/written in a bincode serialize/deserialize. *default: unlimited*
///
/// Int Encoding: The encoding used for numbers, enum discriminants, and lengths. *default: varint*
///
/// Trailing Behavior: The behavior when there are trailing bytes left over in a slice after deserialization. *default: reject*
///
/// ### Byte Limit Details
/// The purpose of byte-limiting is to prevent Denial-Of-Service attacks whereby malicious attackers get bincode
/// deserialization to crash your process by allocating too much memory or keeping a connection open for too long.
///
/// When a byte limit is set, bincode will return `Err` on any deserialization that goes over the limit, or any
/// serialization that goes over the limit.
pub trait Options: InternalOptions + Sized {
/// Sets the byte limit to be unlimited.
/// This is the default.
fn with_no_limit(self) -> WithOtherLimit<Self, Infinite> {
WithOtherLimit::new(self, Infinite)
}
/// Sets the byte limit to `limit`.
fn with_limit(self, limit: u64) -> WithOtherLimit<Self, Bounded> {
WithOtherLimit::new(self, Bounded(limit))
}
/// Sets the endianness to little-endian
/// This is the default.
fn with_little_endian(self) -> WithOtherEndian<Self, LittleEndian> {
WithOtherEndian::new(self)
}
/// Sets the endianness to big-endian
fn with_big_endian(self) -> WithOtherEndian<Self, BigEndian> {
WithOtherEndian::new(self)
}
/// Sets the endianness to the machine-native endianness
fn with_native_endian(self) -> WithOtherEndian<Self, NativeEndian> {
WithOtherEndian::new(self)
}
/// Sets the length encoding to varint
fn with_varint_encoding(self) -> WithOtherIntEncoding<Self, VarintEncoding> {
WithOtherIntEncoding::new(self)
}
/// Sets the length encoding to be fixed
fn with_fixint_encoding(self) -> WithOtherIntEncoding<Self, FixintEncoding> {
WithOtherIntEncoding::new(self)
}
/// Sets the deserializer to reject trailing bytes
fn reject_trailing_bytes(self) -> WithOtherTrailing<Self, RejectTrailing> {
WithOtherTrailing::new(self)
}
/// Sets the deserializer to allow trailing bytes
fn allow_trailing_bytes(self) -> WithOtherTrailing<Self, AllowTrailing> {
WithOtherTrailing::new(self)
}
/// Serializes a serializable object into a `Vec` of bytes using this configuration
#[inline(always)]
fn serialize<S: ?Sized + serde::Serialize>(self, t: &S) -> Result<Vec<u8>> {
::internal::serialize(t, self)
}
/// Returns the size that an object would be if serialized using Bincode with this configuration
#[inline(always)]
fn serialized_size<T: ?Sized + serde::Serialize>(self, t: &T) -> Result<u64> {
::internal::serialized_size(t, self)
}
/// Serializes an object directly into a `Writer` using this configuration
///
/// If the serialization would take more bytes than allowed by the size limit, an error
/// is returned and *no bytes* will be written into the `Writer`
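///
/// A minimal sketch (illustrative; `Vec<u8>` stands in for any `Write`r):
///
/// ```ignore
/// use bincode::Options;
///
/// let mut buf: Vec<u8> = Vec::new();
/// bincode::options().serialize_into(&mut buf, &42u16).unwrap();
/// ```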
#[inline(always)]
fn serialize_into<W: Write, T: ?Sized + serde::Serialize>(self, w: W, t: &T) -> Result<()> {
::internal::serialize_into(w, t, self)
}
/// Deserializes a slice of bytes into an instance of `T` using this configuration
#[inline(always)]
fn deserialize<'a, T: serde::Deserialize<'a>>(self, bytes: &'a [u8]) -> Result<T> {
::internal::deserialize(bytes, self)
}
/// Deserializes an instance of `T` from the given `BincodeRead`er directly into `place` using this configuration.
#[doc(hidden)]
#[inline(always)]
fn deserialize_in_place<'a, R, T>(self, reader: R, place: &mut T) -> Result<()>
where
R: BincodeRead<'a>,
T: serde::de::Deserialize<'a>,
{
::internal::deserialize_in_place(reader, self, place)
}
/// Deserializes a slice of bytes with state `seed` using this configuration.
#[inline(always)]
fn deserialize_seed<'a, T: serde::de::DeserializeSeed<'a>>(
self,
seed: T,
bytes: &'a [u8],
) -> Result<T::Value> {
::internal::deserialize_seed(seed, bytes, self)
}
/// Deserializes an object directly from a `Read`er using this configuration
///
/// If this returns an `Error`, `reader` may be in an invalid state.
#[inline(always)]
fn deserialize_from<R: Read, T: serde::de::DeserializeOwned>(self, reader: R) -> Result<T> {
::internal::deserialize_from(reader, self)
}
/// Deserializes an object directly from a `Read`er with state `seed` using this configuration
///
/// If this returns an `Error`, `reader` may be in an invalid state.
#[inline(always)]
fn deserialize_from_seed<'a, R: Read, T: serde::de::DeserializeSeed<'a>>(
self,
seed: T,
reader: R,
) -> Result<T::Value> {
::internal::deserialize_from_seed(seed, reader, self)
}
/// Deserializes an object from a custom `BincodeRead`er using this configuration.
/// It is highly recommended to use `deserialize_from` unless you need to implement
/// `BincodeRead` for performance reasons.
///
/// If this returns an `Error`, `reader` may be in an invalid state.
#[inline(always)]
fn deserialize_from_custom<'a, R: BincodeRead<'a>, T: serde::de::DeserializeOwned>(
self,
reader: R,
) -> Result<T> {
::internal::deserialize_from_custom(reader, self)
}
/// Deserializes an object from a custom `BincodeRead`er with state `seed` using this
/// configuration. It is highly recommended to use `deserialize_from` unless you need to
/// implement `BincodeRead` for performance reasons.
///
/// If this returns an `Error`, `reader` may be in an invalid state.
#[inline(always)]
fn deserialize_from_custom_seed<'a, R: BincodeRead<'a>, T: serde::de::DeserializeSeed<'a>>(
self,
seed: T,
reader: R,
) -> Result<T::Value> {
::internal::deserialize_from_custom_seed(seed, reader, self)
}
}
impl<T: InternalOptions> Options for T {}
/// A configuration struct with a user-specified byte limit
#[derive(Clone, Copy)]
pub struct WithOtherLimit<O: Options, L: SizeLimit> {
_options: O,
pub(crate) new_limit: L,
}
/// A configuration struct with a user-specified endian order
#[derive(Clone, Copy)]
pub struct WithOtherEndian<O: Options, E: BincodeByteOrder> {
options: O,
_endian: PhantomData<E>,
}
/// A configuration struct with a user-specified length encoding
pub struct WithOtherIntEncoding<O: Options, I: IntEncoding> {
options: O,
_length: PhantomData<I>,
}
/// A configuration struct with a user-specified trailing bytes behavior.
pub struct WithOtherTrailing<O: Options, T: TrailingBytes> {
options: O,
_trailing: PhantomData<T>,
}
impl<O: Options, L: SizeLimit> WithOtherLimit<O, L> {
#[inline(always)]
pub(crate) fn new(options: O, limit: L) -> WithOtherLimit<O, L> {
WithOtherLimit {
_options: options,
new_limit: limit,
}
}
}
impl<O: Options, E: BincodeByteOrder> WithOtherEndian<O, E> {
#[inline(always)]
pub(crate) fn new(options: O) -> WithOtherEndian<O, E> {
WithOtherEndian {
options,
_endian: PhantomData,
}
}
}
impl<O: Options, I: IntEncoding> WithOtherIntEncoding<O, I> {
#[inline(always)]
pub(crate) fn new(options: O) -> WithOtherIntEncoding<O, I> {
WithOtherIntEncoding {
options,
_length: PhantomData,
}
}
}
impl<O: Options, T: TrailingBytes> WithOtherTrailing<O, T> {
#[inline(always)]
pub(crate) fn new(options: O) -> WithOtherTrailing<O, T> {
WithOtherTrailing {
options,
_trailing: PhantomData,
}
}
}
impl<O: Options, E: BincodeByteOrder + 'static> InternalOptions for WithOtherEndian<O, E> {
type Limit = O::Limit;
type Endian = E;
type IntEncoding = O::IntEncoding;
type Trailing = O::Trailing;
#[inline(always)]
fn limit(&mut self) -> &mut O::Limit {
self.options.limit()
}
}
impl<O: Options, L: SizeLimit + 'static> InternalOptions for WithOtherLimit<O, L> {
type Limit = L;
type Endian = O::Endian;
type IntEncoding = O::IntEncoding;
type Trailing = O::Trailing;
fn limit(&mut self) -> &mut L {
&mut self.new_limit
}
}
impl<O: Options, I: IntEncoding + 'static> InternalOptions for WithOtherIntEncoding<O, I> {
type Limit = O::Limit;
type Endian = O::Endian;
type IntEncoding = I;
type Trailing = O::Trailing;
fn limit(&mut self) -> &mut O::Limit {
self.options.limit()
}
}
impl<O: Options, T: TrailingBytes + 'static> InternalOptions for WithOtherTrailing<O, T> {
type Limit = O::Limit;
type Endian = O::Endian;
type IntEncoding = O::IntEncoding;
type Trailing = T;
fn limit(&mut self) -> &mut O::Limit {
self.options.limit()
}
}
mod internal {
use super::*;
pub trait InternalOptions {
type Limit: SizeLimit + 'static;
type Endian: BincodeByteOrder + 'static;
type IntEncoding: IntEncoding + 'static;
type Trailing: TrailingBytes + 'static;
fn limit(&mut self) -> &mut Self::Limit;
}
impl<'a, O: InternalOptions> InternalOptions for &'a mut O {
type Limit = O::Limit;
type Endian = O::Endian;
type IntEncoding = O::IntEncoding;
type Trailing = O::Trailing;
#[inline(always)]
fn limit(&mut self) -> &mut Self::Limit {
(*self).limit()
}
}
}


@@ -1,37 +0,0 @@
use de::read::SliceReader;
use {ErrorKind, Result};
/// A trait for erroring deserialization if not all bytes were read.
pub trait TrailingBytes {
/// Checks a given slice reader to determine if deserialization used all bytes in the slice.
fn check_end(reader: &SliceReader) -> Result<()>;
}
/// A TrailingBytes config that will allow trailing bytes in slices after deserialization.
#[derive(Copy, Clone)]
pub struct AllowTrailing;
/// A TrailingBytes config that will cause bincode to produce an error if bytes are left over in the slice when deserialization is complete.
#[derive(Copy, Clone)]
pub struct RejectTrailing;
impl TrailingBytes for AllowTrailing {
#[inline(always)]
fn check_end(_reader: &SliceReader) -> Result<()> {
Ok(())
}
}
impl TrailingBytes for RejectTrailing {
#[inline(always)]
fn check_end(reader: &SliceReader) -> Result<()> {
if reader.is_finished() {
Ok(())
} else {
Err(Box::new(ErrorKind::Custom(
"Slice had bytes remaining after deserialization".to_string(),
)))
}
}
}
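// An illustrative sketch of the observable difference, going through the
// public `Options` builder (not part of this file; the bytes are arbitrary):
//
// use bincode::Options;
//
// let bytes = [0u8, 0, 0, 0, 99]; // a fixint u32 followed by one stray byte
// // `RejectTrailing` (the default) errors on the leftover byte:
// assert!(bincode::options().with_fixint_encoding().deserialize::<u32>(&bytes).is_err());
// // `AllowTrailing` ignores it:
// assert!(bincode::options()
//     .with_fixint_encoding()
//     .allow_trailing_bytes()
//     .deserialize::<u32>(&bytes)
//     .is_ok());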

980
third_party/rust/bincode/src/de/mod.rs vendored

@@ -1,515 +1,465 @@
use config::{BincodeByteOrder, Options};
use std::io::Read;
use self::read::{BincodeRead, IoReader, SliceReader};
use byteorder::ReadBytesExt;
use config::{IntEncoding, SizeLimit};
use serde;
use serde::de::Error as DeError;
use serde::de::IntoDeserializer;
use {Error, ErrorKind, Result};
/// Specialized ways to read data into bincode.
pub mod read;
/// A Deserializer that reads bytes from a buffer.
///
/// This struct should rarely be used.
/// In most cases, prefer the `deserialize_from` function.
///
/// The ByteOrder that is chosen will impact the endianness that
/// is used to read integers out of the reader.
///
/// ```ignore
/// let mut deserializer = Deserializer::with_reader(&mut some_reader, options);
/// let value = serde::Deserialize::deserialize(&mut deserializer)?;
/// ```
pub struct Deserializer<R, O: Options> {
pub(crate) reader: R,
options: O,
}
macro_rules! impl_deserialize_literal {
($name:ident : $ty:ty = $read:ident()) => {
#[inline]
pub(crate) fn $name(&mut self) -> Result<$ty> {
self.read_literal_type::<$ty>()?;
self.reader
.$read::<<O::Endian as BincodeByteOrder>::Endian>()
.map_err(Into::into)
}
};
}
impl<'de, IR: Read, O: Options> Deserializer<IoReader<IR>, O> {
/// Creates a new Deserializer with a given `Read`er and options.
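///
/// An illustrative sketch (the file name is hypothetical):
///
/// ```ignore
/// let file = std::fs::File::open("data.bin")?;
/// let mut deserializer = Deserializer::with_reader(file, bincode::options());
/// let value: u64 = serde::Deserialize::deserialize(&mut deserializer)?;
/// ```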
pub fn with_reader(r: IR, options: O) -> Self {
Deserializer {
reader: IoReader::new(r),
options,
}
}
}
impl<'de, O: Options> Deserializer<SliceReader<'de>, O> {
/// Creates a new Deserializer that will read from the given slice.
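///
/// An illustrative sketch (assuming `bytes` holds bincode-encoded data):
///
/// ```ignore
/// let mut deserializer = Deserializer::from_slice(&bytes[..], bincode::options());
/// let value: u32 = serde::Deserialize::deserialize(&mut deserializer)?;
/// ```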
pub fn from_slice(slice: &'de [u8], options: O) -> Self {
Deserializer {
reader: SliceReader::new(slice),
options,
}
}
}
impl<'de, R: BincodeRead<'de>, O: Options> Deserializer<R, O> {
/// Creates a new Deserializer with the given `BincodeRead`er
pub fn with_bincode_read(r: R, options: O) -> Deserializer<R, O> {
Deserializer { reader: r, options }
}
pub(crate) fn deserialize_byte(&mut self) -> Result<u8> {
self.read_literal_type::<u8>()?;
self.reader.read_u8().map_err(Into::into)
}
impl_deserialize_literal! { deserialize_literal_u16 : u16 = read_u16() }
impl_deserialize_literal! { deserialize_literal_u32 : u32 = read_u32() }
impl_deserialize_literal! { deserialize_literal_u64 : u64 = read_u64() }
serde_if_integer128! {
impl_deserialize_literal! { deserialize_literal_u128 : u128 = read_u128() }
}
fn read_bytes(&mut self, count: u64) -> Result<()> {
self.options.limit().add(count)
}
fn read_literal_type<T>(&mut self) -> Result<()> {
use std::mem::size_of;
self.read_bytes(size_of::<T>() as u64)
}
fn read_vec(&mut self) -> Result<Vec<u8>> {
let len = O::IntEncoding::deserialize_len(self)?;
self.read_bytes(len as u64)?;
self.reader.get_byte_buffer(len)
}
fn read_string(&mut self) -> Result<String> {
let vec = self.read_vec()?;
String::from_utf8(vec).map_err(|e| ErrorKind::InvalidUtf8Encoding(e.utf8_error()).into())
}
}
macro_rules! impl_deserialize_int {
($name:ident = $visitor_method:ident ($dser_method:ident)) => {
#[inline]
fn $name<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
visitor.$visitor_method(O::IntEncoding::$dser_method(self)?)
}
};
}
impl<'de, 'a, R, O> serde::Deserializer<'de> for &'a mut Deserializer<R, O>
where
R: BincodeRead<'de>,
O: Options,
{
type Error = Error;
#[inline]
fn deserialize_any<V>(self, _visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
Err(Box::new(ErrorKind::DeserializeAnyNotSupported))
}
fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
match self.deserialize_byte()? {
1 => visitor.visit_bool(true),
0 => visitor.visit_bool(false),
value => Err(ErrorKind::InvalidBoolEncoding(value).into()),
}
}
impl_deserialize_int!(deserialize_u16 = visit_u16(deserialize_u16));
impl_deserialize_int!(deserialize_u32 = visit_u32(deserialize_u32));
impl_deserialize_int!(deserialize_u64 = visit_u64(deserialize_u64));
impl_deserialize_int!(deserialize_i16 = visit_i16(deserialize_i16));
impl_deserialize_int!(deserialize_i32 = visit_i32(deserialize_i32));
impl_deserialize_int!(deserialize_i64 = visit_i64(deserialize_i64));
fn deserialize_f32<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
self.read_literal_type::<f32>()?;
let value = self
.reader
.read_f32::<<O::Endian as BincodeByteOrder>::Endian>()?;
visitor.visit_f32(value)
}
fn deserialize_f64<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
self.read_literal_type::<f64>()?;
let value = self
.reader
.read_f64::<<O::Endian as BincodeByteOrder>::Endian>()?;
visitor.visit_f64(value)
}
serde_if_integer128! {
impl_deserialize_int!(deserialize_u128 = visit_u128(deserialize_u128));
impl_deserialize_int!(deserialize_i128 = visit_i128(deserialize_i128));
}
#[inline]
fn deserialize_u8<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
visitor.visit_u8(self.deserialize_byte()? as u8)
}
#[inline]
fn deserialize_i8<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
visitor.visit_i8(self.deserialize_byte()? as i8)
}
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_char<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
use std::str;
let error = || ErrorKind::InvalidCharEncoding.into();
let mut buf = [0u8; 4];
// Look at the first byte to see how many bytes must be read
self.reader.read_exact(&mut buf[..1])?;
let width = utf8_char_width(buf[0]);
if width == 1 {
return visitor.visit_char(buf[0] as char);
}
if width == 0 {
return Err(error());
}
if self.reader.read_exact(&mut buf[1..width]).is_err() {
return Err(error());
}
let res = str::from_utf8(&buf[..width])
.ok()
.and_then(|s| s.chars().next())
.ok_or_else(error)?;
visitor.visit_char(res)
}
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
let len = O::IntEncoding::deserialize_len(self)?;
self.read_bytes(len as u64)?;
self.reader.forward_read_str(len, visitor)
}
fn deserialize_string<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
visitor.visit_string(self.read_string()?)
}
fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
let len = O::IntEncoding::deserialize_len(self)?;
self.read_bytes(len as u64)?;
self.reader.forward_read_bytes(len, visitor)
}
fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
visitor.visit_byte_buf(self.read_vec()?)
}
fn deserialize_enum<V>(
self,
_enum: &'static str,
_variants: &'static [&'static str],
visitor: V,
) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
impl<'de, 'a, R: 'a, O> serde::de::EnumAccess<'de> for &'a mut Deserializer<R, O>
where
R: BincodeRead<'de>,
O: Options,
{
type Error = Error;
type Variant = Self;
fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant)>
where
V: serde::de::DeserializeSeed<'de>,
{
let idx: u32 = O::IntEncoding::deserialize_u32(self)?;
let val: Result<_> = seed.deserialize(idx.into_deserializer());
Ok((val?, self))
}
}
visitor.visit_enum(self)
}
fn deserialize_tuple<V>(self, len: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
struct Access<'a, R: Read + 'a, O: Options + 'a> {
deserializer: &'a mut Deserializer<R, O>,
len: usize,
}
impl<'de, 'a, 'b: 'a, R: BincodeRead<'de> + 'b, O: Options> serde::de::SeqAccess<'de>
for Access<'a, R, O>
{
type Error = Error;
fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
where
T: serde::de::DeserializeSeed<'de>,
{
if self.len > 0 {
self.len -= 1;
let value =
serde::de::DeserializeSeed::deserialize(seed, &mut *self.deserializer)?;
Ok(Some(value))
} else {
Ok(None)
}
}
fn size_hint(&self) -> Option<usize> {
Some(self.len)
}
}
visitor.visit_seq(Access {
deserializer: self,
len,
})
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
let value: u8 = serde::de::Deserialize::deserialize(&mut *self)?;
match value {
0 => visitor.visit_none(),
1 => visitor.visit_some(&mut *self),
v => Err(ErrorKind::InvalidTagEncoding(v as usize).into()),
}
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
let len = O::IntEncoding::deserialize_len(self)?;
self.deserialize_tuple(len, visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
struct Access<'a, R: Read + 'a, O: Options + 'a> {
deserializer: &'a mut Deserializer<R, O>,
len: usize,
}
impl<'de, 'a, 'b: 'a, R: BincodeRead<'de> + 'b, O: Options> serde::de::MapAccess<'de>
for Access<'a, R, O>
{
type Error = Error;
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>>
where
K: serde::de::DeserializeSeed<'de>,
{
if self.len > 0 {
self.len -= 1;
let key =
serde::de::DeserializeSeed::deserialize(seed, &mut *self.deserializer)?;
Ok(Some(key))
} else {
Ok(None)
}
}
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value>
where
V: serde::de::DeserializeSeed<'de>,
{
let value = serde::de::DeserializeSeed::deserialize(seed, &mut *self.deserializer)?;
Ok(value)
}
fn size_hint(&self) -> Option<usize> {
Some(self.len)
}
}
let len = O::IntEncoding::deserialize_len(self)?;
visitor.visit_map(Access {
deserializer: self,
len,
})
}
fn deserialize_struct<V>(
self,
_name: &str,
fields: &'static [&'static str],
visitor: V,
) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
self.deserialize_tuple(fields.len(), visitor)
}
fn deserialize_identifier<V>(self, _visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
let message = "Bincode does not support Deserializer::deserialize_identifier";
Err(Error::custom(message))
}
fn deserialize_newtype_struct<V>(self, _name: &str, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
visitor.visit_newtype_struct(self)
}
fn deserialize_unit_struct<V>(self, _name: &'static str, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_tuple_struct<V>(
self,
_name: &'static str,
len: usize,
visitor: V,
) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
self.deserialize_tuple(len, visitor)
}
fn deserialize_ignored_any<V>(self, _visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
let message = "Bincode does not support Deserializer::deserialize_ignored_any";
Err(Error::custom(message))
}
fn is_human_readable(&self) -> bool {
false
}
}
impl<'de, 'a, R, O> serde::de::VariantAccess<'de> for &'a mut Deserializer<R, O>
where
R: BincodeRead<'de>,
O: Options,
{
type Error = Error;
fn unit_variant(self) -> Result<()> {
Ok(())
}
fn newtype_variant_seed<T>(self, seed: T) -> Result<T::Value>
where
T: serde::de::DeserializeSeed<'de>,
{
serde::de::DeserializeSeed::deserialize(seed, self)
}
fn tuple_variant<V>(self, len: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
serde::de::Deserializer::deserialize_tuple(self, len, visitor)
}
fn struct_variant<V>(self, fields: &'static [&'static str], visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
serde::de::Deserializer::deserialize_tuple(self, fields.len(), visitor)
}
}
static UTF8_CHAR_WIDTH: [u8; 256] = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, // 0x1F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, // 0x3F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, // 0x5F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, // 0x7F
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, // 0x9F
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, // 0xBF
0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, // 0xDF
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 0xEF
4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xFF
];
// This function is a copy of core::str::utf8_char_width
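// For example: utf8_char_width(0x24) == 1 (ASCII '$'), utf8_char_width(0xC3) == 2,
// utf8_char_width(0xE2) == 3, utf8_char_width(0xF0) == 4, and utf8_char_width(0xFF) == 0
// for a byte that can never start a UTF-8 sequence.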
fn utf8_char_width(b: u8) -> usize {
UTF8_CHAR_WIDTH[b as usize] as usize
}
use config::Options;
use std::io::Read;
use self::read::BincodeRead;
use byteorder::ReadBytesExt;
use internal::SizeLimit;
use serde;
use serde::de::Error as DeError;
use serde::de::IntoDeserializer;
use {Error, ErrorKind, Result};
pub mod read;
/// A Deserializer that reads bytes from a buffer.
///
/// This struct should rarely be used.
/// In most cases, prefer the `deserialize_from` function.
///
/// The ByteOrder that is chosen will impact the endianness that
/// is used to read integers out of the reader.
///
/// ```ignore
/// let mut deserializer = Deserializer::new(&mut some_reader, options);
/// let value = serde::Deserialize::deserialize(&mut deserializer)?;
/// ```
pub(crate) struct Deserializer<R, O: Options> {
reader: R,
options: O,
}
impl<'de, R: BincodeRead<'de>, O: Options> Deserializer<R, O> {
/// Creates a new Deserializer with a given `Read`er and a size_limit.
pub(crate) fn new(r: R, options: O) -> Deserializer<R, O> {
Deserializer {
reader: r,
options: options,
}
}
fn read_bytes(&mut self, count: u64) -> Result<()> {
self.options.limit().add(count)
}
fn read_type<T>(&mut self) -> Result<()> {
use std::mem::size_of;
self.read_bytes(size_of::<T>() as u64)
}
fn read_vec(&mut self) -> Result<Vec<u8>> {
let len: usize = try!(serde::Deserialize::deserialize(&mut *self));
self.read_bytes(len as u64)?;
self.reader.get_byte_buffer(len)
}
fn read_string(&mut self) -> Result<String> {
let vec = self.read_vec()?;
String::from_utf8(vec).map_err(|e| ErrorKind::InvalidUtf8Encoding(e.utf8_error()).into())
}
}
macro_rules! impl_nums {
($ty:ty, $dser_method:ident, $visitor_method:ident, $reader_method:ident) => {
#[inline]
fn $dser_method<V>(self, visitor: V) -> Result<V::Value>
where V: serde::de::Visitor<'de>,
{
try!(self.read_type::<$ty>());
let value = try!(self.reader.$reader_method::<O::Endian>());
visitor.$visitor_method(value)
}
}
}
impl<'de, 'a, R, O> serde::Deserializer<'de> for &'a mut Deserializer<R, O>
where
R: BincodeRead<'de>,
O: Options,
{
type Error = Error;
#[inline]
fn deserialize_any<V>(self, _visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
Err(Box::new(ErrorKind::DeserializeAnyNotSupported))
}
fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
let value: u8 = try!(serde::Deserialize::deserialize(self));
match value {
1 => visitor.visit_bool(true),
0 => visitor.visit_bool(false),
value => Err(ErrorKind::InvalidBoolEncoding(value).into()),
}
}
impl_nums!(u16, deserialize_u16, visit_u16, read_u16);
impl_nums!(u32, deserialize_u32, visit_u32, read_u32);
impl_nums!(u64, deserialize_u64, visit_u64, read_u64);
impl_nums!(i16, deserialize_i16, visit_i16, read_i16);
impl_nums!(i32, deserialize_i32, visit_i32, read_i32);
impl_nums!(i64, deserialize_i64, visit_i64, read_i64);
impl_nums!(f32, deserialize_f32, visit_f32, read_f32);
impl_nums!(f64, deserialize_f64, visit_f64, read_f64);
serde_if_integer128! {
impl_nums!(u128, deserialize_u128, visit_u128, read_u128);
impl_nums!(i128, deserialize_i128, visit_i128, read_i128);
}
#[inline]
fn deserialize_u8<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
try!(self.read_type::<u8>());
visitor.visit_u8(try!(self.reader.read_u8()))
}
#[inline]
fn deserialize_i8<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
try!(self.read_type::<i8>());
visitor.visit_i8(try!(self.reader.read_i8()))
}
fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_char<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
use std::str;
let error = || ErrorKind::InvalidCharEncoding.into();
let mut buf = [0u8; 4];
// Look at the first byte to see how many bytes must be read
let _ = try!(self.reader.read_exact(&mut buf[..1]));
let width = utf8_char_width(buf[0]);
if width == 1 {
return visitor.visit_char(buf[0] as char);
}
if width == 0 {
return Err(error());
}
if self.reader.read_exact(&mut buf[1..width]).is_err() {
return Err(error());
}
let res = try!(
str::from_utf8(&buf[..width])
.ok()
.and_then(|s| s.chars().next())
.ok_or(error())
);
visitor.visit_char(res)
}
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
let len: usize = try!(serde::Deserialize::deserialize(&mut *self));
try!(self.read_bytes(len as u64));
self.reader.forward_read_str(len, visitor)
}
fn deserialize_string<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
visitor.visit_string(try!(self.read_string()))
}
fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
let len: usize = try!(serde::Deserialize::deserialize(&mut *self));
try!(self.read_bytes(len as u64));
self.reader.forward_read_bytes(len, visitor)
}
fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
visitor.visit_byte_buf(try!(self.read_vec()))
}
fn deserialize_enum<V>(
self,
_enum: &'static str,
_variants: &'static [&'static str],
visitor: V,
) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
impl<'de, 'a, R: 'a, O> serde::de::EnumAccess<'de> for &'a mut Deserializer<R, O>
where
R: BincodeRead<'de>,
O: Options,
{
type Error = Error;
type Variant = Self;
fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant)>
where
V: serde::de::DeserializeSeed<'de>,
{
let idx: u32 = try!(serde::de::Deserialize::deserialize(&mut *self));
let val: Result<_> = seed.deserialize(idx.into_deserializer());
Ok((try!(val), self))
}
}
visitor.visit_enum(self)
}
fn deserialize_tuple<V>(self, len: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
struct Access<'a, R: Read + 'a, O: Options + 'a> {
deserializer: &'a mut Deserializer<R, O>,
len: usize,
}
impl<'de, 'a, 'b: 'a, R: BincodeRead<'de> + 'b, O: Options> serde::de::SeqAccess<'de>
for Access<'a, R, O>
{
type Error = Error;
fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
where
T: serde::de::DeserializeSeed<'de>,
{
if self.len > 0 {
self.len -= 1;
let value = try!(serde::de::DeserializeSeed::deserialize(
seed,
&mut *self.deserializer,
));
Ok(Some(value))
} else {
Ok(None)
}
}
fn size_hint(&self) -> Option<usize> {
Some(self.len)
}
}
visitor.visit_seq(Access {
deserializer: self,
len: len,
})
}
fn deserialize_option<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
let value: u8 = try!(serde::de::Deserialize::deserialize(&mut *self));
match value {
0 => visitor.visit_none(),
1 => visitor.visit_some(&mut *self),
v => Err(ErrorKind::InvalidTagEncoding(v as usize).into()),
}
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
let len = try!(serde::Deserialize::deserialize(&mut *self));
self.deserialize_tuple(len, visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
struct Access<'a, R: Read + 'a, O: Options + 'a> {
deserializer: &'a mut Deserializer<R, O>,
len: usize,
}
impl<'de, 'a, 'b: 'a, R: BincodeRead<'de> + 'b, O: Options> serde::de::MapAccess<'de>
for Access<'a, R, O>
{
type Error = Error;
fn next_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>>
where
K: serde::de::DeserializeSeed<'de>,
{
if self.len > 0 {
self.len -= 1;
let key = try!(serde::de::DeserializeSeed::deserialize(
seed,
&mut *self.deserializer,
));
Ok(Some(key))
} else {
Ok(None)
}
}
fn next_value_seed<V>(&mut self, seed: V) -> Result<V::Value>
where
V: serde::de::DeserializeSeed<'de>,
{
let value = try!(serde::de::DeserializeSeed::deserialize(
seed,
&mut *self.deserializer,
));
Ok(value)
}
fn size_hint(&self) -> Option<usize> {
Some(self.len)
}
}
let len = try!(serde::Deserialize::deserialize(&mut *self));
visitor.visit_map(Access {
deserializer: self,
len: len,
})
}
fn deserialize_struct<V>(
self,
_name: &str,
fields: &'static [&'static str],
visitor: V,
) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
self.deserialize_tuple(fields.len(), visitor)
}
fn deserialize_identifier<V>(self, _visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
let message = "Bincode does not support Deserializer::deserialize_identifier";
Err(Error::custom(message))
}
fn deserialize_newtype_struct<V>(self, _name: &str, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
visitor.visit_newtype_struct(self)
}
fn deserialize_unit_struct<V>(self, _name: &'static str, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
visitor.visit_unit()
}
fn deserialize_tuple_struct<V>(
self,
_name: &'static str,
len: usize,
visitor: V,
) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
self.deserialize_tuple(len, visitor)
}
fn deserialize_ignored_any<V>(self, _visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
let message = "Bincode does not support Deserializer::deserialize_ignored_any";
Err(Error::custom(message))
}
fn is_human_readable(&self) -> bool {
false
}
}
impl<'de, 'a, R, O> serde::de::VariantAccess<'de> for &'a mut Deserializer<R, O>
where
R: BincodeRead<'de>,
O: Options,
{
type Error = Error;
fn unit_variant(self) -> Result<()> {
Ok(())
}
fn newtype_variant_seed<T>(self, seed: T) -> Result<T::Value>
where
T: serde::de::DeserializeSeed<'de>,
{
serde::de::DeserializeSeed::deserialize(seed, self)
}
fn tuple_variant<V>(self, len: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
serde::de::Deserializer::deserialize_tuple(self, len, visitor)
}
fn struct_variant<V>(self, fields: &'static [&'static str], visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'de>,
{
serde::de::Deserializer::deserialize_tuple(self, fields.len(), visitor)
}
}
static UTF8_CHAR_WIDTH: [u8; 256] = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, // 0x1F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, // 0x3F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, // 0x5F
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, // 0x7F
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, // 0x9F
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, // 0xBF
0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, // 0xDF
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 0xEF
4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xFF
];
// This function is a copy of core::str::utf8_char_width
fn utf8_char_width(b: u8) -> usize {
UTF8_CHAR_WIDTH[b as usize] as usize
}

403
third_party/rust/bincode/src/de/read.rs vendored

@@ -1,202 +1,201 @@
use error::Result;
use serde;
use std::io;
/// An optional Read trait for advanced Bincode usage.
///
/// It is highly recommended to use bincode with `io::Read` or `&[u8]` before
/// implementing a custom `BincodeRead`.
///
/// The forward_read_* methods are necessary because some byte sources want
/// to pass a long-lived borrow to the visitor and others want to pass a
/// transient slice.
pub trait BincodeRead<'storage>: io::Read {
/// Check that the next `length` bytes are a valid string and pass
/// it on to the serde reader.
fn forward_read_str<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'storage>;
/// Transfer ownership of the next `length` bytes to the caller.
fn get_byte_buffer(&mut self, length: usize) -> Result<Vec<u8>>;
/// Pass a slice of the next `length` bytes on to the serde reader.
fn forward_read_bytes<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'storage>;
}
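//
// An illustrative sketch (not part of the vendored source) of a minimal custom
// reader over a byte slice; `MyReader` and everything inside it are hypothetical,
// and error handling for too-short input is elided:
//
// struct MyReader<'storage>(&'storage [u8]);
//
// impl<'storage> io::Read for MyReader<'storage> {
//     fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
//         self.0.read(out) // `&[u8]` already implements `io::Read`
//     }
// }
//
// impl<'storage> BincodeRead<'storage> for MyReader<'storage> {
//     fn forward_read_str<V: serde::de::Visitor<'storage>>(
//         &mut self,
//         length: usize,
//         visitor: V,
//     ) -> Result<V::Value> {
//         let (head, tail) = self.0.split_at(length);
//         self.0 = tail;
//         visitor.visit_borrowed_str(::std::str::from_utf8(head).unwrap())
//     }
//
//     fn get_byte_buffer(&mut self, length: usize) -> Result<Vec<u8>> {
//         let (head, tail) = self.0.split_at(length);
//         self.0 = tail;
//         Ok(head.to_vec())
//     }
//
//     fn forward_read_bytes<V: serde::de::Visitor<'storage>>(
//         &mut self,
//         length: usize,
//         visitor: V,
//     ) -> Result<V::Value> {
//         let (head, tail) = self.0.split_at(length);
//         self.0 = tail;
//         visitor.visit_borrowed_bytes(head)
//     }
// }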
/// A BincodeRead implementation for byte slices
pub struct SliceReader<'storage> {
slice: &'storage [u8],
}
/// A BincodeRead implementation for `io::Read`ers
pub struct IoReader<R> {
reader: R,
temp_buffer: Vec<u8>,
}
impl<'storage> SliceReader<'storage> {
/// Constructs a slice reader
pub(crate) fn new(bytes: &'storage [u8]) -> SliceReader<'storage> {
SliceReader { slice: bytes }
}
#[inline(always)]
fn get_byte_slice(&mut self, length: usize) -> Result<&'storage [u8]> {
if length > self.slice.len() {
return Err(SliceReader::unexpected_eof());
}
let (read_slice, remaining) = self.slice.split_at(length);
self.slice = remaining;
Ok(read_slice)
}
pub(crate) fn is_finished(&self) -> bool {
self.slice.is_empty()
}
}
impl<R> IoReader<R> {
/// Constructs an `IoReader`
pub(crate) fn new(r: R) -> IoReader<R> {
IoReader {
reader: r,
temp_buffer: vec![],
}
}
}
impl<'storage> io::Read for SliceReader<'storage> {
#[inline(always)]
fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
if out.len() > self.slice.len() {
return Err(io::ErrorKind::UnexpectedEof.into());
}
let (read_slice, remaining) = self.slice.split_at(out.len());
out.copy_from_slice(read_slice);
self.slice = remaining;
Ok(out.len())
}
#[inline(always)]
fn read_exact(&mut self, out: &mut [u8]) -> io::Result<()> {
self.read(out).map(|_| ())
}
}
impl<R: io::Read> io::Read for IoReader<R> {
#[inline(always)]
fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
self.reader.read(out)
}
#[inline(always)]
fn read_exact(&mut self, out: &mut [u8]) -> io::Result<()> {
self.reader.read_exact(out)
}
}
impl<'storage> SliceReader<'storage> {
#[inline(always)]
fn unexpected_eof() -> Box<::ErrorKind> {
Box::new(::ErrorKind::Io(io::Error::new(
io::ErrorKind::UnexpectedEof,
"",
)))
}
}
impl<'storage> BincodeRead<'storage> for SliceReader<'storage> {
#[inline(always)]
fn forward_read_str<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'storage>,
{
use ErrorKind;
let string = match ::std::str::from_utf8(self.get_byte_slice(length)?) {
Ok(s) => s,
Err(e) => return Err(ErrorKind::InvalidUtf8Encoding(e).into()),
};
visitor.visit_borrowed_str(string)
}
#[inline(always)]
fn get_byte_buffer(&mut self, length: usize) -> Result<Vec<u8>> {
self.get_byte_slice(length).map(|x| x.to_vec())
}
#[inline(always)]
fn forward_read_bytes<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'storage>,
{
visitor.visit_borrowed_bytes(self.get_byte_slice(length)?)
}
}
impl<R> IoReader<R>
where
R: io::Read,
{
fn fill_buffer(&mut self, length: usize) -> Result<()> {
self.temp_buffer.resize(length, 0);
self.reader.read_exact(&mut self.temp_buffer)?;
Ok(())
}
}
impl<'a, R> BincodeRead<'a> for IoReader<R>
where
R: io::Read,
{
fn forward_read_str<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'a>,
{
self.fill_buffer(length)?;
let string = match ::std::str::from_utf8(&self.temp_buffer[..]) {
Ok(s) => s,
Err(e) => return Err(::ErrorKind::InvalidUtf8Encoding(e).into()),
};
visitor.visit_str(string)
}
fn get_byte_buffer(&mut self, length: usize) -> Result<Vec<u8>> {
self.fill_buffer(length)?;
Ok(::std::mem::replace(&mut self.temp_buffer, Vec::new()))
}
fn forward_read_bytes<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'a>,
{
self.fill_buffer(length)?;
visitor.visit_bytes(&self.temp_buffer[..])
}
}
#[cfg(test)]
mod test {
use super::IoReader;
#[test]
fn test_fill_buffer() {
let buffer = vec![0u8; 64];
let mut reader = IoReader::new(buffer.as_slice());
reader.fill_buffer(20).unwrap();
assert_eq!(20, reader.temp_buffer.len());
reader.fill_buffer(30).unwrap();
assert_eq!(30, reader.temp_buffer.len());
reader.fill_buffer(5).unwrap();
assert_eq!(5, reader.temp_buffer.len());
}
}
use error::Result;
use serde;
use std::{io, slice};
/// An optional Read trait for advanced Bincode usage.
///
/// It is highly recommended to use bincode with `io::Read` or `&[u8]` before
/// implementing a custom `BincodeRead`.
pub trait BincodeRead<'storage>: io::Read {
/// Forwards reading `length` bytes of a string on to the serde reader.
fn forward_read_str<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'storage>;
/// Return the first `length` bytes of the internal byte buffer.
fn get_byte_buffer(&mut self, length: usize) -> Result<Vec<u8>>;
/// Forwards reading `length` bytes on to the serde reader.
fn forward_read_bytes<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'storage>;
}
/// A BincodeRead implementation for byte slices
/// NOT A PART OF THE STABLE PUBLIC API
#[doc(hidden)]
pub struct SliceReader<'storage> {
slice: &'storage [u8],
}
/// A BincodeRead implementation for io::Readers
/// NOT A PART OF THE STABLE PUBLIC API
#[doc(hidden)]
pub struct IoReader<R> {
reader: R,
temp_buffer: Vec<u8>,
}
impl<'storage> SliceReader<'storage> {
/// Constructs a slice reader
pub fn new(bytes: &'storage [u8]) -> SliceReader<'storage> {
SliceReader { slice: bytes }
}
}
impl<R> IoReader<R> {
/// Constructs an `IoReader`
pub fn new(r: R) -> IoReader<R> {
IoReader {
reader: r,
temp_buffer: vec![],
}
}
}
impl<'storage> io::Read for SliceReader<'storage> {
#[inline(always)]
fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
(&mut self.slice).read(out)
}
#[inline(always)]
fn read_exact(&mut self, out: &mut [u8]) -> io::Result<()> {
(&mut self.slice).read_exact(out)
}
}
impl<R: io::Read> io::Read for IoReader<R> {
#[inline(always)]
fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
self.reader.read(out)
}
#[inline(always)]
fn read_exact(&mut self, out: &mut [u8]) -> io::Result<()> {
self.reader.read_exact(out)
}
}
impl<'storage> SliceReader<'storage> {
#[inline(always)]
fn unexpected_eof() -> Box<::ErrorKind> {
return Box::new(::ErrorKind::Io(io::Error::new(
io::ErrorKind::UnexpectedEof,
"",
)));
}
}
impl<'storage> BincodeRead<'storage> for SliceReader<'storage> {
#[inline(always)]
fn forward_read_str<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'storage>,
{
use ErrorKind;
if length > self.slice.len() {
return Err(SliceReader::unexpected_eof());
}
let string = match ::std::str::from_utf8(&self.slice[..length]) {
Ok(s) => s,
Err(e) => return Err(ErrorKind::InvalidUtf8Encoding(e).into()),
};
let r = visitor.visit_borrowed_str(string);
self.slice = &self.slice[length..];
r
}
#[inline(always)]
fn get_byte_buffer(&mut self, length: usize) -> Result<Vec<u8>> {
if length > self.slice.len() {
return Err(SliceReader::unexpected_eof());
}
let r = &self.slice[..length];
self.slice = &self.slice[length..];
Ok(r.to_vec())
}
#[inline(always)]
fn forward_read_bytes<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'storage>,
{
if length > self.slice.len() {
return Err(SliceReader::unexpected_eof());
}
let r = visitor.visit_borrowed_bytes(&self.slice[..length]);
self.slice = &self.slice[length..];
r
}
}
impl<R> IoReader<R>
where
R: io::Read,
{
fn fill_buffer(&mut self, length: usize) -> Result<()> {
// We first reserve the space needed in our buffer.
let current_length = self.temp_buffer.len();
if length > current_length {
self.temp_buffer.reserve_exact(length - current_length);
}
// Then create a slice with the length as our desired length. This is
// safe as long as we only write (no reads) to this buffer, because
// `reserve_exact` above has allocated this space.
let buf = unsafe {
slice::from_raw_parts_mut(self.temp_buffer.as_mut_ptr(), length)
};
// This method is assumed to properly handle slices which include
// uninitialized bytes (as ours does). See discussion at the link below.
// https://github.com/servo/bincode/issues/260
self.reader.read_exact(buf)?;
// Only after `read_exact` successfully returns do we set the buffer
// length. By doing this after the call to `read_exact`, we can avoid
// exposing uninitialized memory in the case of `read_exact` returning
// an error.
unsafe {
self.temp_buffer.set_len(length);
}
Ok(())
}
}
impl<'a, R> BincodeRead<'a> for IoReader<R>
where
R: io::Read,
{
fn forward_read_str<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'a>,
{
self.fill_buffer(length)?;
let string = match ::std::str::from_utf8(&self.temp_buffer[..]) {
Ok(s) => s,
Err(e) => return Err(::ErrorKind::InvalidUtf8Encoding(e).into()),
};
let r = visitor.visit_str(string);
r
}
fn get_byte_buffer(&mut self, length: usize) -> Result<Vec<u8>> {
self.fill_buffer(length)?;
Ok(::std::mem::replace(&mut self.temp_buffer, Vec::new()))
}
fn forward_read_bytes<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'a>,
{
self.fill_buffer(length)?;
let r = visitor.visit_bytes(&self.temp_buffer[..]);
r
}
}

230
third_party/rust/bincode/src/error.rs vendored

@@ -1,115 +1,115 @@
use std::error::Error as StdError;
use std::io;
use std::str::Utf8Error;
use std::{error, fmt};
use serde;
/// The result of a serialization or deserialization operation.
pub type Result<T> = ::std::result::Result<T, Error>;
/// An error that can be produced during (de)serializing.
pub type Error = Box<ErrorKind>;
/// The kind of error that can be produced during a serialization or deserialization.
#[derive(Debug)]
pub enum ErrorKind {
/// If the error stems from the reader/writer that is being used
/// during (de)serialization, that error will be stored and returned here.
Io(io::Error),
/// Returned if the deserializer attempts to deserialize a string that is not valid utf8
InvalidUtf8Encoding(Utf8Error),
/// Returned if the deserializer attempts to deserialize a bool that was
/// not encoded as either a 1 or a 0
InvalidBoolEncoding(u8),
/// Returned if the deserializer attempts to deserialize a char that is not in the correct format.
InvalidCharEncoding,
/// Returned if the deserializer attempts to deserialize the tag of an enum that is
/// not in the expected ranges
InvalidTagEncoding(usize),
/// Serde has a deserialize_any method that lets the format hint to the
/// object which route to take in deserializing.
DeserializeAnyNotSupported,
/// If (de)serializing a message takes more than the provided size limit, this
/// error is returned.
SizeLimit,
/// Bincode can not encode sequences of unknown length (like iterators).
SequenceMustHaveLength,
/// A custom error message from Serde.
Custom(String),
}
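//
// An illustrative sketch of inspecting a failure through the public API
// (not part of this file; the limit and value are arbitrary):
//
// use bincode::{ErrorKind, Options};
//
// match bincode::options().with_limit(1).serialize(&12345u64) {
//     Err(err) => match *err {
//         ErrorKind::SizeLimit => println!("over the configured byte limit"),
//         ref other => println!("other error: {}", other),
//     },
//     Ok(_) => unreachable!("a 1-byte limit cannot hold this value"),
// }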
impl StdError for ErrorKind {
fn description(&self) -> &str {
match *self {
ErrorKind::Io(ref err) => error::Error::description(err),
ErrorKind::InvalidUtf8Encoding(_) => "string is not valid utf8",
ErrorKind::InvalidBoolEncoding(_) => "invalid u8 while decoding bool",
ErrorKind::InvalidCharEncoding => "char is not valid",
ErrorKind::InvalidTagEncoding(_) => "tag for enum is not valid",
ErrorKind::SequenceMustHaveLength => {
"Bincode can only encode sequences and maps that have a knowable size ahead of time"
}
ErrorKind::DeserializeAnyNotSupported => {
"Bincode doesn't support serde::Deserializer::deserialize_any"
}
ErrorKind::SizeLimit => "the size limit has been reached",
ErrorKind::Custom(ref msg) => msg,
}
}
fn cause(&self) -> Option<&error::Error> {
match *self {
ErrorKind::Io(ref err) => Some(err),
ErrorKind::InvalidUtf8Encoding(_) => None,
ErrorKind::InvalidBoolEncoding(_) => None,
ErrorKind::InvalidCharEncoding => None,
ErrorKind::InvalidTagEncoding(_) => None,
ErrorKind::SequenceMustHaveLength => None,
ErrorKind::DeserializeAnyNotSupported => None,
ErrorKind::SizeLimit => None,
ErrorKind::Custom(_) => None,
}
}
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
ErrorKind::Io(err).into()
}
}
impl fmt::Display for ErrorKind {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
ErrorKind::Io(ref ioerr) => write!(fmt, "io error: {}", ioerr),
ErrorKind::InvalidUtf8Encoding(ref e) => write!(fmt, "{}: {}", self.description(), e),
ErrorKind::InvalidBoolEncoding(b) => {
write!(fmt, "{}, expected 0 or 1, found {}", self.description(), b)
}
ErrorKind::InvalidCharEncoding => write!(fmt, "{}", self.description()),
ErrorKind::InvalidTagEncoding(tag) => {
write!(fmt, "{}, found {}", self.description(), tag)
}
ErrorKind::SequenceMustHaveLength => write!(fmt, "{}", self.description()),
ErrorKind::SizeLimit => write!(fmt, "{}", self.description()),
ErrorKind::DeserializeAnyNotSupported => write!(
fmt,
"Bincode does not support the serde::Deserializer::deserialize_any method"
),
ErrorKind::Custom(ref s) => s.fmt(fmt),
}
}
}
impl serde::de::Error for Error {
fn custom<T: fmt::Display>(desc: T) -> Error {
ErrorKind::Custom(desc.to_string()).into()
}
}
impl serde::ser::Error for Error {
fn custom<T: fmt::Display>(msg: T) -> Self {
ErrorKind::Custom(msg.to_string()).into()
}
}

315
third_party/rust/bincode/src/internal.rs vendored

@@ -1,124 +1,191 @@
use serde;
use std::io::{Read, Write};
use std::marker::PhantomData;
use config::{Infinite, InternalOptions, Options, SizeLimit, TrailingBytes};
use de::read::BincodeRead;
use Result;
pub(crate) fn serialize_into<W, T: ?Sized, O>(writer: W, value: &T, mut options: O) -> Result<()>
where
W: Write,
T: serde::Serialize,
O: InternalOptions,
{
if options.limit().limit().is_some() {
// "compute" the size for the side-effect
// of returning Err if the bound was reached.
serialized_size(value, &mut options)?;
}
let mut serializer = ::ser::Serializer::<_, O>::new(writer, options);
serde::Serialize::serialize(value, &mut serializer)
}
pub(crate) fn serialize<T: ?Sized, O>(value: &T, mut options: O) -> Result<Vec<u8>>
where
T: serde::Serialize,
O: InternalOptions,
{
let mut writer = {
let actual_size = serialized_size(value, &mut options)?;
Vec::with_capacity(actual_size as usize)
};
serialize_into(&mut writer, value, options.with_no_limit())?;
Ok(writer)
}
pub(crate) fn serialized_size<T: ?Sized, O: InternalOptions>(value: &T, options: O) -> Result<u64>
where
T: serde::Serialize,
{
let mut size_counter = ::ser::SizeChecker { options, total: 0 };
let result = value.serialize(&mut size_counter);
result.map(|_| size_counter.total)
}
pub(crate) fn deserialize_from<R, T, O>(reader: R, options: O) -> Result<T>
where
R: Read,
T: serde::de::DeserializeOwned,
O: InternalOptions,
{
deserialize_from_seed(PhantomData, reader, options)
}
pub(crate) fn deserialize_from_seed<'a, R, T, O>(seed: T, reader: R, options: O) -> Result<T::Value>
where
R: Read,
T: serde::de::DeserializeSeed<'a>,
O: InternalOptions,
{
let reader = ::de::read::IoReader::new(reader);
deserialize_from_custom_seed(seed, reader, options)
}
pub(crate) fn deserialize_from_custom<'a, R, T, O>(reader: R, options: O) -> Result<T>
where
R: BincodeRead<'a>,
T: serde::de::DeserializeOwned,
O: InternalOptions,
{
deserialize_from_custom_seed(PhantomData, reader, options)
}
pub(crate) fn deserialize_from_custom_seed<'a, R, T, O>(
seed: T,
reader: R,
options: O,
) -> Result<T::Value>
where
R: BincodeRead<'a>,
T: serde::de::DeserializeSeed<'a>,
O: InternalOptions,
{
let mut deserializer = ::de::Deserializer::<_, O>::with_bincode_read(reader, options);
seed.deserialize(&mut deserializer)
}
pub(crate) fn deserialize_in_place<'a, R, T, O>(reader: R, options: O, place: &mut T) -> Result<()>
where
R: BincodeRead<'a>,
T: serde::de::Deserialize<'a>,
O: InternalOptions,
{
let mut deserializer = ::de::Deserializer::<_, _>::with_bincode_read(reader, options);
serde::Deserialize::deserialize_in_place(&mut deserializer, place)
}
pub(crate) fn deserialize<'a, T, O>(bytes: &'a [u8], options: O) -> Result<T>
where
T: serde::de::Deserialize<'a>,
O: InternalOptions,
{
deserialize_seed(PhantomData, bytes, options)
}
pub(crate) fn deserialize_seed<'a, T, O>(seed: T, bytes: &'a [u8], options: O) -> Result<T::Value>
where
T: serde::de::DeserializeSeed<'a>,
O: InternalOptions,
{
let options = ::config::WithOtherLimit::new(options, Infinite);
let reader = ::de::read::SliceReader::new(bytes);
let mut deserializer = ::de::Deserializer::with_bincode_read(reader, options);
let val = seed.deserialize(&mut deserializer)?;
match O::Trailing::check_end(&deserializer.reader) {
Ok(_) => Ok(val),
Err(err) => Err(err),
}
}
use serde;
use std::io::{Read, Write};
use std::marker::PhantomData;
use config::{Options, OptionsExt};
use de::read::BincodeRead;
use {ErrorKind, Result};
#[derive(Clone)]
struct CountSize<L: SizeLimit> {
total: u64,
other_limit: L,
}
pub(crate) fn serialize_into<W, T: ?Sized, O>(writer: W, value: &T, mut options: O) -> Result<()>
where
W: Write,
T: serde::Serialize,
O: Options,
{
if options.limit().limit().is_some() {
// "compute" the size for the side-effect
// of returning Err if the bound was reached.
serialized_size(value, &mut options)?;
}
let mut serializer = ::ser::Serializer::<_, O>::new(writer, options);
serde::Serialize::serialize(value, &mut serializer)
}
pub(crate) fn serialize<T: ?Sized, O>(value: &T, mut options: O) -> Result<Vec<u8>>
where
T: serde::Serialize,
O: Options,
{
let mut writer = {
let actual_size = serialized_size(value, &mut options)?;
Vec::with_capacity(actual_size as usize)
};
serialize_into(&mut writer, value, options.with_no_limit())?;
Ok(writer)
}
impl<L: SizeLimit> SizeLimit for CountSize<L> {
fn add(&mut self, c: u64) -> Result<()> {
self.other_limit.add(c)?;
self.total += c;
Ok(())
}
fn limit(&self) -> Option<u64> {
unreachable!();
}
}
pub(crate) fn serialized_size<T: ?Sized, O: Options>(value: &T, mut options: O) -> Result<u64>
where
T: serde::Serialize,
{
let old_limiter = options.limit().clone();
let mut size_counter = ::ser::SizeChecker {
options: ::config::WithOtherLimit::new(
options,
CountSize {
total: 0,
other_limit: old_limiter,
},
),
};
let result = value.serialize(&mut size_counter);
result.map(|_| size_counter.options.new_limit.total)
}
pub(crate) fn deserialize_from<R, T, O>(reader: R, options: O) -> Result<T>
where
R: Read,
T: serde::de::DeserializeOwned,
O: Options,
{
deserialize_from_seed(PhantomData, reader, options)
}
pub(crate) fn deserialize_from_seed<'a, R, T, O>(seed: T, reader: R, options: O) -> Result<T::Value>
where
R: Read,
T: serde::de::DeserializeSeed<'a>,
O: Options,
{
let reader = ::de::read::IoReader::new(reader);
deserialize_from_custom_seed(seed, reader, options)
}
pub(crate) fn deserialize_from_custom<'a, R, T, O>(reader: R, options: O) -> Result<T>
where
R: BincodeRead<'a>,
T: serde::de::DeserializeOwned,
O: Options,
{
deserialize_from_custom_seed(PhantomData, reader, options)
}
pub(crate) fn deserialize_from_custom_seed<'a, R, T, O>(
seed: T,
reader: R,
options: O,
) -> Result<T::Value>
where
R: BincodeRead<'a>,
T: serde::de::DeserializeSeed<'a>,
O: Options,
{
let mut deserializer = ::de::Deserializer::<_, O>::new(reader, options);
seed.deserialize(&mut deserializer)
}
pub(crate) fn deserialize_in_place<'a, R, T, O>(reader: R, options: O, place: &mut T) -> Result<()>
where
R: BincodeRead<'a>,
T: serde::de::Deserialize<'a>,
O: Options,
{
let mut deserializer = ::de::Deserializer::<_, _>::new(reader, options);
serde::Deserialize::deserialize_in_place(&mut deserializer, place)
}
pub(crate) fn deserialize<'a, T, O>(bytes: &'a [u8], options: O) -> Result<T>
where
T: serde::de::Deserialize<'a>,
O: Options,
{
deserialize_seed(PhantomData, bytes, options)
}
pub(crate) fn deserialize_seed<'a, T, O>(seed: T, bytes: &'a [u8], options: O) -> Result<T::Value>
where
T: serde::de::DeserializeSeed<'a>,
O: Options,
{
let reader = ::de::read::SliceReader::new(bytes);
let options = ::config::WithOtherLimit::new(options, Infinite);
deserialize_from_custom_seed(seed, reader, options)
}
pub(crate) trait SizeLimit: Clone {
/// Tells the SizeLimit that a certain number of bytes has been
/// read or written. Returns Err if the limit has been exceeded.
fn add(&mut self, n: u64) -> Result<()>;
/// Returns the hard limit (if one exists)
fn limit(&self) -> Option<u64>;
}
/// A SizeLimit that restricts serialized or deserialized messages from
/// exceeding a certain byte length.
#[derive(Copy, Clone)]
pub struct Bounded(pub u64);
/// A SizeLimit without a limit!
/// Use this if you don't care about the size of encoded or decoded messages.
#[derive(Copy, Clone)]
pub struct Infinite;
impl SizeLimit for Bounded {
#[inline(always)]
fn add(&mut self, n: u64) -> Result<()> {
if self.0 >= n {
self.0 -= n;
Ok(())
} else {
Err(Box::new(ErrorKind::SizeLimit))
}
}
#[inline(always)]
fn limit(&self) -> Option<u64> {
Some(self.0)
}
}
impl SizeLimit for Infinite {
#[inline(always)]
fn add(&mut self, _: u64) -> Result<()> {
Ok(())
}
#[inline(always)]
fn limit(&self) -> Option<u64> {
None
}
}
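//
// A sketch of how `Bounded` consumes its byte budget (illustrative only):
//
// let mut limit = Bounded(4);
// assert!(limit.add(3).is_ok());  // one byte of budget remains
// assert!(limit.add(2).is_err()); // exceeding the budget yields ErrorKind::SizeLimit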

339
third_party/rust/bincode/src/lib.rs vendored

@@ -1,166 +1,173 @@
#![deny(missing_docs)]
#![allow(unknown_lints, bare_trait_objects, deprecated)]
//! Bincode is a crate for encoding and decoding using a tiny binary
//! serialization strategy. Using it, you can easily go from having
//! an object in memory, quickly serialize it to bytes, and then
//! deserialize it back just as fast!
//!
//! ### Using Basic Functions
//!
//! ```edition2018
//! fn main() {
//! // The object that we will serialize.
//! let target: Option<String> = Some("hello world".to_string());
//!
//! let encoded: Vec<u8> = bincode::serialize(&target).unwrap();
//! let decoded: Option<String> = bincode::deserialize(&encoded[..]).unwrap();
//! assert_eq!(target, decoded);
//! }
//! ```
//!
//! ### 128-bit numbers
//!
//! Support for `i128` and `u128` is automatically enabled on Rust toolchains
//! greater than or equal to `1.26.0`, and disabled for targets which do not support it.
#![doc(html_root_url = "https://docs.rs/bincode/1.3.1")]
#![crate_name = "bincode"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
extern crate byteorder;
#[macro_use]
extern crate serde;
/// Configuration settings for bincode.
pub mod config;
/// Deserialize bincode data to a Rust data structure.
pub mod de;
mod error;
mod internal;
mod ser;
pub use config::{Config, DefaultOptions, Options};
pub use de::read::BincodeRead;
pub use de::Deserializer;
pub use error::{Error, ErrorKind, Result};
pub use ser::Serializer;
/// Get a default configuration object.
///
/// ### Default Configuration:
///
/// | Byte limit | Endianness |
/// |------------|------------|
/// | Unlimited | Little |
#[inline(always)]
#[deprecated(since = "1.3.0", note = "please use `options()` instead")]
pub fn config() -> Config {
Config::new()
}
/// Get a default configuration object.
///
/// ### Default Configuration:
///
/// | Byte limit | Endianness | Int Encoding | Trailing Behavior |
/// |------------|------------|--------------|-------------------|
/// | Unlimited | Little | Varint | Reject |
#[inline(always)]
pub fn options() -> DefaultOptions {
DefaultOptions::new()
}
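// Illustrative sketch (assuming the `Options` trait is in scope): the
// builder returned by `options()` round-trips values with varint
// integers and rejects trailing input by default.
//
//     use bincode::Options;
//     let encoded = bincode::options().serialize(&42u32)?; // varint: one byte
//     let decoded: u32 = bincode::options().deserialize(&encoded)?;
//     assert_eq!(decoded, 42);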
/// Serializes an object directly into a `Writer` using the default configuration.
///
/// If the serialization would take more bytes than allowed by the size limit, an error
/// is returned and *no bytes* will be written into the `Writer`.
pub fn serialize_into<W, T: ?Sized>(writer: W, value: &T) -> Result<()>
where
W: std::io::Write,
T: serde::Serialize,
{
DefaultOptions::new()
.with_fixint_encoding()
.serialize_into(writer, value)
}
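// Illustrative sketch: any `std::io::Write` works as the writer, e.g. a
// `Vec<u8>`; on a size-limit error nothing is written to it.
//
//     let mut buf: Vec<u8> = Vec::new();
//     bincode::serialize_into(&mut buf, &("abc", 1u32))?;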
/// Serializes a serializable object into a `Vec` of bytes using the default configuration.
pub fn serialize<T: ?Sized>(value: &T) -> Result<Vec<u8>>
where
T: serde::Serialize,
{
DefaultOptions::new()
.with_fixint_encoding()
.allow_trailing_bytes()
.serialize(value)
}
/// Deserializes an object directly from a `Read`er using the default configuration.
///
/// If this returns an `Error`, `reader` may be in an invalid state.
pub fn deserialize_from<R, T>(reader: R) -> Result<T>
where
R: std::io::Read,
T: serde::de::DeserializeOwned,
{
DefaultOptions::new()
.with_fixint_encoding()
.allow_trailing_bytes()
.deserialize_from(reader)
}
/// Deserializes an object from a custom `BincodeRead`er using the default configuration.
/// It is highly recommended to use `deserialize_from` unless you need to implement
/// `BincodeRead` for performance reasons.
///
/// If this returns an `Error`, `reader` may be in an invalid state.
pub fn deserialize_from_custom<'a, R, T>(reader: R) -> Result<T>
where
R: de::read::BincodeRead<'a>,
T: serde::de::DeserializeOwned,
{
DefaultOptions::new()
.with_fixint_encoding()
.allow_trailing_bytes()
.deserialize_from_custom(reader)
}
/// Only use this if you know what you're doing.
///
/// This is part of the public API.
#[doc(hidden)]
pub fn deserialize_in_place<'a, R, T>(reader: R, place: &mut T) -> Result<()>
where
T: serde::de::Deserialize<'a>,
R: BincodeRead<'a>,
{
DefaultOptions::new()
.with_fixint_encoding()
.allow_trailing_bytes()
.deserialize_in_place(reader, place)
}
/// Deserializes a slice of bytes into an instance of `T` using the default configuration.
pub fn deserialize<'a, T>(bytes: &'a [u8]) -> Result<T>
where
T: serde::de::Deserialize<'a>,
{
DefaultOptions::new()
.with_fixint_encoding()
.allow_trailing_bytes()
.deserialize(bytes)
}
/// Returns the size that an object would be if serialized using Bincode with the default configuration.
pub fn serialized_size<T: ?Sized>(value: &T) -> Result<u64>
where
T: serde::Serialize,
{
DefaultOptions::new()
.with_fixint_encoding()
.allow_trailing_bytes()
.serialized_size(value)
}
#![deny(missing_docs)]
//! Bincode is a crate for encoding and decoding using a tiny binary
//! serialization strategy. Using it, you can easily go from having
//! an object in memory, quickly serialize it to bytes, and then
//! deserialize it back just as fast!
//!
//! ### Using Basic Functions
//!
//! ```edition2018
//! fn main() {
//! // The object that we will serialize.
//! let target: Option<String> = Some("hello world".to_string());
//!
//! let encoded: Vec<u8> = bincode::serialize(&target).unwrap();
//! let decoded: Option<String> = bincode::deserialize(&encoded[..]).unwrap();
//! assert_eq!(target, decoded);
//! }
//! ```
//!
//! ### 128-bit numbers
//!
//! Support for `i128` and `u128` is automatically enabled on Rust toolchains
//! greater than or equal to `1.26.0`, and disabled for targets which do not support it.
#![doc(html_root_url = "https://docs.rs/bincode/1.2.1")]
#![crate_name = "bincode"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
extern crate byteorder;
#[macro_use]
extern crate serde;
mod config;
mod de;
mod error;
mod internal;
mod ser;
pub use config::Config;
pub use de::read::{BincodeRead, IoReader, SliceReader};
pub use error::{Error, ErrorKind, Result};
/// An object that implements this trait can be passed a
/// serde::Deserializer without knowing its concrete type.
///
/// This trait should be used only for `with_deserializer` functions.
#[doc(hidden)]
pub trait DeserializerAcceptor<'a> {
/// The return type for the accept method
type Output;
/// Accept a serde::Deserializer and do whatever you want with it.
fn accept<T: serde::Deserializer<'a>>(self, T) -> Self::Output;
}
/// An object that implements this trait can be passed a
/// serde::Serializer without knowing its concrete type.
///
/// This trait should be used only for `with_serializer` functions.
#[doc(hidden)]
pub trait SerializerAcceptor {
/// The return type for the accept method
type Output;
/// Accept a serde::Serializer and do whatever you want with it.
fn accept<T: serde::Serializer>(self, T) -> Self::Output;
}
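// Illustrative sketch (hypothetical type, not part of the original
// file): an acceptor pairs a value with the work it wants done, so
// `with_serializer` can hand it a concrete `serde::Serializer` without
// the caller ever naming that type.
struct SerializeValue<'v, T: serde::Serialize>(&'v T);
impl<'v, T: serde::Serialize> SerializerAcceptor for SerializeValue<'v, T> {
    type Output = ::std::result::Result<(), String>;
    fn accept<S: serde::Serializer>(self, ser: S) -> Self::Output {
        // Serialize the borrowed value with whatever serializer we are given.
        self.0.serialize(ser).map(|_| ()).map_err(|e| e.to_string())
    }
}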
/// Get a default configuration object.
///
/// ### Default Configuration:
///
/// | Byte limit | Endianness |
/// |------------|------------|
/// | Unlimited | Little |
#[inline(always)]
pub fn config() -> Config {
Config::new()
}
/// Serializes an object directly into a `Writer` using the default configuration.
///
/// If the serialization would take more bytes than allowed by the size limit, an error
/// is returned and *no bytes* will be written into the `Writer`.
pub fn serialize_into<W, T: ?Sized>(writer: W, value: &T) -> Result<()>
where
W: std::io::Write,
T: serde::Serialize,
{
config().serialize_into(writer, value)
}
/// Serializes a serializable object into a `Vec` of bytes using the default configuration.
pub fn serialize<T: ?Sized>(value: &T) -> Result<Vec<u8>>
where
T: serde::Serialize,
{
config().serialize(value)
}
/// Deserializes an object directly from a `Read`er using the default configuration.
///
/// If this returns an `Error`, `reader` may be in an invalid state.
pub fn deserialize_from<R, T>(reader: R) -> Result<T>
where
R: std::io::Read,
T: serde::de::DeserializeOwned,
{
config().deserialize_from(reader)
}
/// Deserializes an object from a custom `BincodeRead`er using the default configuration.
/// It is highly recommended to use `deserialize_from` unless you need to implement
/// `BincodeRead` for performance reasons.
///
/// If this returns an `Error`, `reader` may be in an invalid state.
pub fn deserialize_from_custom<'a, R, T>(reader: R) -> Result<T>
where
R: de::read::BincodeRead<'a>,
T: serde::de::DeserializeOwned,
{
config().deserialize_from_custom(reader)
}
/// Only use this if you know what you're doing.
///
/// This is part of the public API.
#[doc(hidden)]
pub fn deserialize_in_place<'a, R, T>(reader: R, place: &mut T) -> Result<()>
where
T: serde::de::Deserialize<'a>,
R: BincodeRead<'a>,
{
config().deserialize_in_place(reader, place)
}
/// Deserializes a slice of bytes into an instance of `T` using the default configuration.
pub fn deserialize<'a, T>(bytes: &'a [u8]) -> Result<T>
where
T: serde::de::Deserialize<'a>,
{
config().deserialize(bytes)
}
/// Returns the size that an object would be if serialized using Bincode with the default configuration.
pub fn serialized_size<T: ?Sized>(value: &T) -> Result<u64>
where
T: serde::Serialize,
{
config().serialized_size(value)
}
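// Illustrative sketch: the size matches what `serialize` would produce,
// without allocating the output buffer.
//
//     let n = bincode::serialized_size(&0u64)?; // 8 under the fixint default
//     assert_eq!(n as usize, bincode::serialize(&0u64)?.len());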
/// Executes the acceptor with a serde::Deserializer instance.
/// NOT A PART OF THE STABLE PUBLIC API
#[doc(hidden)]
pub fn with_deserializer<'a, A, R>(reader: R, acceptor: A) -> A::Output
where
A: DeserializerAcceptor<'a>,
R: BincodeRead<'a>,
{
config().with_deserializer(reader, acceptor)
}
/// Executes the acceptor with a serde::Serializer instance.
/// NOT A PART OF THE STABLE PUBLIC API
#[doc(hidden)]
pub fn with_serializer<A, W>(writer: W, acceptor: A) -> A::Output
where
A: SerializerAcceptor,
W: std::io::Write,
{
config().with_serializer(writer, acceptor)
}

1542
third_party/rust/bincode/src/ser/mod.rs vendored

File diff not shown because of its large size.

900
third_party/rust/bincode/tests/test.rs vendored
@@ -1,900 +0,0 @@
#[macro_use]
extern crate serde_derive;
extern crate bincode;
extern crate byteorder;
#[macro_use]
extern crate serde;
extern crate serde_bytes;
use std::borrow::Cow;
use std::collections::HashMap;
use std::fmt::{self, Debug};
use std::result::Result as StdResult;
use bincode::{
deserialize, deserialize_from, deserialize_in_place, serialize, serialized_size,
DefaultOptions, ErrorKind, Options, Result,
};
use serde::de::{Deserialize, DeserializeSeed, Deserializer, SeqAccess, Visitor};
const LEN_SIZE: u64 = 8;
fn the_same_impl<V, O>(element: V, options: &mut O)
where
V: serde::Serialize + serde::de::DeserializeOwned + PartialEq + Debug + 'static,
O: Options,
{
let size = options.serialized_size(&element).unwrap();
{
let encoded = options.serialize(&element).unwrap();
let decoded: V = options.deserialize(&encoded[..]).unwrap();
let decoded_reader = options.deserialize_from(&mut &encoded[..]).unwrap();
assert_eq!(element, decoded);
assert_eq!(element, decoded_reader);
assert_eq!(size, encoded.len() as u64);
}
}
fn the_same<V>(element: V)
where
V: serde::Serialize + serde::de::DeserializeOwned + PartialEq + Debug + Clone + 'static,
{
// add a new macro which calls the previous when you add a new option set
macro_rules! all_endians {
($element:expr, $options:expr) => {
the_same_impl($element.clone(), &mut $options.with_native_endian());
the_same_impl($element.clone(), &mut $options.with_big_endian());
the_same_impl($element.clone(), &mut $options.with_little_endian());
};
}
macro_rules! all_integer_encodings {
($element:expr, $options:expr) => {
all_endians!($element, $options.with_fixint_encoding());
all_endians!($element, $options.with_varint_encoding());
};
}
all_integer_encodings!(element, DefaultOptions::new());
}
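// Between the two macros, every element is exercised under 2 integer
// encodings x 3 endiannesses = 6 option combinations.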
#[test]
fn test_numbers() {
// unsigned positive
the_same(5u8);
the_same(5u16);
the_same(5u32);
the_same(5u64);
the_same(5usize);
// signed positive
the_same(5i8);
the_same(5i16);
the_same(5i32);
the_same(5i64);
the_same(5isize);
// signed negative
the_same(-5i8);
the_same(-5i16);
the_same(-5i32);
the_same(-5i64);
the_same(-5isize);
// floating
the_same(-100f32);
the_same(0f32);
the_same(5f32);
the_same(-100f64);
the_same(5f64);
}
serde_if_integer128! {
#[test]
fn test_numbers_128bit() {
// unsigned positive
the_same(5u128);
the_same(u128::max_value());
// signed positive
the_same(5i128);
the_same(i128::max_value());
// signed negative
the_same(-5i128);
the_same(i128::min_value());
}
}
#[test]
fn test_string() {
the_same("".to_string());
the_same("a".to_string());
}
#[test]
fn test_tuple() {
the_same((1isize,));
the_same((1isize, 2isize, 3isize));
the_same((1isize, "foo".to_string(), ()));
}
#[test]
fn test_basic_struct() {
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
struct Easy {
x: isize,
s: String,
y: usize,
}
the_same(Easy {
x: -4,
s: "foo".to_string(),
y: 10,
});
}
#[test]
fn test_nested_struct() {
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
struct Easy {
x: isize,
s: String,
y: usize,
}
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
struct Nest {
f: Easy,
b: usize,
s: Easy,
}
the_same(Nest {
f: Easy {
x: -1,
s: "foo".to_string(),
y: 20,
},
b: 100,
s: Easy {
x: -100,
s: "bar".to_string(),
y: 20,
},
});
}
#[test]
fn test_struct_newtype() {
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
struct NewtypeStr(usize);
the_same(NewtypeStr(5));
}
#[test]
fn test_struct_tuple() {
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
struct TubStr(usize, String, f32);
the_same(TubStr(5, "hello".to_string(), 3.2));
}
#[test]
fn test_option() {
the_same(Some(5usize));
the_same(Some("foo bar".to_string()));
the_same(None::<usize>);
}
#[test]
fn test_enum() {
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
enum TestEnum {
NoArg,
OneArg(usize),
Args(usize, usize),
AnotherNoArg,
StructLike { x: usize, y: f32 },
}
the_same(TestEnum::NoArg);
the_same(TestEnum::OneArg(4));
//the_same(TestEnum::Args(4, 5));
the_same(TestEnum::AnotherNoArg);
the_same(TestEnum::StructLike { x: 4, y: 3.14159 });
the_same(vec![
TestEnum::NoArg,
TestEnum::OneArg(5),
TestEnum::AnotherNoArg,
TestEnum::StructLike { x: 4, y: 1.4 },
]);
}
#[test]
fn test_vec() {
let v: Vec<u8> = vec![];
the_same(v);
the_same(vec![1u64]);
the_same(vec![1u64, 2, 3, 4, 5, 6]);
}
#[test]
fn test_map() {
let mut m = HashMap::new();
m.insert(4u64, "foo".to_string());
m.insert(0u64, "bar".to_string());
the_same(m);
}
#[test]
fn test_bool() {
the_same(true);
the_same(false);
}
#[test]
fn test_unicode() {
the_same("å".to_string());
the_same("aåååååååa".to_string());
}
#[test]
fn test_fixed_size_array() {
the_same([24u32; 32]);
the_same([1u64, 2, 3, 4, 5, 6, 7, 8]);
the_same([0u8; 19]);
}
#[test]
fn deserializing_errors() {
match *deserialize::<bool>(&vec![0xA][..]).unwrap_err() {
ErrorKind::InvalidBoolEncoding(0xA) => {}
_ => panic!(),
}
let invalid_str = vec![1, 0, 0, 0, 0, 0, 0, 0, 0xFF];
match *deserialize::<String>(&invalid_str[..]).unwrap_err() {
ErrorKind::InvalidUtf8Encoding(_) => {}
_ => panic!(),
}
// Out-of-bounds variant
#[derive(Serialize, Deserialize, Debug)]
enum Test {
One,
Two,
};
let invalid_enum = vec![0, 0, 0, 5];
match *deserialize::<Test>(&invalid_enum[..]).unwrap_err() {
// Error message comes from serde
ErrorKind::Custom(_) => {}
_ => panic!(),
}
match *deserialize::<Option<u8>>(&vec![5, 0][..]).unwrap_err() {
ErrorKind::InvalidTagEncoding(_) => {}
_ => panic!(),
}
}
#[test]
fn trailing_bytes() {
match DefaultOptions::new()
.deserialize::<char>(b"1x")
.map_err(|e| *e)
{
Err(ErrorKind::Custom(_)) => {}
other => panic!("Expecting TrailingBytes, got {:?}", other),
}
}
#[test]
fn too_big_deserialize() {
let serialized = vec![0, 0, 0, 3];
let deserialized: Result<u32> = DefaultOptions::new()
.with_fixint_encoding()
.with_limit(3)
.deserialize_from(&mut &serialized[..]);
assert!(deserialized.is_err());
let serialized = vec![0, 0, 0, 3];
let deserialized: Result<u32> = DefaultOptions::new()
.with_fixint_encoding()
.with_limit(4)
.deserialize_from(&mut &serialized[..]);
assert!(deserialized.is_ok());
}
#[test]
fn char_serialization() {
let chars = "Aa\0☺♪";
for c in chars.chars() {
let encoded = DefaultOptions::new()
.with_limit(4)
.serialize(&c)
.expect("serializing char failed");
let decoded: char = deserialize(&encoded).expect("deserializing failed");
assert_eq!(decoded, c);
}
}
#[test]
fn too_big_char_deserialize() {
let serialized = vec![0x41];
let deserialized: Result<char> = DefaultOptions::new()
.with_limit(1)
.deserialize_from(&mut &serialized[..]);
assert!(deserialized.is_ok());
assert_eq!(deserialized.unwrap(), 'A');
}
#[test]
fn too_big_serialize() {
assert!(DefaultOptions::new()
.with_fixint_encoding()
.with_limit(3)
.serialize(&0u32)
.is_err());
assert!(DefaultOptions::new()
.with_fixint_encoding()
.with_limit(4)
.serialize(&0u32)
.is_ok());
assert!(DefaultOptions::new()
.with_fixint_encoding()
.with_limit(LEN_SIZE + 4)
.serialize(&"abcde")
.is_err());
assert!(DefaultOptions::new()
.with_fixint_encoding()
.with_limit(LEN_SIZE + 5)
.serialize(&"abcde")
.is_ok());
}
#[test]
fn test_serialized_size() {
assert!(serialized_size(&0u8).unwrap() == 1);
assert!(serialized_size(&0u16).unwrap() == 2);
assert!(serialized_size(&0u32).unwrap() == 4);
assert!(serialized_size(&0u64).unwrap() == 8);
// length (usize) stored as u64
assert!(serialized_size(&"").unwrap() == LEN_SIZE);
assert!(serialized_size(&"a").unwrap() == LEN_SIZE + 1);
assert!(serialized_size(&vec![0u32, 1u32, 2u32]).unwrap() == LEN_SIZE + 3 * (4));
}
#[test]
fn test_serialized_size_bounded() {
// JUST RIGHT
assert!(
DefaultOptions::new()
.with_fixint_encoding()
.with_limit(1)
.serialized_size(&0u8)
.unwrap()
== 1
);
assert!(
DefaultOptions::new()
.with_fixint_encoding()
.with_limit(2)
.serialized_size(&0u16)
.unwrap()
== 2
);
assert!(
DefaultOptions::new()
.with_fixint_encoding()
.with_limit(4)
.serialized_size(&0u32)
.unwrap()
== 4
);
assert!(
DefaultOptions::new()
.with_fixint_encoding()
.with_limit(8)
.serialized_size(&0u64)
.unwrap()
== 8
);
assert!(
DefaultOptions::new()
.with_fixint_encoding()
.with_limit(8)
.serialized_size(&"")
.unwrap()
== LEN_SIZE
);
assert!(
DefaultOptions::new()
.with_fixint_encoding()
.with_limit(8 + 1)
.serialized_size(&"a")
.unwrap()
== LEN_SIZE + 1
);
assert!(
DefaultOptions::new()
.with_fixint_encoding()
.with_limit(LEN_SIZE + 3 * 4)
.serialized_size(&vec![0u32, 1u32, 2u32])
.unwrap()
== LEN_SIZE + 3 * 4
);
// Below
assert!(DefaultOptions::new()
.with_fixint_encoding()
.with_limit(0)
.serialized_size(&0u8)
.is_err());
assert!(DefaultOptions::new()
.with_fixint_encoding()
.with_limit(1)
.serialized_size(&0u16)
.is_err());
assert!(DefaultOptions::new()
.with_fixint_encoding()
.with_limit(3)
.serialized_size(&0u32)
.is_err());
assert!(DefaultOptions::new()
.with_fixint_encoding()
.with_limit(7)
.serialized_size(&0u64)
.is_err());
assert!(DefaultOptions::new()
.with_fixint_encoding()
.with_limit(7)
.serialized_size(&"")
.is_err());
assert!(DefaultOptions::new()
.with_fixint_encoding()
.with_limit(8 + 0)
.serialized_size(&"a")
.is_err());
assert!(DefaultOptions::new()
.with_fixint_encoding()
.with_limit(8 + 3 * 4 - 1)
.serialized_size(&vec![0u32, 1u32, 2u32])
.is_err());
}
#[test]
fn encode_box() {
the_same(Box::new(5));
}
#[test]
fn test_cow_serialize() {
let large_object = vec![1u32, 2, 3, 4, 5, 6];
let mut large_map = HashMap::new();
large_map.insert(1, 2);
#[derive(Serialize, Deserialize, Debug)]
enum Message<'a> {
M1(Cow<'a, Vec<u32>>),
M2(Cow<'a, HashMap<u32, u32>>),
}
// Test 1
{
let serialized = serialize(&Message::M1(Cow::Borrowed(&large_object))).unwrap();
let deserialized: Message<'static> = deserialize_from(&mut &serialized[..]).unwrap();
match deserialized {
Message::M1(b) => assert!(&b.into_owned() == &large_object),
_ => assert!(false),
}
}
// Test 2
{
let serialized = serialize(&Message::M2(Cow::Borrowed(&large_map))).unwrap();
let deserialized: Message<'static> = deserialize_from(&mut &serialized[..]).unwrap();
match deserialized {
Message::M2(b) => assert!(&b.into_owned() == &large_map),
_ => assert!(false),
}
}
}
#[test]
fn test_strbox_serialize() {
let strx: &'static str = "hello world";
let serialized = serialize(&Cow::Borrowed(strx)).unwrap();
let deserialized: Cow<'static, String> = deserialize_from(&mut &serialized[..]).unwrap();
let stringx: String = deserialized.into_owned();
assert!(strx == &stringx[..]);
}
#[test]
fn test_slicebox_serialize() {
let slice = [1u32, 2, 3, 4, 5];
let serialized = serialize(&Cow::Borrowed(&slice[..])).unwrap();
println!("{:?}", serialized);
let deserialized: Cow<'static, Vec<u32>> = deserialize_from(&mut &serialized[..]).unwrap();
{
let sb: &[u32] = &deserialized;
assert!(slice == sb);
}
let vecx: Vec<u32> = deserialized.into_owned();
assert!(slice == &vecx[..]);
}
#[test]
fn test_multi_strings_serialize() {
assert!(serialize(&("foo", "bar", "baz")).is_ok());
}
#[test]
fn test_oom_protection() {
use std::io::Cursor;
#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct FakeVec {
len: u64,
byte: u8,
}
let x = DefaultOptions::new()
.with_limit(10)
.serialize(&FakeVec {
len: 0xffffffffffffffffu64,
byte: 1,
})
.unwrap();
let y: Result<Vec<u8>> = DefaultOptions::new()
.with_limit(10)
.deserialize_from(&mut Cursor::new(&x[..]));
assert!(y.is_err());
}
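// In `test_oom_protection` above, the claimed length (2^64 - 1) is read
// and checked against the 10-byte limit before any allocation happens,
// so the bogus `Vec` is rejected up front.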
#[test]
fn path_buf() {
use std::path::{Path, PathBuf};
let path = Path::new("foo").to_path_buf();
let serde_encoded = serialize(&path).unwrap();
let decoded: PathBuf = deserialize(&serde_encoded).unwrap();
assert!(path.to_str() == decoded.to_str());
}
#[test]
fn bytes() {
use serde_bytes::Bytes;
let data = b"abc\0123";
let s = serialize(&data[..]).unwrap();
let s2 = serialize(&Bytes::new(data)).unwrap();
assert_eq!(s[..], s2[..]);
}
#[test]
fn serde_bytes() {
use serde_bytes::ByteBuf;
the_same(ByteBuf::from(vec![1, 2, 3, 4, 5]));
}
#[test]
fn endian_difference() {
let x = 10u64;
let little = serialize(&x).unwrap();
let big = DefaultOptions::new()
.with_big_endian()
.serialize(&x)
.unwrap();
assert_ne!(little, big);
}
#[test]
fn test_zero_copy_parse() {
#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)]
struct Foo<'a> {
borrowed_str: &'a str,
borrowed_bytes: &'a [u8],
}
let f = Foo {
borrowed_str: "hi",
borrowed_bytes: &[0, 1, 2, 3],
};
{
let encoded = serialize(&f).unwrap();
let out: Foo = deserialize(&encoded[..]).unwrap();
assert_eq!(out, f);
}
}
#[test]
fn test_zero_copy_parse_deserialize_into() {
use bincode::BincodeRead;
use std::io;
/// A BincodeRead implementation for byte slices
pub struct SliceReader<'storage> {
slice: &'storage [u8],
}
impl<'storage> SliceReader<'storage> {
#[inline(always)]
fn unexpected_eof() -> Box<::ErrorKind> {
return Box::new(::ErrorKind::Io(io::Error::new(
io::ErrorKind::UnexpectedEof,
"",
)));
}
}
impl<'storage> io::Read for SliceReader<'storage> {
#[inline(always)]
fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
(&mut self.slice).read(out)
}
#[inline(always)]
fn read_exact(&mut self, out: &mut [u8]) -> io::Result<()> {
(&mut self.slice).read_exact(out)
}
}
impl<'storage> BincodeRead<'storage> for SliceReader<'storage> {
#[inline(always)]
fn forward_read_str<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'storage>,
{
use ErrorKind;
if length > self.slice.len() {
return Err(SliceReader::unexpected_eof());
}
let string = match ::std::str::from_utf8(&self.slice[..length]) {
Ok(s) => s,
Err(e) => return Err(ErrorKind::InvalidUtf8Encoding(e).into()),
};
let r = visitor.visit_borrowed_str(string);
self.slice = &self.slice[length..];
r
}
#[inline(always)]
fn get_byte_buffer(&mut self, length: usize) -> Result<Vec<u8>> {
if length > self.slice.len() {
return Err(SliceReader::unexpected_eof());
}
let r = &self.slice[..length];
self.slice = &self.slice[length..];
Ok(r.to_vec())
}
#[inline(always)]
fn forward_read_bytes<V>(&mut self, length: usize, visitor: V) -> Result<V::Value>
where
V: serde::de::Visitor<'storage>,
{
if length > self.slice.len() {
return Err(SliceReader::unexpected_eof());
}
let r = visitor.visit_borrowed_bytes(&self.slice[..length]);
self.slice = &self.slice[length..];
r
}
}
#[derive(Serialize, Deserialize, Eq, PartialEq, Debug)]
struct Foo<'a> {
borrowed_str: &'a str,
borrowed_bytes: &'a [u8],
}
let f = Foo {
borrowed_str: "hi",
borrowed_bytes: &[0, 1, 2, 3],
};
{
let encoded = serialize(&f).unwrap();
let mut target = Foo {
borrowed_str: "hello",
borrowed_bytes: &[10, 11, 12, 13],
};
deserialize_in_place(
SliceReader {
slice: &encoded[..],
},
&mut target,
)
.unwrap();
assert_eq!(target, f);
}
}
#[test]
fn not_human_readable() {
use std::net::Ipv4Addr;
let ip = Ipv4Addr::new(1, 2, 3, 4);
the_same(ip);
assert_eq!(&ip.octets()[..], &serialize(&ip).unwrap()[..]);
assert_eq!(
::std::mem::size_of::<Ipv4Addr>() as u64,
serialized_size(&ip).unwrap()
);
}
// The example is taken from serde::de::DeserializeSeed.
struct ExtendVec<'a, T: 'a>(&'a mut Vec<T>);
impl<'de, 'a, T> DeserializeSeed<'de> for ExtendVec<'a, T>
where
T: Deserialize<'de>,
{
// The return type of the `deserialize` method. This implementation
// appends onto an existing vector but does not create any new data
// structure, so the return type is ().
type Value = ();
fn deserialize<D>(self, deserializer: D) -> StdResult<Self::Value, D::Error>
where
D: Deserializer<'de>,
{
// Visitor implementation that will walk an inner array of the JSON
// input.
struct ExtendVecVisitor<'a, T: 'a>(&'a mut Vec<T>);
impl<'de, 'a, T> Visitor<'de> for ExtendVecVisitor<'a, T>
where
T: Deserialize<'de>,
{
type Value = ();
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "an array of integers")
}
fn visit_seq<A>(self, mut seq: A) -> StdResult<(), A::Error>
where
A: SeqAccess<'de>,
{
// Visit each element in the inner array and push it onto
// the existing vector.
while let Some(elem) = seq.next_element()? {
self.0.push(elem);
}
Ok(())
}
}
deserializer.deserialize_seq(ExtendVecVisitor(self.0))
}
}
#[test]
fn test_default_deserialize_seed() {
let config = DefaultOptions::new();
let data: Vec<_> = (10..100).collect();
let bytes = config.serialize(&data).expect("Config::serialize failed");
let mut seed_data: Vec<_> = (0..10).collect();
{
let seed = ExtendVec(&mut seed_data);
config
.deserialize_seed(seed, &bytes)
.expect("Config::deserialize_seed failed");
}
assert_eq!(seed_data, (0..100).collect::<Vec<_>>());
}
#[test]
fn test_big_endian_deserialize_seed() {
let config = DefaultOptions::new().with_big_endian();
let data: Vec<_> = (10..100).collect();
let bytes = config.serialize(&data).expect("Config::serialize failed");
let mut seed_data: Vec<_> = (0..10).collect();
{
let seed = ExtendVec(&mut seed_data);
config
.deserialize_seed(seed, &bytes)
.expect("Config::deserialize_seed failed");
}
assert_eq!(seed_data, (0..100).collect::<Vec<_>>());
}
#[test]
fn test_default_deserialize_from_seed() {
let config = DefaultOptions::new();
let data: Vec<_> = (10..100).collect();
let bytes = config.serialize(&data).expect("Config::serialize failed");
let mut seed_data: Vec<_> = (0..10).collect();
{
let seed = ExtendVec(&mut seed_data);
config
.deserialize_from_seed(seed, &mut &*bytes)
.expect("Config::deserialize_from_seed failed");
}
assert_eq!(seed_data, (0..100).collect::<Vec<_>>());
}
#[test]
fn test_big_endian_deserialize_from_seed() {
let config = DefaultOptions::new().with_big_endian();
let data: Vec<_> = (10..100).collect();
let bytes = config.serialize(&data).expect("Config::serialize failed");
let mut seed_data: Vec<_> = (0..10).collect();
{
let seed = ExtendVec(&mut seed_data);
config
.deserialize_from_seed(seed, &mut &*bytes)
.expect("Config::deserialize_from_seed failed");
}
assert_eq!(seed_data, (0..100).collect::<Vec<_>>());
}
#[test]
fn test_varint_length_prefixes() {
let a = vec![(); 127]; // should be a single byte
let b = vec![(); 250]; // also should be a single byte
let c = vec![(); 251];
let d = vec![(); u16::max_value() as usize + 1];
assert_eq!(
DefaultOptions::new()
.with_varint_encoding()
.serialized_size(&a[..])
.unwrap(),
1
); // 2 ** 7 - 1
assert_eq!(
DefaultOptions::new()
.with_varint_encoding()
.serialized_size(&b[..])
.unwrap(),
1
); // 250
assert_eq!(
DefaultOptions::new()
.with_varint_encoding()
.serialized_size(&c[..])
.unwrap(),
(1 + std::mem::size_of::<u16>()) as u64
); // 251
assert_eq!(
DefaultOptions::new()
.with_varint_encoding()
.serialized_size(&d[..])
.unwrap(),
(1 + std::mem::size_of::<u32>()) as u64
); // 2 ** 16 + 1
}
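// Illustrative sketch (inferred from the assertions above, not bincode's
// actual implementation): a varint length costs one tag byte, plus a
// fixed-width integer once the value leaves the single-byte range.
#[allow(dead_code)]
fn varint_len(n: u64) -> u64 {
    match n {
        0..=250 => 1, // the value fits in the tag byte itself
        n if n <= u16::max_value() as u64 => 1 + 2,
        n if n <= u32::max_value() as u64 => 1 + 4,
        _ => 1 + 8,
    }
}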
#[test]
fn test_byte_vec_struct() {
#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug)]
struct ByteVecs {
a: Vec<u8>,
b: Vec<u8>,
c: Vec<u8>,
};
let byte_struct = ByteVecs {
a: vec![2; 20],
b: vec![3; 30],
c: vec![1; 10],
};
the_same(byte_struct);
}

File diff suppressed because one or more lines are too long

532
third_party/rust/flate2/Cargo.lock generated vendored
@@ -1,630 +1,600 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "adler"
version = "0.2.3"
name = "adler32"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e"
[[package]]
name = "autocfg"
version = "1.0.1"
name = "arrayvec"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
dependencies = [
"nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bitflags"
version = "1.2.1"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
[[package]]
name = "byteorder"
version = "1.3.4"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
[[package]]
name = "bytes"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c"
dependencies = [
"byteorder",
"iovec",
"byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "c2-chacha"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "cc"
version = "1.0.61"
version = "1.0.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d"
[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "cloudabi"
version = "0.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
dependencies = [
"bitflags",
]
[[package]]
name = "cloudflare-zlib-sys"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e195cb274a0d6ee87e718838a09baecd7cbc9f6075dac256a84cb5842739c06"
dependencies = [
"cc",
]
[[package]]
name = "cmake"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e56268c17a6248366d66d4a47a3381369d068cce8409bb1716ed77ea32163bb"
dependencies = [
"cc",
"bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crc32fast"
version = "1.2.1"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
dependencies = [
"cfg-if 1.0.0",
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam-deque"
version = "0.7.3"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
"maybe-uninit",
"crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam-epoch"
version = "0.8.2"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace"
dependencies = [
"autocfg",
"cfg-if 0.1.10",
"crossbeam-utils",
"lazy_static",
"maybe-uninit",
"memoffset",
"scopeguard",
"arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam-queue"
version = "0.2.3"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570"
dependencies = [
"cfg-if 0.1.10",
"crossbeam-utils",
"maybe-uninit",
"crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "crossbeam-utils"
version = "0.7.2"
version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
dependencies = [
"autocfg",
"cfg-if 0.1.10",
"lazy_static",
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "flate2"
version = "1.0.19"
version = "1.0.12"
dependencies = [
"cfg-if 1.0.0",
"cloudflare-zlib-sys",
"crc32fast",
"futures",
"libc",
"libz-sys",
"miniz-sys",
"miniz_oxide",
"quickcheck",
"rand",
"tokio-io",
"tokio-tcp",
"tokio-threadpool",
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
"libz-sys 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)",
"miniz-sys 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
"miniz_oxide 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"quickcheck 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-threadpool 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "fnv"
version = "1.0.7"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "fuchsia-zircon"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
dependencies = [
"bitflags",
"fuchsia-zircon-sys",
"bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "fuchsia-zircon-sys"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
[[package]]
name = "futures"
version = "0.1.30"
version = "0.1.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c7e4c2612746b0df8fed4ce0c69156021b704c9aefa360311c04e6e9e002eed"
[[package]]
name = "getrandom"
version = "0.1.15"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
dependencies = [
"cfg-if 0.1.10",
"libc",
"wasi",
]
[[package]]
name = "hermit-abi"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
dependencies = [
"libc",
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
"wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "iovec"
version = "0.1.4"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e"
dependencies = [
"libc",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "kernel32-sys"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
dependencies = [
"winapi 0.2.8",
"winapi-build",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.80"
version = "0.2.62"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614"
[[package]]
name = "libz-sys"
version = "1.1.2"
version = "1.0.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655"
dependencies = [
"cc",
"cmake",
"libc",
"pkg-config",
"vcpkg",
"cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
"pkg-config 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
"vcpkg 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "lock_api"
version = "0.3.4"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75"
dependencies = [
"scopeguard",
"scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "log"
version = "0.4.11"
version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
dependencies = [
"cfg-if 0.1.10",
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "maybe-uninit"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
[[package]]
name = "memoffset"
version = "0.5.6"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa"
dependencies = [
"autocfg",
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "miniz-sys"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e9e3ae51cea1576ceba0dde3d484d30e6e5b86dee0b2d412fe3a16a15c98202"
dependencies = [
"cc",
"libc",
"cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "miniz_oxide"
version = "0.4.3"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
dependencies = [
"adler",
"autocfg",
"adler32 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "mio"
version = "0.6.22"
version = "0.6.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430"
dependencies = [
"cfg-if 0.1.10",
"fuchsia-zircon",
"fuchsia-zircon-sys",
"iovec",
"kernel32-sys",
"libc",
"log",
"miow",
"net2",
"slab",
"winapi 0.2.8",
"fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "miow"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
dependencies = [
"kernel32-sys",
"net2",
"winapi 0.2.8",
"ws2_32-sys",
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "net2"
version = "0.2.35"
version = "0.2.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ebc3ec692ed7c9a255596c67808dee269f64655d8baf7b4f0638e51ba1d6853"
dependencies = [
"cfg-if 0.1.10",
"libc",
"winapi 0.3.9",
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num_cpus"
version = "1.13.0"
name = "nodrop"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "num_cpus"
version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
dependencies = [
"hermit-abi",
"libc",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "parking_lot"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252"
dependencies = [
"lock_api",
"parking_lot_core",
"rustc_version",
"lock_api 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "parking_lot_core"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b"
dependencies = [
"cfg-if 0.1.10",
"cloudabi",
"libc",
"redox_syscall",
"rustc_version",
"smallvec",
"winapi 0.3.9",
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "pkg-config"
version = "0.3.19"
version = "0.3.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
[[package]]
name = "ppv-lite86"
version = "0.2.9"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20"
[[package]]
name = "quickcheck"
version = "0.9.2"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f"
dependencies = [
"rand",
"rand_core",
"rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand"
version = "0.7.3"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
dependencies = [
"getrandom",
"libc",
"rand_chacha",
"rand_core",
"rand_hc",
"getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_chacha"
version = "0.2.2"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
dependencies = [
"ppv-lite86",
"rand_core",
"c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_core"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
dependencies = [
"getrandom",
"getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rand_hc"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
dependencies = [
"rand_core",
"rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "redox_syscall"
version = "0.1.57"
version = "0.1.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
dependencies = [
"semver",
"semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "scopeguard"
version = "1.1.0"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "semver"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
dependencies = [
"semver-parser",
"semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "semver-parser"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]]
name = "slab"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8"
[[package]]
name = "smallvec"
version = "0.6.13"
version = "0.6.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6"
dependencies = [
"maybe-uninit",
]
[[package]]
name = "tokio-executor"
version = "0.1.10"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671"
dependencies = [
"crossbeam-utils",
"futures",
"crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tokio-io"
version = "0.1.13"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674"
dependencies = [
"bytes",
"futures",
"log",
"bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tokio-reactor"
version = "0.1.12"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351"
dependencies = [
"crossbeam-utils",
"futures",
"lazy_static",
"log",
"mio",
"num_cpus",
"parking_lot",
"slab",
"tokio-executor",
"tokio-io",
"tokio-sync",
"crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-sync 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tokio-sync"
version = "0.1.8"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee"
dependencies = [
"fnv",
"futures",
"fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tokio-tcp"
version = "0.1.4"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72"
dependencies = [
"bytes",
"futures",
"iovec",
"mio",
"tokio-io",
"tokio-reactor",
"bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-reactor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tokio-threadpool"
version = "0.1.18"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89"
dependencies = [
"crossbeam-deque",
"crossbeam-queue",
"crossbeam-utils",
"futures",
"lazy_static",
"log",
"num_cpus",
"slab",
"tokio-executor",
"crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "vcpkg"
version = "0.2.10"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c"
[[package]]
name = "wasi"
version = "0.9.0+wasi-snapshot-preview1"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
[[package]]
name = "winapi"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
[[package]]
name = "winapi"
version = "0.3.9"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
"winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "winapi-build"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "ws2_32-sys"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
dependencies = [
"winapi 0.2.8",
"winapi-build",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[metadata]
"checksum adler32 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2"
"checksum arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b8d73f9beda665eaa98ab9e4f7442bd4e7de6652587de55b2525e52e29c1b0ba"
"checksum bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a606a02debe2813760609f57a64a2ffd27d9fdf5b2f133eaca0b248dd92cdd2"
"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5"
"checksum bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c"
"checksum c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7d64d04786e0f528460fc884753cf8dddcc466be308f6026f8e355c41a0e4101"
"checksum cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)" = "4fc9a35e1f4290eb9e5fc54ba6cf40671ed2a2514c3eeb2b2a908dda2ea5a1be"
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
"checksum crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1"
"checksum crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b18cd2e169ad86297e6bc0ad9aa679aee9daa4f19e8163860faf7c164e4f5a71"
"checksum crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fedcd6772e37f3da2a9af9bf12ebe046c0dfe657992377b4df982a2b54cd37a9"
"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b"
"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6"
"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
"checksum futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef"
"checksum getrandom 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "473a1265acc8ff1e808cd0a1af8cee3c2ee5200916058a2ca113c29f2d903571"
"checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
"checksum libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)" = "34fcd2c08d2f832f376f4173a231990fa5aef4e99fb569867318a227ef4c06ba"
"checksum libz-sys 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "2eb5e43362e38e2bca2fd5f5134c4d4564a23a5c28e9b95411652021a8675ebe"
"checksum lock_api 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f8912e782533a93a167888781b836336a6ca5da6175c05944c86cf28c31104dc"
"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7"
"checksum memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ce6075db033bbbb7ee5a0bbd3a3186bbae616f57fb001c485c7ff77955f8177f"
"checksum miniz-sys 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "1e9e3ae51cea1576ceba0dde3d484d30e6e5b86dee0b2d412fe3a16a15c98202"
"checksum miniz_oxide 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7108aff85b876d06f22503dcce091e29f76733b2bfdd91eebce81f5e68203a10"
"checksum mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)" = "83f51996a3ed004ef184e16818edc51fadffe8e7ca68be67f9dee67d84d0ff23"
"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88"
"checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945"
"checksum num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bcef43580c035376c0705c42792c294b66974abbfd2789b511784023f71f3273"
"checksum parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252"
"checksum parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b"
"checksum pkg-config 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "72d5370d90f49f70bd033c3d75e87fc529fbfff9d6f7cccef07d6170079d91ea"
"checksum ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e3cbf9f658cdb5000fcf6f362b8ea2ba154b9f146a61c7a20d647034c6b6561b"
"checksum quickcheck 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d5ca504a2fdaa08d3517f442fbbba91ac24d1ec4c51ea68688a038765e3b2662"
"checksum rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3ae1b169243eaf61759b8475a998f0a385e42042370f3a7dbaf35246eacc8412"
"checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853"
"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d"
"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
"checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8"
"checksum smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ab606a9c5e214920bb66c458cd7be8ef094f813f20fe77a54cc7dbfff220d4b7"
"checksum tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "0f27ee0e6db01c5f0b2973824547ce7e637b2ed79b891a9677b0de9bd532b6ac"
"checksum tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5090db468dad16e1a7a54c8c67280c5e4b544f3d3e018f0b913b400261f85926"
"checksum tokio-reactor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "c56391be9805bc80163151c0b9e5164ee64f4b0200962c346fea12773158f22d"
"checksum tokio-sync 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2162248ff317e2bc713b261f242b69dbb838b85248ed20bb21df56d60ea4cae7"
"checksum tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1d14b10654be682ac43efee27401d792507e30fd8d26389e1da3b185de2e4119"
"checksum tokio-threadpool 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2bd2c6a3885302581f4401c82af70d792bb9df1700e7437b0aeb4ada94d5388c"
"checksum vcpkg 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "33dd455d0f96e90a75803cfeb7f948768c08d70a6de9a8d2362461935698bf95"
"checksum wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d"
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"

41
third_party/rust/flate2/Cargo.toml (vendored)

@ -13,46 +13,40 @@
[package]
edition = "2018"
name = "flate2"
version = "1.0.19"
authors = ["Alex Crichton <alex@alexcrichton.com>", "Josh Triplett <josh@joshtriplett.org>"]
description = "DEFLATE compression and decompression exposed as Read/BufRead/Write streams.\nSupports miniz_oxide, miniz.c, and multiple zlib implementations. Supports\nzlib, gzip, and raw deflate streams.\n"
homepage = "https://github.com/rust-lang/flate2-rs"
version = "1.0.12"
authors = ["Alex Crichton <alex@alexcrichton.com>"]
description = "Bindings to miniz.c for DEFLATE compression and decompression exposed as\nReader/Writer streams. Contains bindings for zlib, deflate, and gzip-based\nstreams.\n"
homepage = "https://github.com/alexcrichton/flate2-rs"
documentation = "https://docs.rs/flate2"
readme = "README.md"
keywords = ["gzip", "deflate", "zlib", "zlib-ng", "encoding"]
keywords = ["gzip", "flate", "zlib", "encoding"]
categories = ["compression", "api-bindings"]
license = "MIT/Apache-2.0"
repository = "https://github.com/rust-lang/flate2-rs"
repository = "https://github.com/alexcrichton/flate2-rs"
[dependencies.cfg-if]
version = "1.0.0"
[dependencies.cloudflare-zlib-sys]
version = "0.2.0"
optional = true
version = "0.1"
[dependencies.crc32fast]
version = "1.2.0"
version = "1.1"
[dependencies.futures]
version = "0.1.25"
optional = true
[dependencies.libc]
version = "0.2.65"
version = "0.2"
[dependencies.libz-sys]
version = "1.1.0"
version = "1.0"
optional = true
default-features = false
[dependencies.miniz-sys]
version = "0.1.11"
optional = true
[dependencies.miniz_oxide]
version = "0.4.0"
version = "0.3.2"
optional = true
default-features = false
[dependencies.tokio-io]
version = "0.1.11"
@ -77,13 +71,14 @@ version = "0.1.3"
version = "0.1.10"
[features]
any_zlib = []
cloudflare_zlib = ["any_zlib", "cloudflare-zlib-sys"]
default = ["rust_backend"]
rust_backend = ["miniz_oxide"]
tokio = ["tokio-io", "futures"]
zlib = ["any_zlib", "libz-sys"]
zlib-ng-compat = ["zlib", "libz-sys/zlib-ng"]
zlib = ["libz-sys"]
[target."cfg(all(target_arch = \"wasm32\", not(target_os = \"emscripten\")))".dependencies.miniz_oxide]
version = "0.4.0"
default-features = false
version = "0.3.2"
[badges.appveyor]
repository = "alexcrichton/flate2-rs"
[badges.travis-ci]
repository = "alexcrichton/flate2-rs"

59
third_party/rust/flate2/README.md (vendored)

@ -3,11 +3,11 @@
[![Crates.io](https://img.shields.io/crates/v/flate2.svg?maxAge=2592000)](https://crates.io/crates/flate2)
[![Documentation](https://docs.rs/flate2/badge.svg)](https://docs.rs/flate2)
A streaming compression/decompression library for DEFLATE-based streams in Rust.
This crate by default uses the `miniz_oxide` crate, a port of `miniz.c` to pure
Rust. This crate also supports other [backends](#Backends), such as the widely
available zlib library or the high-performance zlib-ng library.
This crate is by default implemented as a wrapper around the `miniz_oxide` crate,
a port of `miniz.c` to Rust. This crate can also optionally use the zlib library
or `miniz.c` itself.
Supported formats:
@ -21,6 +21,20 @@ Supported formats:
flate2 = "1.0"
```
Using zlib instead of the Rust backend:
```toml
[dependencies]
flate2 = { version = "1.0", features = ["zlib"], default-features = false }
```
Using `miniz.c`:
```toml
[dependencies]
flate2 = { version = "1.0", features = ["miniz-sys"], default-features = false }
```
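Whichever backend feature is selected, the calling code stays the same. A minimal sketch of the write-side API (error handling reduced to `?`; not part of the original README):
```rust
use std::io::prelude::*;

use flate2::write::ZlibEncoder;
use flate2::Compression;

// Compress a buffer; the chosen backend only changes what runs underneath.
fn main() -> std::io::Result<()> {
    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
    encoder.write_all(b"Hello World")?;
    let compressed = encoder.finish()?;
    println!("compressed to {} bytes", compressed.len());
    Ok(())
}
```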
## Compression
```rust
@ -50,43 +64,6 @@ fn main() {
}
```
## Backends
The default `miniz_oxide` backend has the advantage of being pure Rust, but if
you're already using zlib with another C library, for example, you can use that
for Rust code as well:
```toml
[dependencies]
flate2 = { version = "1.0.17", features = ["zlib"], default-features = false }
```
This supports either the high-performance zlib-ng backend (in zlib-compat mode)
or the use of a shared system zlib library. To explicitly opt into the fast
zlib-ng backend, use:
```toml
[dependencies]
flate2 = { version = "1.0.17", features = ["zlib-ng-compat"], default-features = false }
```
Note that if any crate in your dependency graph explicitly requests stock zlib,
or uses libz-sys directly without `default-features = false`, you'll get stock
zlib rather than zlib-ng. See [the libz-sys
README](https://github.com/rust-lang/libz-sys/blob/main/README.md) for details.
For compatibility with previous versions of `flate2`, the cloudflare optimized
version of zlib is available, via the `cloudflare_zlib` feature. It's not as
fast as zlib-ng, but it's faster than stock zlib. It requires an x86-64 CPU with
SSE 4.2 or ARM64 with NEON & CRC. It does not support 32-bit CPUs at all and is
incompatible with mingw. For more information check the [crate
documentation](https://crates.io/crates/cloudflare-zlib-sys). Note that
`cloudflare_zlib` will cause breakage if any other crate in your crate graph
uses another version of zlib/libz.
For compatibility with previous versions of `flate2`, the C version of `miniz.c`
is still available, using the feature `miniz-sys`.
# License
This project is licensed under either of


@ -1,28 +0,0 @@
extern crate flate2;
use flate2::write::GzEncoder;
use flate2::Compression;
use std::env::args;
use std::fs::File;
use std::io::copy;
use std::io::BufReader;
use std::time::Instant;
fn main() {
if args().len() != 3 {
eprintln!("Usage: ./compress_file `source` `target`");
return;
}
let mut input = BufReader::new(File::open(args().nth(1).unwrap()).unwrap());
let output = File::create(args().nth(2).unwrap()).unwrap();
let mut encoder = GzEncoder::new(output, Compression::default());
let start = Instant::now();
copy(&mut input, &mut encoder).unwrap();
let output = encoder.finish().unwrap();
println!(
"Source len: {:?}",
input.get_ref().metadata().unwrap().len()
);
println!("Target len: {:?}", output.metadata().unwrap().len());
println!("Elapsed: {:?}", start.elapsed());
}


@ -12,8 +12,8 @@ use crate::{Compress, Decompress};
/// A DEFLATE encoder, or compressor.
///
/// This structure consumes a [`BufRead`] interface, reading uncompressed data
/// from the underlying reader, and emitting compressed data.
/// This structure implements a [`BufRead`] interface and will read uncompressed
/// data from an underlying stream and emit a stream of compressed data.
///
/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html
///
@ -138,8 +138,8 @@ impl<R: AsyncWrite + BufRead> AsyncWrite for DeflateEncoder<R> {
/// A DEFLATE decoder, or decompressor.
///
/// This structure consumes a [`BufRead`] interface, reading compressed data
/// from the underlying reader, and emitting uncompressed data.
/// This structure implements a [`BufRead`] interface and takes a stream of
/// compressed data as input, providing the decompressed data when read from.
///
/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html
///
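A rough usage sketch of these two adapters (assuming the `bufread` module layout shown above; a byte slice already implements `BufRead`):
```rust
use std::io::prelude::*;

use flate2::bufread::{DeflateDecoder, DeflateEncoder};
use flate2::Compression;

// Round-trip bytes through the BufRead-based DEFLATE encoder and decoder.
fn main() -> std::io::Result<()> {
    let data = b"some text that should survive a deflate round trip";
    let mut compressed = Vec::new();
    DeflateEncoder::new(&data[..], Compression::fast()).read_to_end(&mut compressed)?;

    let mut restored = Vec::new();
    DeflateDecoder::new(&compressed[..]).read_to_end(&mut restored)?;
    assert_eq!(&restored[..], &data[..]);
    Ok(())
}
```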

91
third_party/rust/flate2/src/ffi/c.rs (vendored)

@ -1,3 +1,4 @@
//! Implementation for C backends.
use std::alloc::{self, Layout};
use std::cmp;
@ -38,13 +39,13 @@ impl Default for StreamWrapper {
reserved: 0,
opaque: ptr::null_mut(),
state: ptr::null_mut(),
#[cfg(feature = "any_zlib")]
#[cfg(feature = "zlib")]
zalloc,
#[cfg(feature = "any_zlib")]
#[cfg(feature = "zlib")]
zfree,
#[cfg(not(feature = "any_zlib"))]
#[cfg(not(feature = "zlib"))]
zalloc: Some(zalloc),
#[cfg(not(feature = "any_zlib"))]
#[cfg(not(feature = "zlib"))]
zfree: Some(zfree),
}),
}
@ -219,7 +220,7 @@ impl InflateBackend for Inflate {
}
}
#[cfg(feature = "any_zlib")]
#[cfg(feature = "zlib")]
fn reset(&mut self, zlib_header: bool) {
let bits = if zlib_header {
MZ_DEFAULT_WINDOW_BITS
@ -233,7 +234,7 @@ impl InflateBackend for Inflate {
self.inner.total_in = 0;
}
#[cfg(not(feature = "any_zlib"))]
#[cfg(not(feature = "zlib"))]
fn reset(&mut self, zlib_header: bool) {
*self = Self::make(zlib_header, MZ_DEFAULT_WINDOW_BITS as u8);
}
@ -273,7 +274,7 @@ impl DeflateBackend for Deflate {
} else {
-(window_bits as c_int)
},
8,
9,
MZ_DEFAULT_STRATEGY,
);
assert_eq!(ret, 0);
@ -338,17 +339,14 @@ impl Backend for Deflate {
pub use self::c_backend::*;
/// Miniz specific
#[cfg(not(feature = "any_zlib"))]
#[cfg(not(feature = "zlib"))]
mod c_backend {
pub use miniz_sys::*;
pub type AllocSize = libc::size_t;
}
/// Zlib specific
#[cfg(any(
feature = "zlib-ng-compat",
all(feature = "zlib", not(feature = "cloudflare_zlib"))
))]
#[cfg(feature = "zlib")]
#[allow(bad_style)]
mod c_backend {
use libc::{c_char, c_int};
@ -401,7 +399,10 @@ mod c_backend {
mem::size_of::<mz_stream>() as c_int,
)
}
pub unsafe extern "C" fn mz_inflateInit2(stream: *mut mz_stream, window_bits: c_int) -> c_int {
pub unsafe extern "C" fn mz_inflateInit2(
stream: *mut mz_stream,
window_bits: c_int,
) -> c_int {
libz_sys::inflateInit2_(
stream,
window_bits,
@ -410,67 +411,3 @@ mod c_backend {
)
}
}
/// Cloudflare optimized Zlib specific
#[cfg(all(feature = "cloudflare_zlib", not(feature = "zlib-ng-compat")))]
#[allow(bad_style)]
mod c_backend {
use libc::{c_char, c_int};
use std::mem;
pub use cloudflare_zlib_sys::deflate as mz_deflate;
pub use cloudflare_zlib_sys::deflateEnd as mz_deflateEnd;
pub use cloudflare_zlib_sys::deflateReset as mz_deflateReset;
pub use cloudflare_zlib_sys::inflate as mz_inflate;
pub use cloudflare_zlib_sys::inflateEnd as mz_inflateEnd;
pub use cloudflare_zlib_sys::z_stream as mz_stream;
pub use cloudflare_zlib_sys::*;
pub use cloudflare_zlib_sys::Z_BLOCK as MZ_BLOCK;
pub use cloudflare_zlib_sys::Z_BUF_ERROR as MZ_BUF_ERROR;
pub use cloudflare_zlib_sys::Z_DATA_ERROR as MZ_DATA_ERROR;
pub use cloudflare_zlib_sys::Z_DEFAULT_STRATEGY as MZ_DEFAULT_STRATEGY;
pub use cloudflare_zlib_sys::Z_DEFLATED as MZ_DEFLATED;
pub use cloudflare_zlib_sys::Z_FINISH as MZ_FINISH;
pub use cloudflare_zlib_sys::Z_FULL_FLUSH as MZ_FULL_FLUSH;
pub use cloudflare_zlib_sys::Z_NEED_DICT as MZ_NEED_DICT;
pub use cloudflare_zlib_sys::Z_NO_FLUSH as MZ_NO_FLUSH;
pub use cloudflare_zlib_sys::Z_OK as MZ_OK;
pub use cloudflare_zlib_sys::Z_PARTIAL_FLUSH as MZ_PARTIAL_FLUSH;
pub use cloudflare_zlib_sys::Z_STREAM_END as MZ_STREAM_END;
pub use cloudflare_zlib_sys::Z_STREAM_ERROR as MZ_STREAM_ERROR;
pub use cloudflare_zlib_sys::Z_SYNC_FLUSH as MZ_SYNC_FLUSH;
pub type AllocSize = cloudflare_zlib_sys::uInt;
pub const MZ_DEFAULT_WINDOW_BITS: c_int = 15;
const ZLIB_VERSION: &'static str = "1.2.8\0";
pub unsafe extern "C" fn mz_deflateInit2(
stream: *mut mz_stream,
level: c_int,
method: c_int,
window_bits: c_int,
mem_level: c_int,
strategy: c_int,
) -> c_int {
cloudflare_zlib_sys::deflateInit2_(
stream,
level,
method,
window_bits,
mem_level,
strategy,
ZLIB_VERSION.as_ptr() as *const c_char,
mem::size_of::<mz_stream>() as c_int,
)
}
pub unsafe extern "C" fn mz_inflateInit2(stream: *mut mz_stream, window_bits: c_int) -> c_int {
cloudflare_zlib_sys::inflateInit2_(
stream,
window_bits,
ZLIB_VERSION.as_ptr() as *const c_char,
mem::size_of::<mz_stream>() as c_int,
)
}
}

9
third_party/rust/flate2/src/ffi/mod.rs (vendored)

@ -34,9 +34,14 @@ pub trait DeflateBackend: Backend {
fn reset(&mut self);
}
// Default to Rust implementation unless explicitly opted in to a different backend.
// Hardwire wasm to the Rust implementation so that no C compiler needs to be
// dealt with there; otherwise, if miniz-sys or zlib is enabled we use that,
// falling back to the Rust implementation by default.
cfg_if::cfg_if! {
if #[cfg(any(feature = "miniz-sys", feature = "any_zlib"))] {
if #[cfg(target_arch = "wasm32")] {
mod rust;
pub use self::rust::*;
} else if #[cfg(any(feature = "miniz-sys", feature = "zlib"))] {
mod c;
pub use self::c::*;
} else {

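The dispatch above happens entirely at compile time. A self-contained sketch of the same pattern, assuming the `cfg_if` crate as a dependency (the `c_backend` feature and `backend_name` function are illustrative, not part of the crate):
```rust
// Illustrative only: picks one implementation at compile time, in priority order.
cfg_if::cfg_if! {
    if #[cfg(target_arch = "wasm32")] {
        fn backend_name() -> &'static str { "rust (forced on wasm32)" }
    } else if #[cfg(feature = "c_backend")] {
        fn backend_name() -> &'static str { "c" }
    } else {
        fn backend_name() -> &'static str { "rust (default)" }
    }
}

fn main() {
    println!("selected backend: {}", backend_name());
}
```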
6
third_party/rust/flate2/src/gz/bufread.rs (vendored)

@ -265,10 +265,10 @@ impl<R: BufRead + Write> Write for GzEncoder<R> {
/// A gzip streaming decoder
///
/// This structure consumes a [`BufRead`] interface, reading compressed data
/// from the underlying reader, and emitting uncompressed data.
/// This structure exposes a [`BufRead`] interface that will consume compressed
/// data from the underlying reader and emit uncompressed data.
///
/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html
///
/// # Examples
///

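A short sketch of reading a gzip stream through this decoder (a `&[u8]` works as the source since it implements `BufRead`):
```rust
use std::io::prelude::*;

use flate2::bufread::GzDecoder;

// Decode an in-memory gzip stream to a String; any BufRead source works alike.
fn decode_gz(gz_bytes: &[u8]) -> std::io::Result<String> {
    let mut text = String::new();
    GzDecoder::new(gz_bytes).read_to_string(&mut text)?;
    Ok(text)
}
```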
8
third_party/rust/flate2/src/lib.rs (vendored)

@ -192,23 +192,23 @@ impl Compression {
///
/// The integer here is typically on a scale of 0-9 where 0 means "no
/// compression" and 9 means "take as long as you'd like".
pub const fn new(level: u32) -> Compression {
pub fn new(level: u32) -> Compression {
Compression(level)
}
/// No compression is to be performed, this may actually inflate data
/// slightly when encoding.
pub const fn none() -> Compression {
pub fn none() -> Compression {
Compression(0)
}
/// Optimize for the best speed of encoding.
pub const fn fast() -> Compression {
pub fn fast() -> Compression {
Compression(1)
}
/// Optimize for the size of data being encoded.
pub const fn best() -> Compression {
pub fn best() -> Compression {
Compression(9)
}
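The named constructors are thin wrappers over the same 0-9 scale; a small sketch (using the crate's public `level` accessor):
```rust
use flate2::Compression;

// The named levels map onto the documented 0-9 scale.
fn main() {
    assert_eq!(Compression::none().level(), 0);
    assert_eq!(Compression::fast().level(), 1);
    assert_eq!(Compression::best().level(), 9);
    assert_eq!(Compression::new(6).level(), 6); // explicit mid-range level
}
```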

32
third_party/rust/flate2/src/mem.rs (vendored)

@ -209,7 +209,7 @@ impl Compress {
///
/// This constructor is only available when the `zlib` feature is used.
/// Other backends currently do not support custom window bits.
#[cfg(feature = "any_zlib")]
#[cfg(feature = "zlib")]
pub fn new_with_window_bits(
level: Compression,
zlib_header: bool,
@ -235,7 +235,7 @@ impl Compress {
/// Specifies the compression dictionary to use.
///
/// Returns the Adler-32 checksum of the dictionary.
#[cfg(feature = "any_zlib")]
#[cfg(feature = "zlib")]
pub fn set_dictionary(&mut self, dictionary: &[u8]) -> Result<u32, CompressError> {
let stream = &mut *self.inner.inner.stream_wrapper;
let rc = unsafe {
@ -267,7 +267,7 @@ impl Compress {
/// the compression of the available input data before changing the
/// compression level. Flushing the stream before calling this method
/// ensures that the function will succeed on the first call.
#[cfg(feature = "any_zlib")]
#[cfg(feature = "zlib")]
pub fn set_level(&mut self, level: Compression) -> Result<(), CompressError> {
use libc::c_int;
let stream = &mut *self.inner.inner.stream_wrapper;
@ -353,7 +353,7 @@ impl Decompress {
///
/// This constructor is only available when the `zlib` feature is used.
/// Other backends currently do not support custom window bits.
#[cfg(feature = "any_zlib")]
#[cfg(feature = "zlib")]
pub fn new_with_window_bits(zlib_header: bool, window_bits: u8) -> Decompress {
Decompress {
inner: Inflate::make(zlib_header, window_bits),
@ -439,7 +439,7 @@ impl Decompress {
}
/// Specifies the decompression dictionary to use.
#[cfg(feature = "any_zlib")]
#[cfg(feature = "zlib")]
pub fn set_dictionary(&mut self, dictionary: &[u8]) -> Result<u32, DecompressError> {
let stream = &mut *self.inner.inner.stream_wrapper;
let rc = unsafe {
@ -470,7 +470,11 @@ impl Decompress {
}
}
impl Error for DecompressError {}
impl Error for DecompressError {
fn description(&self) -> &str {
"deflate decompression error"
}
}
impl From<DecompressError> for io::Error {
fn from(data: DecompressError) -> io::Error {
@ -480,11 +484,15 @@ impl From<DecompressError> for io::Error {
impl fmt::Display for DecompressError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "deflate decompression error")
self.description().fmt(f)
}
}
impl Error for CompressError {}
impl Error for CompressError {
fn description(&self) -> &str {
"deflate compression error"
}
}
impl From<CompressError> for io::Error {
fn from(data: CompressError) -> io::Error {
@ -494,7 +502,7 @@ impl From<CompressError> for io::Error {
impl fmt::Display for CompressError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "deflate decompression error")
self.description().fmt(f)
}
}
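Because of the `From<DecompressError> for io::Error` impl above, `?` can lift decompression failures straight into an `io::Result`. A minimal sketch, assuming zlib-wrapped input:
```rust
use std::io;

use flate2::{Decompress, FlushDecompress};

// The `?` converts a DecompressError into io::Error via the From impl.
fn inflate(input: &[u8]) -> io::Result<Vec<u8>> {
    let mut output = Vec::with_capacity(16 * 1024);
    let mut decompressor = Decompress::new(true); // expect a zlib header
    decompressor.decompress_vec(input, &mut output, FlushDecompress::Finish)?;
    Ok(output)
}
```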
@ -505,7 +513,7 @@ mod tests {
use crate::write;
use crate::{Compression, Decompress, FlushDecompress};
#[cfg(feature = "any_zlib")]
#[cfg(feature = "zlib")]
use crate::{Compress, FlushCompress};
#[test]
@ -566,7 +574,7 @@ mod tests {
assert!(dst.starts_with(string));
}
#[cfg(feature = "any_zlib")]
#[cfg(feature = "zlib")]
#[test]
fn set_dictionary_with_zlib_header() {
let string = "hello, hello!".as_bytes();
@ -615,7 +623,7 @@ mod tests {
assert_eq!(&decoded[..decoder.total_out() as usize], string);
}
#[cfg(feature = "any_zlib")]
#[cfg(feature = "zlib")]
#[test]
fn set_dictionary_raw() {
let string = "hello, hello!".as_bytes();

8
third_party/rust/flate2/src/zlib/bufread.rs (vendored)

@ -12,8 +12,8 @@ use crate::{Compress, Decompress};
/// A ZLIB encoder, or compressor.
///
/// This structure consumes a [`BufRead`] interface, reading uncompressed data
/// from the underlying reader, and emitting compressed data.
/// This structure implements a [`BufRead`] interface and will read uncompressed
/// data from an underlying stream and emit a stream of compressed data.
///
/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html
///
@ -134,8 +134,8 @@ impl<R: AsyncWrite + BufRead> AsyncWrite for ZlibEncoder<R> {
/// A ZLIB decoder, or decompressor.
///
/// This structure consumes a [`BufRead`] interface, reading compressed data
/// from the underlying reader, and emitting uncompressed data.
/// This structure implements a [`BufRead`] interface and takes a stream of
/// compressed data as input, providing the decompressed data when read from.
///
/// [`BufRead`]: https://doc.rust-lang.org/std/io/trait.BufRead.html
///

File diffs hidden because one or more lines are too long

164
third_party/rust/glean-core/Cargo.lock (generated, vendored)

@ -25,9 +25,9 @@ dependencies = [
[[package]]
name = "autocfg"
version = "1.0.1"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
[[package]]
name = "bincode"
@ -53,9 +53,9 @@ checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
[[package]]
name = "cc"
version = "1.0.61"
version = "1.0.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d"
checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518"
[[package]]
name = "cfg-if"
@ -63,40 +63,32 @@ version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
version = "0.4.19"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
checksum = "c74d84029116787153e02106bf53e66828452a4b325cc8652b788b5967c0a0b6"
dependencies = [
"libc",
"num-integer",
"num-traits",
"serde",
"time",
"winapi",
]
[[package]]
name = "crc32fast"
version = "1.2.1"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
]
[[package]]
name = "ctor"
version = "0.1.16"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fbaabec2c953050352311293be5c6aba8e141ba19d6811862b232d6fd020484"
checksum = "39858aa5bac06462d4dd4b9164848eb81ffc4aa5c479746393598fd193afa227"
dependencies = [
"quote",
"syn",
@ -137,9 +129,9 @@ dependencies = [
[[package]]
name = "ffi-support"
version = "0.4.2"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f85d4d1be103c0b2d86968f0b0690dc09ac0ba205b90adb0389b552869e5000e"
checksum = "087be066eb6e85d7150f0c5400018a32802f99d688b2d3868c526f7bbfe17960"
dependencies = [
"lazy_static",
"log",
@ -147,11 +139,11 @@ dependencies = [
[[package]]
name = "flate2"
version = "1.0.19"
version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7411863d55df97a419aa64cb4d2f167103ea9d767e2c54a1868b7ac3f6b47129"
checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e"
dependencies = [
"cfg-if 1.0.0",
"cfg-if",
"crc32fast",
"libc",
"miniz_oxide",
@ -159,18 +151,18 @@ dependencies = [
[[package]]
name = "getrandom"
version = "0.1.15"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
dependencies = [
"cfg-if 0.1.10",
"cfg-if",
"libc",
"wasi 0.9.0+wasi-snapshot-preview1",
"wasi",
]
[[package]]
name = "glean-core"
version = "33.1.2"
version = "31.4.0"
dependencies = [
"bincode",
"chrono",
@ -190,9 +182,9 @@ dependencies = [
[[package]]
name = "hermit-abi"
version = "0.1.17"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9"
dependencies = [
"libc",
]
@ -206,12 +198,6 @@ dependencies = [
"quick-error",
]
[[package]]
name = "id-arena"
version = "2.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005"
[[package]]
name = "idna"
version = "0.2.0"
@ -246,9 +232,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.80"
version = "0.2.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614"
checksum = "a9f8082297d534141b30c8d39e9b1773713ab50fdbe4ff30f750d063b3bfd701"
[[package]]
name = "lmdb-rkv"
@ -279,7 +265,7 @@ version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
dependencies = [
"cfg-if 0.1.10",
"cfg-if",
]
[[package]]
@ -290,18 +276,17 @@ checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
[[package]]
name = "memchr"
version = "2.3.4"
version = "2.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
[[package]]
name = "miniz_oxide"
version = "0.4.3"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d"
checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f"
dependencies = [
"adler",
"autocfg",
]
[[package]]
@ -316,9 +301,9 @@ dependencies = [
[[package]]
name = "num-integer"
version = "0.1.44"
version = "0.1.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b"
dependencies = [
"autocfg",
"num-traits",
@ -326,18 +311,18 @@ dependencies = [
[[package]]
name = "num-traits"
version = "0.2.14"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.4.1"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad"
checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d"
[[package]]
name = "ordered-float"
@ -348,25 +333,6 @@ dependencies = [
"num-traits",
]
[[package]]
name = "paste"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
dependencies = [
"paste-impl",
"proc-macro-hack",
]
[[package]]
name = "paste-impl"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
dependencies = [
"proc-macro-hack",
]
[[package]]
name = "percent-encoding"
version = "2.1.0"
@ -375,27 +341,21 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
[[package]]
name = "pkg-config"
version = "0.3.19"
version = "0.3.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33"
[[package]]
name = "ppv-lite86"
version = "0.2.10"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
[[package]]
name = "proc-macro-hack"
version = "0.5.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea"
[[package]]
name = "proc-macro2"
version = "1.0.24"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa"
dependencies = [
"unicode-xid",
]
@ -473,21 +433,18 @@ dependencies = [
[[package]]
name = "rkv"
version = "0.15.0"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e97d1b6321740ce36d77d67d22ff84ac8a996cf69dbd0727b8bcae52f1c98aaa"
checksum = "30a3dbc1f4971372545ed4175f23ef206c81e5874cd574d153646e7ee78f6793"
dependencies = [
"arrayref",
"bincode",
"bitflags",
"byteorder",
"failure",
"id-arena",
"lazy_static",
"lmdb-rkv",
"log",
"ordered-float",
"paste",
"serde",
"serde_derive",
"url",
@ -502,18 +459,18 @@ checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
[[package]]
name = "serde"
version = "1.0.117"
version = "1.0.114"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a"
checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.117"
version = "1.0.114"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e"
checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e"
dependencies = [
"proc-macro2",
"quote",
@ -522,9 +479,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.59"
version = "1.0.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95"
checksum = "3433e879a558dde8b5e8feb2a04899cf34fdde1fafb894687e52105fc1162ac3"
dependencies = [
"itoa",
"ryu",
@ -533,9 +490,9 @@ dependencies = [
[[package]]
name = "syn"
version = "1.0.48"
version = "1.0.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac"
checksum = "936cae2873c940d92e697597c5eee105fb570cd5689c695806f672883653349b"
dependencies = [
"proc-macro2",
"quote",
@ -560,7 +517,7 @@ version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
dependencies = [
"cfg-if 0.1.10",
"cfg-if",
"libc",
"rand",
"redox_syscall",
@ -579,20 +536,19 @@ dependencies = [
[[package]]
name = "time"
version = "0.1.44"
version = "0.1.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438"
dependencies = [
"libc",
"wasi 0.10.0+wasi-snapshot-preview1",
"winapi",
]
[[package]]
name = "tinyvec"
version = "0.3.4"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "238ce071d267c5710f9d31451efec16c5ee22de34df17cc05e56cbc92e967117"
checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed"
[[package]]
name = "unicode-bidi"
@ -650,12 +606,6 @@ version = "0.9.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
[[package]]
name = "wasi"
version = "0.10.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
[[package]]
name = "winapi"
version = "0.3.9"

15
third_party/rust/glean-core/Cargo.toml (vendored)

@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "glean-core"
version = "33.1.2"
version = "31.4.0"
authors = ["Jan-Erik Rediger <jrediger@mozilla.com>", "The Glean Team <glean-team@mozilla.com>"]
include = ["/README.md", "/LICENSE", "/src", "/examples", "/tests", "/Cargo.toml"]
description = "A modern Telemetry library"
@ -21,10 +21,8 @@ readme = "README.md"
keywords = ["telemetry"]
license = "MPL-2.0"
repository = "https://github.com/mozilla/glean"
[package.metadata.glean]
glean-parser = "1.29.0"
[dependencies.bincode]
version = "1.3.1"
version = "1.2.1"
[dependencies.chrono]
version = "0.4.10"
@ -34,16 +32,16 @@ features = ["serde"]
version = "0.4.0"
[dependencies.flate2]
version = "1.0.19"
version = "1.0.12"
[dependencies.log]
version = "0.4.8"
[dependencies.once_cell]
version = "1.4.1"
version = "1.2.0"
[dependencies.rkv]
version = "0.15.0"
version = "0.10.3"
[dependencies.serde]
version = "1.0.104"
@ -68,9 +66,6 @@ version = "0.4"
[dev-dependencies.tempfile]
version = "3.1.0"
[features]
rkv-safe-mode = []
[badges.circle-ci]
branch = "main"
repository = "mozilla/glean"

154
third_party/rust/glean-core/examples/sample.rs (vendored)

@ -1,77 +1,77 @@
use std::env;
use glean_core::metrics::*;
use glean_core::ping::PingMaker;
use glean_core::{CommonMetricData, Glean};
use tempfile::Builder;
fn main() {
env_logger::init();
let mut args = env::args().skip(1);
let data_path = if let Some(path) = args.next() {
path
} else {
let root = Builder::new().prefix("simple-db").tempdir().unwrap();
root.path().display().to_string()
};
let cfg = glean_core::Configuration {
data_path,
application_id: "org.mozilla.glean_core.example".into(),
language_binding_name: "Rust".into(),
upload_enabled: true,
max_events: None,
delay_ping_lifetime_io: false,
};
let mut glean = Glean::new(cfg).unwrap();
glean.register_ping_type(&PingType::new("baseline", true, false, vec![]));
glean.register_ping_type(&PingType::new("metrics", true, false, vec![]));
let local_metric: StringMetric = StringMetric::new(CommonMetricData {
name: "local_metric".into(),
category: "local".into(),
send_in_pings: vec!["baseline".into()],
..Default::default()
});
let call_counter: CounterMetric = CounterMetric::new(CommonMetricData {
name: "calls".into(),
category: "local".into(),
send_in_pings: vec!["baseline".into(), "metrics".into()],
..Default::default()
});
local_metric.set(&glean, "I can set this");
call_counter.add(&glean, 1);
println!("Baseline Data:\n{}", glean.snapshot("baseline", true));
call_counter.add(&glean, 2);
println!("Metrics Data:\n{}", glean.snapshot("metrics", true));
call_counter.add(&glean, 3);
println!();
println!("Baseline Data 2:\n{}", glean.snapshot("baseline", false));
println!("Metrics Data 2:\n{}", glean.snapshot("metrics", true));
let list: StringListMetric = StringListMetric::new(CommonMetricData {
name: "list".into(),
category: "local".into(),
send_in_pings: vec!["baseline".into()],
..Default::default()
});
list.add(&glean, "once");
list.add(&glean, "upon");
let ping_maker = PingMaker::new();
let ping = ping_maker
.collect_string(&glean, glean.get_ping_by_name("baseline").unwrap(), None)
.unwrap();
println!("Baseline Ping:\n{}", ping);
let ping = ping_maker.collect_string(&glean, glean.get_ping_by_name("metrics").unwrap(), None);
println!("Metrics Ping: {:?}", ping);
}


@ -1,127 +1,127 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use crate::error::{Error, ErrorKind};
use crate::metrics::dynamic_label;
use crate::Glean;
/// The supported metrics' lifetimes.
///
/// A metric's lifetime determines when its stored data gets reset.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(i32)] // Use i32 to be compatible with our JNA definition
pub enum Lifetime {
/// The metric is reset with each sent ping
Ping,
/// The metric is reset on application restart
Application,
/// The metric is reset with each user profile
User,
}
impl Default for Lifetime {
fn default() -> Self {
Lifetime::Ping
}
}
impl Lifetime {
/// String representation of the lifetime.
pub fn as_str(self) -> &'static str {
match self {
Lifetime::Ping => "ping",
Lifetime::Application => "app",
Lifetime::User => "user",
}
}
}
impl TryFrom<i32> for Lifetime {
type Error = Error;
fn try_from(value: i32) -> Result<Lifetime, Self::Error> {
match value {
0 => Ok(Lifetime::Ping),
1 => Ok(Lifetime::Application),
2 => Ok(Lifetime::User),
e => Err(ErrorKind::Lifetime(e).into()),
}
}
}
/// The common set of data shared across all different metric types.
#[derive(Default, Debug, Clone)]
pub struct CommonMetricData {
/// The metric's name.
pub name: String,
/// The metric's category.
pub category: String,
/// List of ping names to include this metric in.
pub send_in_pings: Vec<String>,
/// The metric's lifetime.
pub lifetime: Lifetime,
/// Whether or not the metric is disabled.
///
/// Disabled metrics are never recorded.
pub disabled: bool,
/// Dynamic label.
/// When a LabeledMetric<T> factory creates the specific metric to be
/// recorded to, dynamic labels are stored in the specific label so that we
/// can validate them when the Glean singleton is available.
pub dynamic_label: Option<String>,
}
impl CommonMetricData {
/// Creates a new metadata object.
pub fn new<A: Into<String>, B: Into<String>, C: Into<String>>(
category: A,
name: B,
ping_name: C,
) -> CommonMetricData {
CommonMetricData {
name: name.into(),
category: category.into(),
send_in_pings: vec![ping_name.into()],
..Default::default()
}
}
/// The metric's base identifier, including the category and name, but not the label.
///
/// If `category` is empty, it's omitted.
/// Otherwise, it's the combination of the metric's `category` and `name`.
pub(crate) fn base_identifier(&self) -> String {
if self.category.is_empty() {
self.name.clone()
} else {
format!("{}.{}", self.category, self.name)
}
}
/// The metric's unique identifier, including the category, name and label.
///
/// If `category` is empty, it's omitted.
/// Otherwise, it's the combination of the metric's `category`, `name` and `label`.
pub(crate) fn identifier(&self, glean: &Glean) -> String {
let base_identifier = self.base_identifier();
if let Some(label) = &self.dynamic_label {
dynamic_label(glean, self, &base_identifier, label)
} else {
base_identifier
}
}
/// Whether this metric should be recorded.
pub fn should_record(&self) -> bool {
!self.disabled
}
/// The list of storages this metric should be recorded into.
pub fn storage_names(&self) -> &[String] {
&self.send_in_pings
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use crate::error::{Error, ErrorKind};
use crate::metrics::dynamic_label;
use crate::Glean;
/// The supported metrics' lifetimes.
///
/// A metric's lifetime determines when its stored data gets reset.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(i32)] // Use i32 to be compatible with our JNA definition
pub enum Lifetime {
/// The metric is reset with each sent ping
Ping,
/// The metric is reset on application restart
Application,
/// The metric is reset with each user profile
User,
}
impl Default for Lifetime {
fn default() -> Self {
Lifetime::Ping
}
}
impl Lifetime {
/// String representation of the lifetime.
pub fn as_str(self) -> &'static str {
match self {
Lifetime::Ping => "ping",
Lifetime::Application => "app",
Lifetime::User => "user",
}
}
}
impl TryFrom<i32> for Lifetime {
type Error = Error;
fn try_from(value: i32) -> Result<Lifetime, Self::Error> {
match value {
0 => Ok(Lifetime::Ping),
1 => Ok(Lifetime::Application),
2 => Ok(Lifetime::User),
e => Err(ErrorKind::Lifetime(e).into()),
}
}
}
/// The common set of data shared across all different metric types.
#[derive(Default, Debug, Clone)]
pub struct CommonMetricData {
/// The metric's name.
pub name: String,
/// The metric's category.
pub category: String,
/// List of ping names to include this metric in.
pub send_in_pings: Vec<String>,
/// The metric's lifetime.
pub lifetime: Lifetime,
/// Whether or not the metric is disabled.
///
/// Disabled metrics are never recorded.
pub disabled: bool,
/// Dynamic label.
/// When a LabeledMetric<T> factory creates the specific metric to be
/// recorded to, dynamic labels are stored in the specific label so that we
/// can validate them when the Glean singleton is available.
pub dynamic_label: Option<String>,
}
impl CommonMetricData {
/// Create a new metadata object.
pub fn new<A: Into<String>, B: Into<String>, C: Into<String>>(
category: A,
name: B,
ping_name: C,
) -> CommonMetricData {
CommonMetricData {
name: name.into(),
category: category.into(),
send_in_pings: vec![ping_name.into()],
..Default::default()
}
}
/// The metric's base identifier, including the category and name, but not the label.
///
/// If `category` is empty, it's omitted.
/// Otherwise, it's the combination of the metric's `category` and `name`.
pub(crate) fn base_identifier(&self) -> String {
if self.category.is_empty() {
self.name.clone()
} else {
format!("{}.{}", self.category, self.name)
}
}
/// The metric's unique identifier, including the category, name and label.
///
/// If `category` is empty, it's omitted.
/// Otherwise, it's the combination of the metric's `category`, `name` and `label`.
pub(crate) fn identifier(&self, glean: &Glean) -> String {
let base_identifier = self.base_identifier();
if let Some(label) = &self.dynamic_label {
dynamic_label(glean, self, &base_identifier, label)
} else {
base_identifier
}
}
/// Whether this metric should be recorded.
pub fn should_record(&self) -> bool {
!self.disabled
}
/// The list of storages this metric should be recorded into.
pub fn storage_names(&self) -> &[String] {
&self.send_in_pings
}
}
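A small usage sketch of this metadata type, following the constructor and accessors above (metric construction as in the crate's `sample.rs` example; this assumes `Lifetime` is re-exported at the crate root alongside `CommonMetricData`):
```rust
use glean_core::metrics::CounterMetric;
use glean_core::{CommonMetricData, Lifetime};

// Build shared metadata, then hand it to a concrete metric type.
fn main() {
    let meta = CommonMetricData::new("local", "calls", "baseline");
    assert!(meta.should_record()); // not disabled by default
    assert_eq!(meta.storage_names(), &["baseline".to_string()][..]);

    let counter = CounterMetric::new(CommonMetricData {
        lifetime: Lifetime::Application,
        ..meta
    });
    let _ = counter;
}
```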

2235
third_party/rust/glean-core/src/database/mod.rs (vendored)

File diff not shown because of its large size

536
third_party/rust/glean-core/src/debug.rs (vendored)

@ -1,321 +1,215 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! # Debug options
//!
//! The debug options for Glean may be set by calling one of the `set_*` functions
//! or by setting specific environment variables.
//!
//! The environment variables will be read only once when the options are initialized.
//!
//! The possible debugging features available out of the box are:
//!
//! * **Ping logging** - logging the contents of ping requests that are correctly assembled;
//! This may be set by calling glean.set_log_pings(value: bool)
//! or by setting the environment variable GLEAN_LOG_PINGS="true";
//! * **Debug tagging** - Adding the X-Debug-ID header to every ping request,
//! allowing these tagged pings to be sent to the ["Ping Debug Viewer"](https://mozilla.github.io/glean/book/dev/core/internal/debug-pings.html).
//! This may be set by calling glean.set_debug_view_tag(value: &str)
//! or by setting the environment variable GLEAN_DEBUG_VIEW_TAG=<some tag>;
//! * **Source tagging** - Adding the X-Source-Tags header to every ping request,
//! allowing pings to be tagged with custom labels.
//! This may be set by calling glean.set_source_tags(value: Vec<String>)
//! or by setting the environment variable GLEAN_SOURCE_TAGS=<some, tags>;
//!
//! Bindings may implement other debugging features, e.g. sending pings on demand.
use std::env;
const GLEAN_LOG_PINGS: &str = "GLEAN_LOG_PINGS";
const GLEAN_DEBUG_VIEW_TAG: &str = "GLEAN_DEBUG_VIEW_TAG";
const GLEAN_SOURCE_TAGS: &str = "GLEAN_SOURCE_TAGS";
const GLEAN_MAX_SOURCE_TAGS: usize = 5;
/// A representation of all of Glean's debug options.
pub struct DebugOptions {
/// Option to log the payload of pings that are successfully assembled into a ping request.
pub log_pings: DebugOption<bool>,
/// Option to add the X-Debug-ID header to every ping request.
pub debug_view_tag: DebugOption<String>,
/// Option to add the X-Source-Tags header to ping requests. This will allow the data
/// consumers to classify data depending on the applied tags.
pub source_tags: DebugOption<Vec<String>>,
}
impl std::fmt::Debug for DebugOptions {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("DebugOptions")
.field("log_pings", &self.log_pings.get())
.field("debug_view_tag", &self.debug_view_tag.get())
.field("source_tags", &self.source_tags.get())
.finish()
}
}
impl DebugOptions {
pub fn new() -> Self {
Self {
log_pings: DebugOption::new(GLEAN_LOG_PINGS, get_bool_from_str, None),
debug_view_tag: DebugOption::new(GLEAN_DEBUG_VIEW_TAG, Some, Some(validate_tag)),
source_tags: DebugOption::new(
GLEAN_SOURCE_TAGS,
tokenize_string,
Some(validate_source_tags),
),
}
}
}
/// A representation of a debug option,
/// where the value can be set programmatically or come from an environment variable.
#[derive(Debug)]
pub struct DebugOption<T, E = fn(String) -> Option<T>, V = fn(&T) -> bool> {
/// The name of the environment variable related to this debug option.
env: String,
/// The actual value of this option.
value: Option<T>,
/// Function to extract the data of type `T` from a `String`, used when
/// extracting data from the environment.
extraction: E,
/// Optional function to validate the value parsed from the environment
/// or passed to the `set` function.
validation: Option<V>,
}
impl<T, E, V> DebugOption<T, E, V>
where
T: Clone,
E: Fn(String) -> Option<T>,
V: Fn(&T) -> bool,
{
/// Creates a new debug option.
///
/// Tries to get the initial value of the option from the environment.
pub fn new(env: &str, extraction: E, validation: Option<V>) -> Self {
let mut option = Self {
env: env.into(),
value: None,
extraction,
validation,
};
option.set_from_env();
option
}
fn validate(&self, value: &T) -> bool {
if let Some(f) = self.validation.as_ref() {
f(value)
} else {
true
}
}
fn set_from_env(&mut self) {
let extract = &self.extraction;
match env::var(&self.env) {
Ok(env_value) => match extract(env_value.clone()) {
Some(v) => {
self.set(v);
}
None => {
log::error!(
"Unable to parse debug option {}={} into {}. Ignoring.",
self.env,
env_value,
std::any::type_name::<T>()
);
}
},
Err(env::VarError::NotUnicode(_)) => {
log::error!("The value of {} is not valid unicode. Ignoring.", self.env)
}
// The other possible error is that the env var is not set,
// which is not an error for us and can safely be ignored.
Err(_) => {}
}
}
/// Tries to set a value for this debug option.
///
/// Validates the value in case a validation function is available.
///
/// # Returns
///
/// Whether the option passed validation and was successfully set.
pub fn set(&mut self, value: T) -> bool {
let validated = self.validate(&value);
if validated {
log::info!("Setting the debug option {}.", self.env);
self.value = Some(value);
return true;
}
log::info!("Invalid value for debug option {}.", self.env);
false
}
/// Gets the value of this debug option.
pub fn get(&self) -> Option<&T> {
self.value.as_ref()
}
}
fn get_bool_from_str(value: String) -> Option<bool> {
std::str::FromStr::from_str(&value).ok()
}
fn tokenize_string(value: String) -> Option<Vec<String>> {
let trimmed = value.trim();
if trimmed.is_empty() {
return None;
}
Some(trimmed.split(',').map(|s| s.trim().to_string()).collect())
}
/// A tag is the value used in both the `X-Debug-ID` and `X-Source-Tags` headers
/// of tagged ping requests; thus it must be a valid header value.
///
/// In other words, it must match the regex: "[a-zA-Z0-9-]{1,20}"
///
/// The regex crate isn't used here because it adds to the binary size,
/// and the Glean SDK doesn't use regular expressions anywhere else.
#[allow(clippy::ptr_arg)]
fn validate_tag(value: &String) -> bool {
if value.is_empty() {
log::error!("A tag must have at least one character.");
return false;
}
let mut iter = value.chars();
let mut count = 0;
loop {
match iter.next() {
// We are done, so the whole expression is valid.
None => return true,
// Valid characters.
Some('-') | Some('a'..='z') | Some('A'..='Z') | Some('0'..='9') => (),
// An invalid character
Some(c) => {
log::error!("Invalid character '{}' in the tag.", c);
return false;
}
}
count += 1;
if count > 20 {
log::error!("A tag cannot exceed 20 characters.");
return false;
}
}
}
/// Validate the list of source tags.
///
/// This builds upon the existing `validate_tag` function, since all the
/// tags should respect the same rules to make the pipeline happy.
#[allow(clippy::ptr_arg)]
fn validate_source_tags(tags: &Vec<String>) -> bool {
if tags.is_empty() {
return false;
}
if tags.len() > GLEAN_MAX_SOURCE_TAGS {
log::error!(
"A list of tags cannot contain more than {} elements.",
GLEAN_MAX_SOURCE_TAGS
);
return false;
}
// Filter out tags starting with "glean". They are reserved.
if tags.iter().any(|s| s.starts_with("glean")) {
log::error!("Tags starting with `glean` are reserved and must not be used.");
return false;
}
tags.iter().all(|x| validate_tag(&x))
}
#[cfg(test)]
mod test {
use super::*;
use std::env;
#[test]
fn debug_option_is_correctly_loaded_from_env() {
env::set_var("GLEAN_TEST_1", "test");
let option: DebugOption<String> = DebugOption::new("GLEAN_TEST_1", Some, None);
assert_eq!(option.get().unwrap(), "test");
}
#[test]
fn debug_option_is_correctly_validated_when_necessary() {
#[allow(clippy::ptr_arg)]
fn validate(value: &String) -> bool {
value == "test"
}
// Invalid values from the env are not set
env::set_var("GLEAN_TEST_2", "invalid");
let mut option: DebugOption<String> =
DebugOption::new("GLEAN_TEST_2", Some, Some(validate));
assert!(option.get().is_none());
// Valid values are set using the `set` function
assert!(option.set("test".into()));
assert_eq!(option.get().unwrap(), "test");
// Invalid values are not set using the `set` function
assert!(!option.set("invalid".into()));
assert_eq!(option.get().unwrap(), "test");
}
#[test]
fn tokenize_string_splits_correctly() {
// Valid list is properly tokenized and spaces are trimmed.
assert_eq!(
Some(vec!["test1".to_string(), "test2".to_string()]),
tokenize_string(" test1, test2 ".to_string())
);
// Empty strings return no item.
assert_eq!(None, tokenize_string("".to_string()));
}
#[test]
fn validates_tag_correctly() {
assert!(validate_tag(&"valid-value".to_string()));
assert!(validate_tag(&"-also-valid-value".to_string()));
assert!(!validate_tag(&"invalid_value".to_string()));
assert!(!validate_tag(&"invalid value".to_string()));
assert!(!validate_tag(&"!nv@lid-val*e".to_string()));
assert!(!validate_tag(
&"invalid-value-because-way-too-long".to_string()
));
assert!(!validate_tag(&"".to_string()));
}
#[test]
fn validates_source_tags_correctly() {
// Empty tags.
assert!(!validate_source_tags(&vec!["".to_string()]));
// Too many tags.
assert!(!validate_source_tags(&vec![
"1".to_string(),
"2".to_string(),
"3".to_string(),
"4".to_string(),
"5".to_string(),
"6".to_string()
]));
// Invalid tags.
assert!(!validate_source_tags(&vec!["!nv@lid-val*e".to_string()]));
        // Entries starting with 'glean' are reserved, so the list is rejected.
assert!(!validate_source_tags(&vec![
"glean-test1".to_string(),
"test2".to_string()
]));
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! # Debug options
//!
//! The debug options for Glean may be set by calling one of the `set_*` functions
//! or by setting specific environment variables.
//!
//! The environment variables will be read only once when the options are initialized.
//!
//! The possible debugging features available out of the box are:
//!
//! * **Ping logging** - logging the contents of ping requests that are correctly assembled;
//! This may be set by calling glean.set_log_pings(value: bool)
//! or by setting the environment variable GLEAN_LOG_PINGS="true";
//! * **Debug tagging** - Adding the X-Debug-ID header to every ping request,
//! allowing these tagged pings to be sent to the ["Ping Debug Viewer"](https://mozilla.github.io/glean/book/dev/core/internal/debug-pings.html).
//! This may be set by calling glean.set_debug_view_tag(value: &str)
//! or by setting the environment variable GLEAN_DEBUG_VIEW_TAG=<some tag>;
//!
//! Bindings may implement other debugging features, e.g. sending pings on demand.
use std::env;
const GLEAN_LOG_PINGS: &str = "GLEAN_LOG_PINGS";
const GLEAN_DEBUG_VIEW_TAG: &str = "GLEAN_DEBUG_VIEW_TAG";
/// A representation of all of Glean's debug options.
#[derive(Debug)]
pub struct DebugOptions {
/// Option to log the payload of pings that are successfully assembled into a ping request.
pub log_pings: DebugOption<bool>,
/// Option to add the X-Debug-ID header to every ping request.
pub debug_view_tag: DebugOption<String>,
}
impl DebugOptions {
pub fn new() -> Self {
Self {
log_pings: DebugOption::new(GLEAN_LOG_PINGS, None),
debug_view_tag: DebugOption::new(GLEAN_DEBUG_VIEW_TAG, Some(validate_debug_view_tag)),
}
}
}
/// A representation of a debug option,
/// where the value can be set programmatically or come from an environment variable.
#[derive(Debug)]
pub struct DebugOption<T, F = fn(T) -> Option<T>> {
/// The name of the environment variable related to this debug option.
env: String,
/// The actual value of this option.
value: Option<T>,
    /// Optional function to validate the value parsed from the environment
/// or passed to the `set` function.
validation: Option<F>,
}
impl<T, F> DebugOption<T, F>
where
T: std::str::FromStr,
F: Fn(T) -> Option<T>,
{
    /// Creates a new debug option,
    /// trying to get its initial value from the environment.
pub fn new(env: &str, validation: Option<F>) -> Self {
let mut option = Self {
env: env.into(),
value: None,
validation,
};
option.set_from_env();
option
}
fn validate(&self, value: T) -> Option<T> {
if let Some(f) = self.validation.as_ref() {
f(value)
} else {
Some(value)
}
}
fn set_from_env(&mut self) {
match env::var(&self.env) {
Ok(env_value) => match T::from_str(&env_value) {
Ok(v) => {
self.set(v);
}
Err(_) => {
log::error!(
"Unable to parse debug option {}={} into {}. Ignoring.",
self.env,
env_value,
std::any::type_name::<T>()
);
}
},
Err(env::VarError::NotUnicode(_)) => {
log::error!("The value of {} is not valid unicode. Ignoring.", self.env)
}
// The other possible error is that the env var is not set,
// which is not an error for us and can safely be ignored.
Err(_) => {}
}
}
/// Tries to set a value for this debug option,
    /// returns `true` if successful.
///
/// Validates the value in case a validation function is available.
pub fn set(&mut self, value: T) -> bool {
let validated = self.validate(value);
if validated.is_some() {
log::info!("Setting the debug option {}.", self.env);
self.value = validated;
return true;
}
log::info!("Invalid value for debug option {}.", self.env);
false
}
/// Gets the value of this debug option.
pub fn get(&self) -> Option<&T> {
self.value.as_ref()
}
}
/// The debug view tag is the value for the `X-Debug-ID` header of tagged ping requests,
/// thus it must be a valid header value.
///
/// In other words, it must match the regex: "[a-zA-Z0-9-]{1,20}"
///
/// The regex crate isn't used here because it adds to the binary size,
/// and the Glean SDK doesn't use regular expressions anywhere else.
pub fn validate_debug_view_tag(value: String) -> Option<String> {
if value.is_empty() {
log::error!("Debug view tag must have at least one character.");
return None;
}
let mut iter = value.chars();
let mut count = 0;
loop {
match iter.next() {
// We are done, so the whole expression is valid.
None => return Some(value),
// Valid characters.
Some('-') | Some('a'..='z') | Some('A'..='Z') | Some('0'..='9') => (),
// An invalid character
Some(c) => {
log::error!("Invalid character '{}' in debug view tag.", c);
return None;
}
}
count += 1;
if count == 20 {
log::error!("Debug view tag cannot exceed 20 characters");
return None;
}
}
}
#[cfg(test)]
mod test {
use super::*;
use std::env;
#[test]
fn debug_option_is_correctly_loaded_from_env() {
env::set_var("GLEAN_TEST_1", "test");
let option: DebugOption<String> = DebugOption::new("GLEAN_TEST_1", None);
assert_eq!(option.get().unwrap(), "test");
}
#[test]
fn debug_option_is_correctly_validated_when_necessary() {
fn validate(value: String) -> Option<String> {
if value == "test" {
Some(value)
} else {
None
}
}
// Invalid values from the env are not set
env::set_var("GLEAN_TEST_2", "invalid");
let mut option: DebugOption<String> = DebugOption::new("GLEAN_TEST_2", Some(validate));
assert!(option.get().is_none());
// Valid values are set using the `set` function
assert!(option.set("test".into()));
assert_eq!(option.get().unwrap(), "test");
// Invalid values are not set using the `set` function
assert!(!option.set("invalid".into()));
assert_eq!(option.get().unwrap(), "test");
}
#[test]
fn validates_debug_view_tag_correctly() {
assert!(validate_debug_view_tag("valid-value".to_string()).is_some());
assert!(validate_debug_view_tag("-also-valid-value".to_string()).is_some());
assert!(validate_debug_view_tag("invalid_value".to_string()).is_none());
assert!(validate_debug_view_tag("invalid value".to_string()).is_none());
assert!(validate_debug_view_tag("!nv@lid-val*e".to_string()).is_none());
assert!(
validate_debug_view_tag("invalid-value-because-way-too-long".to_string()).is_none()
);
assert!(validate_debug_view_tag("".to_string()).is_none());
}
}
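// A hedged usage sketch, not part of the original diff: constructing
// `DebugOptions` after setting one of the documented environment variables.
// `bool` implements `FromStr`, so GLEAN_LOG_PINGS="true" parses to `Some(true)`.
#[allow(dead_code)]
fn example_debug_options_from_env() {
    std::env::set_var("GLEAN_LOG_PINGS", "true");
    let options = DebugOptions::new();
    assert_eq!(options.log_pings.get(), Some(&true));
}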

364 third_party/rust/glean-core/src/error.rs vendored
@@ -1,189 +1,175 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::ffi::OsString;
use std::fmt::{self, Display};
use std::io;
use std::result;
use ffi_support::{handle_map::HandleError, ExternError};
use rkv::StoreError;
/// A specialized [`Result`] type for this crate's operations.
///
/// This is generally used to avoid writing out [Error] directly and
/// is otherwise a direct mapping to [`Result`].
///
/// [`Result`]: https://doc.rust-lang.org/stable/std/result/enum.Result.html
/// [`Error`]: std.struct.Error.html
pub type Result<T> = result::Result<T, Error>;
/// A list enumerating the categories of errors in this crate.
///
/// [`Error`]: https://doc.rust-lang.org/stable/std/error/trait.Error.html
///
/// This list is intended to grow over time and it is not recommended to
/// exhaustively match against it.
#[derive(Debug)]
#[non_exhaustive]
pub enum ErrorKind {
/// Lifetime conversion failed
Lifetime(i32),
/// FFI-Support error
Handle(HandleError),
/// IO error
IoError(io::Error),
    /// Rkv error
Rkv(StoreError),
/// JSON error
Json(serde_json::error::Error),
/// TimeUnit conversion failed
TimeUnit(i32),
/// MemoryUnit conversion failed
MemoryUnit(i32),
/// HistogramType conversion failed
HistogramType(i32),
/// OsString conversion failed
OsString(OsString),
/// Unknown error
Utf8Error,
/// Glean initialization was attempted with an invalid configuration
InvalidConfig,
/// Glean not initialized
NotInitialized,
/// Ping request body size overflowed
PingBodyOverflow(usize),
}
/// A specialized [`Error`] type for this crate's operations.
///
/// [`Error`]: https://doc.rust-lang.org/stable/std/error/trait.Error.html
#[derive(Debug)]
pub struct Error {
kind: ErrorKind,
}
impl Error {
/// Returns a new UTF-8 error
///
/// This is exposed in order to expose conversion errors on the FFI layer.
pub fn utf8_error() -> Error {
Error {
kind: ErrorKind::Utf8Error,
}
}
/// Indicates an error that no requested global object is initialized
pub fn not_initialized() -> Error {
Error {
kind: ErrorKind::NotInitialized,
}
}
/// Returns the kind of the current error instance.
pub fn kind(&self) -> &ErrorKind {
&self.kind
}
}
impl std::error::Error for Error {}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use ErrorKind::*;
match self.kind() {
Lifetime(l) => write!(f, "Lifetime conversion from {} failed", l),
Handle(e) => write!(f, "Invalid handle: {}", e),
IoError(e) => write!(f, "An I/O error occurred: {}", e),
Rkv(e) => write!(f, "An Rkv error occurred: {}", e),
Json(e) => write!(f, "A JSON error occurred: {}", e),
TimeUnit(t) => write!(f, "TimeUnit conversion from {} failed", t),
MemoryUnit(m) => write!(f, "MemoryUnit conversion from {} failed", m),
HistogramType(h) => write!(f, "HistogramType conversion from {} failed", h),
OsString(s) => write!(f, "OsString conversion from {:?} failed", s),
Utf8Error => write!(f, "Invalid UTF-8 byte sequence in string"),
InvalidConfig => write!(f, "Invalid Glean configuration provided"),
NotInitialized => write!(f, "Global Glean object missing"),
PingBodyOverflow(s) => write!(
f,
"Ping request body size exceeded maximum size allowed: {}kB.",
s / 1024
),
}
}
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Error {
Error { kind }
}
}
impl From<HandleError> for Error {
fn from(error: HandleError) -> Error {
Error {
kind: ErrorKind::Handle(error),
}
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Error {
Error {
kind: ErrorKind::IoError(error),
}
}
}
impl From<StoreError> for Error {
fn from(error: StoreError) -> Error {
Error {
kind: ErrorKind::Rkv(error),
}
}
}
impl From<Error> for ExternError {
fn from(error: Error) -> ExternError {
ffi_support::ExternError::new_error(ffi_support::ErrorCode::new(42), format!("{}", error))
}
}
impl From<serde_json::error::Error> for Error {
fn from(error: serde_json::error::Error) -> Error {
Error {
kind: ErrorKind::Json(error),
}
}
}
impl From<OsString> for Error {
fn from(error: OsString) -> Error {
Error {
kind: ErrorKind::OsString(error),
}
}
}
/// To satisfy integer conversion done by the macros on the FFI side, we need to be able to turn
/// something infallible into an error.
/// This will never actually be reached, as an integer-to-integer conversion is infallible.
impl From<std::convert::Infallible> for Error {
fn from(_: std::convert::Infallible) -> Error {
unreachable!()
}
}
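// A hedged usage sketch, not part of the original diff: the `From` impls above
// let callers bubble distinct failure types into this crate's `Error` with the
// `?` operator, while the `Result<T>` alias keeps signatures short. The helper
// name and logic below are illustrative only.
#[allow(dead_code)]
fn example_error_conversion(path: &std::path::Path) -> Result<serde_json::Value> {
    // `std::io::Error` converts through `From<io::Error> for Error`.
    let contents = std::fs::read_to_string(path)?;
    // `serde_json::error::Error` converts through its `From` impl as well.
    let parsed = serde_json::from_str(&contents)?;
    Ok(parsed)
}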
use std::ffi::OsString;
use std::fmt::{self, Display};
use std::io;
use std::result;
use ffi_support::{handle_map::HandleError, ExternError};
use rkv::error::StoreError;
/// A specialized [`Result`] type for this crate's operations.
///
/// This is generally used to avoid writing out [Error] directly and
/// is otherwise a direct mapping to [`Result`].
///
/// [`Result`]: https://doc.rust-lang.org/stable/std/result/enum.Result.html
/// [`Error`]: std.struct.Error.html
pub type Result<T> = result::Result<T, Error>;
/// A list enumerating the categories of errors in this crate.
///
/// [`Error`]: https://doc.rust-lang.org/stable/std/error/trait.Error.html
///
/// This list is intended to grow over time and it is not recommended to
/// exhaustively match against it.
#[derive(Debug)]
pub enum ErrorKind {
/// Lifetime conversion failed
Lifetime(i32),
/// FFI-Support error
Handle(HandleError),
/// IO error
IoError(io::Error),
    /// Rkv error
Rkv(StoreError),
/// JSON error
Json(serde_json::error::Error),
/// TimeUnit conversion failed
TimeUnit(i32),
/// MemoryUnit conversion failed
MemoryUnit(i32),
/// HistogramType conversion failed
HistogramType(i32),
/// OsString conversion failed
OsString(OsString),
/// Unknown error
Utf8Error,
/// Glean initialization was attempted with an invalid configuration
InvalidConfig,
/// Glean not initialized
NotInitialized,
#[doc(hidden)]
__NonExhaustive,
}
/// A specialized [`Error`] type for this crate's operations.
///
/// [`Error`]: https://doc.rust-lang.org/stable/std/error/trait.Error.html
#[derive(Debug)]
pub struct Error {
kind: ErrorKind,
}
impl Error {
/// Return a new UTF-8 error
///
/// This is exposed in order to expose conversion errors on the FFI layer.
pub fn utf8_error() -> Error {
Error {
kind: ErrorKind::Utf8Error,
}
}
/// Indicates an error that no requested global object is initialized
pub fn not_initialized() -> Error {
Error {
kind: ErrorKind::NotInitialized,
}
}
}
impl std::error::Error for Error {}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use ErrorKind::*;
match &self.kind {
Lifetime(l) => write!(f, "Lifetime conversion from {} failed", l),
Handle(e) => write!(f, "Invalid handle: {}", e),
IoError(e) => write!(f, "An I/O error occurred: {}", e),
Rkv(e) => write!(f, "An Rkv error occurred: {}", e),
Json(e) => write!(f, "A JSON error occurred: {}", e),
TimeUnit(t) => write!(f, "TimeUnit conversion from {} failed", t),
MemoryUnit(m) => write!(f, "MemoryUnit conversion from {} failed", m),
HistogramType(h) => write!(f, "HistogramType conversion from {} failed", h),
OsString(s) => write!(f, "OsString conversion from {:?} failed", s),
Utf8Error => write!(f, "Invalid UTF-8 byte sequence in string"),
InvalidConfig => write!(f, "Invalid Glean configuration provided"),
NotInitialized => write!(f, "Global Glean object missing"),
__NonExhaustive => write!(f, "Unknown error"),
}
}
}
impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Error {
Error { kind }
}
}
impl From<HandleError> for Error {
fn from(error: HandleError) -> Error {
Error {
kind: ErrorKind::Handle(error),
}
}
}
impl From<io::Error> for Error {
fn from(error: io::Error) -> Error {
Error {
kind: ErrorKind::IoError(error),
}
}
}
impl From<StoreError> for Error {
fn from(error: StoreError) -> Error {
Error {
kind: ErrorKind::Rkv(error),
}
}
}
impl From<Error> for ExternError {
fn from(error: Error) -> ExternError {
ffi_support::ExternError::new_error(ffi_support::ErrorCode::new(42), format!("{}", error))
}
}
impl From<serde_json::error::Error> for Error {
fn from(error: serde_json::error::Error) -> Error {
Error {
kind: ErrorKind::Json(error),
}
}
}
impl From<OsString> for Error {
fn from(error: OsString) -> Error {
Error {
kind: ErrorKind::OsString(error),
}
}
}
/// To satisfy integer conversion done by the macros on the FFI side, we need to be able to turn
/// something infallible into an error.
/// This will never actually be reached, as an integer-to-integer conversion is infallible.
impl From<std::convert::Infallible> for Error {
fn from(_: std::convert::Infallible) -> Error {
unreachable!()
}
}

@@ -1,223 +1,211 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! # Error Recording
//!
//! Glean keeps track of errors that occurred due to invalid labels or invalid values when recording
//! other metrics.
//!
//! Error counts are stored in labeled counters in the `glean.error` category.
//! The labeled counter metrics that store the errors are defined in the `metrics.yaml` for documentation purposes,
//! but are not actually used directly, since the `send_in_pings` value needs to match the pings of the metric that is erroring (plus the "metrics" ping),
//! not some constant value that we could define in `metrics.yaml`.
use std::convert::TryFrom;
use std::fmt::Display;
use crate::error::{Error, ErrorKind};
use crate::metrics::CounterMetric;
use crate::metrics::{combine_base_identifier_and_label, strip_label};
use crate::CommonMetricData;
use crate::Glean;
use crate::Lifetime;
/// The possible error types for metric recording.
/// Note: the cases in this enum must be kept in sync with the ones
/// in the platform-specific code (e.g. ErrorType.kt) and with the
/// metrics in the registry files.
#[derive(Debug, PartialEq)]
pub enum ErrorType {
/// For when the value to be recorded does not match the metric-specific restrictions
InvalidValue,
/// For when the label of a labeled metric does not match the restrictions
InvalidLabel,
/// For when the metric caught an invalid state while recording
InvalidState,
/// For when the value to be recorded overflows the metric-specific upper range
InvalidOverflow,
}
impl ErrorType {
/// The error type's metric id
pub fn as_str(&self) -> &'static str {
match self {
ErrorType::InvalidValue => "invalid_value",
ErrorType::InvalidLabel => "invalid_label",
ErrorType::InvalidState => "invalid_state",
ErrorType::InvalidOverflow => "invalid_overflow",
}
}
}
impl TryFrom<i32> for ErrorType {
type Error = Error;
fn try_from(value: i32) -> Result<ErrorType, Self::Error> {
match value {
0 => Ok(ErrorType::InvalidValue),
1 => Ok(ErrorType::InvalidLabel),
2 => Ok(ErrorType::InvalidState),
3 => Ok(ErrorType::InvalidOverflow),
e => Err(ErrorKind::Lifetime(e).into()),
}
}
}
/// For a given metric, get the metric in which to record errors
fn get_error_metric_for_metric(meta: &CommonMetricData, error: ErrorType) -> CounterMetric {
// Can't use meta.identifier here, since that might cause infinite recursion
// if the label on this metric needs to report an error.
let identifier = meta.base_identifier();
let name = strip_label(&identifier);
// Record errors in the pings the metric is in, as well as the metrics ping.
let mut send_in_pings = meta.send_in_pings.clone();
let ping_name = "metrics".to_string();
if !send_in_pings.contains(&ping_name) {
send_in_pings.push(ping_name);
}
CounterMetric::new(CommonMetricData {
name: combine_base_identifier_and_label(error.as_str(), name),
category: "glean.error".into(),
lifetime: Lifetime::Ping,
send_in_pings,
..Default::default()
})
}
/// Records an error into Glean.
///
/// Errors are recorded as labeled counters in the `glean.error` category.
///
/// *Note*: We make assumptions here about how labeled metrics are encoded, namely by having the name
/// `<name>/<label>`.
/// Errors do not adhere to the usual "maximum label" restriction.
///
/// # Arguments
///
/// * `glean` - The Glean instance containing the database
/// * `meta` - The metric's meta data
/// * `error` - The error type to record
/// * `message` - The message to log. This message is not sent with the ping.
/// It does not need to include the metric id, as that is automatically prepended to the message.
/// * `num_errors` - The number of errors of the same type to report.
pub fn record_error<O: Into<Option<i32>>>(
glean: &Glean,
meta: &CommonMetricData,
error: ErrorType,
message: impl Display,
num_errors: O,
) {
let metric = get_error_metric_for_metric(meta, error);
log::warn!("{}: {}", meta.base_identifier(), message);
let to_report = num_errors.into().unwrap_or(1);
debug_assert!(to_report > 0);
metric.add(glean, to_report);
}
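// A hedged usage sketch, not part of the original diff: `num_errors` is
// generic over `Into<Option<i32>>`, so callers can pass `None` to record a
// single error or a plain integer to record several at once.
#[allow(dead_code)]
fn example_record_error(glean: &Glean, meta: &CommonMetricData) {
    // One InvalidValue error, logged and counted against `meta`.
    record_error(glean, meta, ErrorType::InvalidValue, "value too long", None);
    // Three InvalidLabel errors in a single call.
    record_error(glean, meta, ErrorType::InvalidLabel, "label mismatch", 3);
}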
/// Gets the number of recorded errors for the given metric and error type.
///
/// *Note*: This is a **test-only** API, but we need to expose it to be used in integration tests.
///
/// # Arguments
///
/// * `glean` - The Glean object holding the database
/// * `meta` - The metadata of the metric instance
/// * `error` - The type of error
///
/// # Returns
///
/// The number of errors reported.
pub fn test_get_num_recorded_errors(
glean: &Glean,
meta: &CommonMetricData,
error: ErrorType,
ping_name: Option<&str>,
) -> Result<i32, String> {
let use_ping_name = ping_name.unwrap_or(&meta.send_in_pings[0]);
let metric = get_error_metric_for_metric(meta, error);
metric.test_get_value(glean, use_ping_name).ok_or_else(|| {
format!(
"No error recorded for {} in '{}' store",
meta.base_identifier(),
use_ping_name
)
})
}
#[cfg(test)]
mod test {
use super::*;
use crate::metrics::*;
use crate::tests::new_glean;
#[test]
fn error_type_i32_mapping() {
let error: ErrorType = std::convert::TryFrom::try_from(0).unwrap();
assert_eq!(error, ErrorType::InvalidValue);
let error: ErrorType = std::convert::TryFrom::try_from(1).unwrap();
assert_eq!(error, ErrorType::InvalidLabel);
let error: ErrorType = std::convert::TryFrom::try_from(2).unwrap();
assert_eq!(error, ErrorType::InvalidState);
let error: ErrorType = std::convert::TryFrom::try_from(3).unwrap();
assert_eq!(error, ErrorType::InvalidOverflow);
}
#[test]
fn recording_of_all_error_types() {
let (glean, _t) = new_glean(None);
let string_metric = StringMetric::new(CommonMetricData {
name: "string_metric".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into(), "store2".into()],
disabled: false,
lifetime: Lifetime::User,
..Default::default()
});
let expected_invalid_values_errors: i32 = 1;
let expected_invalid_labels_errors: i32 = 2;
record_error(
&glean,
string_metric.meta(),
ErrorType::InvalidValue,
"Invalid value",
None,
);
record_error(
&glean,
string_metric.meta(),
ErrorType::InvalidLabel,
"Invalid label",
expected_invalid_labels_errors,
);
for store in &["store1", "store2", "metrics"] {
assert_eq!(
Ok(expected_invalid_values_errors),
test_get_num_recorded_errors(
&glean,
string_metric.meta(),
ErrorType::InvalidValue,
Some(store)
)
);
assert_eq!(
Ok(expected_invalid_labels_errors),
test_get_num_recorded_errors(
&glean,
string_metric.meta(),
ErrorType::InvalidLabel,
Some(store)
)
);
}
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! # Error Recording
//!
//! Glean keeps track of errors that occurred due to invalid labels or invalid values when recording
//! other metrics.
//!
//! Error counts are stored in labeled counters in the `glean.error` category.
//! The labeled counter metrics that store the errors are defined in the `metrics.yaml` for documentation purposes,
//! but are not actually used directly, since the `send_in_pings` value needs to match the pings of the metric that is erroring (plus the "metrics" ping),
//! not some constant value that we could define in `metrics.yaml`.
use std::convert::TryFrom;
use std::fmt::Display;
use crate::error::{Error, ErrorKind};
use crate::metrics::CounterMetric;
use crate::metrics::{combine_base_identifier_and_label, strip_label};
use crate::CommonMetricData;
use crate::Glean;
use crate::Lifetime;
/// The possible error types for metric recording.
/// Note: the cases in this enum must be kept in sync with the ones
/// in the platform-specific code (e.g. ErrorType.kt) and with the
/// metrics in the registry files.
#[derive(Debug)]
pub enum ErrorType {
/// For when the value to be recorded does not match the metric-specific restrictions
InvalidValue,
/// For when the label of a labeled metric does not match the restrictions
InvalidLabel,
/// For when the metric caught an invalid state while recording
InvalidState,
/// For when the value to be recorded overflows the metric-specific upper range
InvalidOverflow,
}
impl ErrorType {
/// The error type's metric id
pub fn as_str(&self) -> &'static str {
match self {
ErrorType::InvalidValue => "invalid_value",
ErrorType::InvalidLabel => "invalid_label",
ErrorType::InvalidState => "invalid_state",
ErrorType::InvalidOverflow => "invalid_overflow",
}
}
}
impl TryFrom<i32> for ErrorType {
type Error = Error;
fn try_from(value: i32) -> Result<ErrorType, Self::Error> {
match value {
0 => Ok(ErrorType::InvalidValue),
1 => Ok(ErrorType::InvalidLabel),
2 => Ok(ErrorType::InvalidState),
4 => Ok(ErrorType::InvalidOverflow),
e => Err(ErrorKind::Lifetime(e).into()),
}
}
}
/// For a given metric, get the metric in which to record errors
fn get_error_metric_for_metric(meta: &CommonMetricData, error: ErrorType) -> CounterMetric {
// Can't use meta.identifier here, since that might cause infinite recursion
// if the label on this metric needs to report an error.
let identifier = meta.base_identifier();
let name = strip_label(&identifier);
// Record errors in the pings the metric is in, as well as the metrics ping.
let mut send_in_pings = meta.send_in_pings.clone();
let ping_name = "metrics".to_string();
if !send_in_pings.contains(&ping_name) {
send_in_pings.push(ping_name);
}
CounterMetric::new(CommonMetricData {
name: combine_base_identifier_and_label(error.as_str(), name),
category: "glean.error".into(),
lifetime: Lifetime::Ping,
send_in_pings,
..Default::default()
})
}
/// Records an error into Glean.
///
/// Errors are recorded as labeled counters in the `glean.error` category.
///
/// *Note*: We make assumptions here about how labeled metrics are encoded, namely by having the name
/// `<name>/<label>`.
/// Errors do not adhere to the usual "maximum label" restriction.
///
/// ## Arguments
///
/// * glean - The Glean instance containing the database
/// * meta - The metric's meta data
/// * error - The error type to record
/// * message - The message to log. This message is not sent with the ping.
/// It does not need to include the metric id, as that is automatically prepended to the message.
/// * num_errors - The number of errors of the same type to report.
pub fn record_error<O: Into<Option<i32>>>(
glean: &Glean,
meta: &CommonMetricData,
error: ErrorType,
message: impl Display,
num_errors: O,
) {
let metric = get_error_metric_for_metric(meta, error);
log::warn!("{}: {}", meta.base_identifier(), message);
let to_report = num_errors.into().unwrap_or(1);
debug_assert!(to_report > 0);
metric.add(glean, to_report);
}
/// Get the number of recorded errors for the given metric and error type.
///
/// *Note*: This is a **test-only** API, but we need to expose it to be used in integration tests.
///
/// ## Arguments
///
/// * glean - The Glean object holding the database
/// * meta - The metadata of the metric instance
/// * error - The type of error
///
/// ## Return value
///
/// The number of errors reported
pub fn test_get_num_recorded_errors(
glean: &Glean,
meta: &CommonMetricData,
error: ErrorType,
ping_name: Option<&str>,
) -> Result<i32, String> {
let use_ping_name = ping_name.unwrap_or(&meta.send_in_pings[0]);
let metric = get_error_metric_for_metric(meta, error);
metric.test_get_value(glean, use_ping_name).ok_or_else(|| {
format!(
"No error recorded for {} in '{}' store",
meta.base_identifier(),
use_ping_name
)
})
}
#[cfg(test)]
mod test {
use super::*;
use crate::metrics::*;
use crate::tests::new_glean;
#[test]
fn recording_of_all_error_types() {
let (glean, _t) = new_glean(None);
let string_metric = StringMetric::new(CommonMetricData {
name: "string_metric".into(),
category: "telemetry".into(),
send_in_pings: vec!["store1".into(), "store2".into()],
disabled: false,
lifetime: Lifetime::User,
..Default::default()
});
let expected_invalid_values_errors: i32 = 1;
let expected_invalid_labels_errors: i32 = 2;
record_error(
&glean,
string_metric.meta(),
ErrorType::InvalidValue,
"Invalid value",
None,
);
record_error(
&glean,
string_metric.meta(),
ErrorType::InvalidLabel,
"Invalid label",
expected_invalid_labels_errors,
);
for store in &["store1", "store2", "metrics"] {
assert_eq!(
Ok(expected_invalid_values_errors),
test_get_num_recorded_errors(
&glean,
string_metric.meta(),
ErrorType::InvalidValue,
Some(store)
)
);
assert_eq!(
Ok(expected_invalid_labels_errors),
test_get_num_recorded_errors(
&glean,
string_metric.meta(),
ErrorType::InvalidLabel,
Some(store)
)
);
}
}
}

@@ -1,497 +1,453 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use std::fs;
use std::fs::{create_dir_all, File, OpenOptions};
use std::io::BufRead;
use std::io::BufReader;
use std::io::Write;
use std::iter::FromIterator;
use std::path::{Path, PathBuf};
use std::sync::RwLock;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value as JsonValue};
use crate::CommonMetricData;
use crate::Glean;
use crate::Result;
/// Represents the recorded data for a single event.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct RecordedEvent {
/// The timestamp of when the event was recorded.
///
    /// This allows ordering events from a single process run.
pub timestamp: u64,
/// The event's category.
///
/// This is defined by users in the metrics file.
pub category: String,
/// The event's name.
///
/// This is defined by users in the metrics file.
pub name: String,
/// A map of all extra data values.
///
/// The set of allowed extra keys is defined by users in the metrics file.
#[serde(skip_serializing_if = "Option::is_none")]
pub extra: Option<HashMap<String, String>>,
}
impl RecordedEvent {
/// Serialize an event to JSON, adjusting its timestamp relative to a base timestamp
fn serialize_relative(&self, timestamp_offset: u64) -> JsonValue {
json!(&RecordedEvent {
timestamp: self.timestamp - timestamp_offset,
category: self.category.clone(),
name: self.name.clone(),
extra: self.extra.clone(),
})
}
}
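// A hedged sketch, not part of the original diff: `serialize_relative` rebases
// timestamps against the earliest event in a store, so an event recorded at
// t=510 in a store whose first event was at t=500 serializes at t=10.
#[allow(dead_code)]
fn example_relative_timestamp() {
    let event = RecordedEvent {
        timestamp: 510,
        category: "ui".to_string(),
        name: "click".to_string(),
        extra: None,
    };
    let json = event.serialize_relative(500);
    assert_eq!(json["timestamp"], 10);
}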
/// This struct handles the in-memory and on-disk storage logic for events.
///
/// So that the data survives application shutdown, events are stored
/// in an append-only file on disk, in addition to the store in memory. Each line
/// of this file records a single event in JSON, exactly as it will be sent in the
/// ping. There is one file per store.
///
/// When restarting the application, these on-disk files are checked, and if any are
/// found, they are loaded, queued for sending and flushed immediately before any
/// further events are collected. This is because the timestamps for these events
/// may have come from a previous boot of the device, and therefore will not be
/// compatible with any newly-collected events.
#[derive(Debug)]
pub struct EventDatabase {
/// Path to directory of on-disk event files
pub path: PathBuf,
/// The in-memory list of events
event_stores: RwLock<HashMap<String, Vec<RecordedEvent>>>,
/// A lock to be held when doing operations on the filesystem
file_lock: RwLock<()>,
}
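// A hedged sketch, not part of the original diff: each line of an on-disk
// store file is one event as single-line JSON, exactly what the serde derive
// on `RecordedEvent` produces (the `extra` field is skipped when `None`).
#[allow(dead_code)]
fn example_on_disk_line() -> String {
    let event = RecordedEvent {
        timestamp: 2,
        category: "ui".to_string(),
        name: "click".to_string(),
        extra: None,
    };
    // Yields: {"timestamp":2,"category":"ui","name":"click"}
    serde_json::to_string(&event).unwrap()
}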
impl EventDatabase {
/// Creates a new event database.
///
/// # Arguments
///
    /// * `data_path` - The directory to store events in. A new directory
    ///   `events` will be created inside of this directory.
pub fn new(data_path: &str) -> Result<Self> {
let path = Path::new(data_path).join("events");
create_dir_all(&path)?;
Ok(Self {
path,
event_stores: RwLock::new(HashMap::new()),
file_lock: RwLock::new(()),
})
}
/// Initializes events storage after Glean is fully initialized and ready to send pings.
///
/// This must be called once on application startup, e.g. from
/// [Glean.initialize], but after we are ready to send pings, since this
/// could potentially collect and send pings.
///
/// If there are any events queued on disk, it loads them into memory so
/// that the memory and disk representations are in sync.
///
/// Secondly, if this is the first time the application has been run since
/// rebooting, any pings containing events are assembled into pings and cleared
/// immediately, since their timestamps won't be compatible with the timestamps
/// we would create during this boot of the device.
///
/// # Arguments
///
/// * `glean` - The Glean instance.
///
/// # Returns
///
/// Whether at least one ping was generated.
pub fn flush_pending_events_on_startup(&self, glean: &Glean) -> bool {
match self.load_events_from_disk() {
Ok(_) => self.send_all_events(glean),
Err(err) => {
log::error!("Error loading events from disk: {}", err);
false
}
}
}
fn load_events_from_disk(&self) -> Result<()> {
let _lock = self.file_lock.read().unwrap(); // safe unwrap, only error case is poisoning
let mut db = self.event_stores.write().unwrap(); // safe unwrap, only error case is poisoning
for entry in fs::read_dir(&self.path)? {
let entry = entry?;
if entry.file_type()?.is_file() {
let store_name = entry.file_name().into_string()?;
let file = BufReader::new(File::open(entry.path())?);
db.insert(
store_name,
file.lines()
.filter_map(|line| line.ok())
.filter_map(|line| serde_json::from_str::<RecordedEvent>(&line).ok())
.collect(),
);
}
}
Ok(())
}
fn send_all_events(&self, glean: &Glean) -> bool {
let store_names = {
let db = self.event_stores.read().unwrap(); // safe unwrap, only error case is poisoning
db.keys().cloned().collect::<Vec<String>>()
};
let mut ping_sent = false;
for store_name in store_names {
if let Err(err) = glean.submit_ping_by_name(&store_name, Some("startup")) {
log::error!(
"Error flushing existing events to the '{}' ping: {}",
store_name,
err
);
} else {
ping_sent = true;
}
}
ping_sent
}
/// Records an event in the desired stores.
///
/// # Arguments
///
/// * `glean` - The Glean instance.
/// * `meta` - The metadata about the event metric. Used to get the category,
/// name and stores for the metric.
/// * `timestamp` - The timestamp of the event, in milliseconds. Must use a
/// monotonically increasing timer (this value is obtained on the
/// platform-specific side).
/// * `extra` - Extra data values, mapping strings to strings.
pub fn record(
&self,
glean: &Glean,
meta: &CommonMetricData,
timestamp: u64,
extra: Option<HashMap<String, String>>,
) {
// If upload is disabled we don't want to record.
if !glean.is_upload_enabled() {
return;
}
// Create RecordedEvent object, and its JSON form for serialization
// on disk.
let event = RecordedEvent {
timestamp,
category: meta.category.to_string(),
name: meta.name.to_string(),
extra,
};
let event_json = serde_json::to_string(&event).unwrap(); // safe unwrap, event can always be serialized
// Store the event in memory and on disk to each of the stores.
let mut stores_to_submit: Vec<&str> = Vec::new();
{
let mut db = self.event_stores.write().unwrap(); // safe unwrap, only error case is poisoning
for store_name in meta.send_in_pings.iter() {
let store = db.entry(store_name.to_string()).or_insert_with(Vec::new);
store.push(event.clone());
self.write_event_to_disk(store_name, &event_json);
if store.len() == glean.get_max_events() {
stores_to_submit.push(&store_name);
}
}
}
// If any of the event stores reached maximum size, submit the pings
// containing those events immediately.
for store_name in stores_to_submit {
if let Err(err) = glean.submit_ping_by_name(store_name, Some("max_capacity")) {
log::error!(
"Got more than {} events, but could not send {} ping: {}",
glean.get_max_events(),
store_name,
err
);
}
}
}
/// Writes an event to a single store on disk.
///
/// # Arguments
///
/// * `store_name` - The name of the store.
/// * `event_json` - The event content, as a single-line JSON-encoded string.
fn write_event_to_disk(&self, store_name: &str, event_json: &str) {
let _lock = self.file_lock.write().unwrap(); // safe unwrap, only error case is poisoning
if let Err(err) = OpenOptions::new()
.create(true)
.append(true)
.open(self.path.join(store_name))
.and_then(|mut file| writeln!(file, "{}", event_json))
{
log::error!("IO error writing event to store '{}': {}", store_name, err);
}
}
/// Gets a snapshot of the stored event data as a JsonValue.
///
/// # Arguments
///
/// * `store_name` - The name of the desired store.
/// * `clear_store` - Whether to clear the store after snapshotting.
///
/// # Returns
///
    /// An array of events, JSON encoded, if any. Otherwise `None`.
pub fn snapshot_as_json(&self, store_name: &str, clear_store: bool) -> Option<JsonValue> {
let result = {
let mut db = self.event_stores.write().unwrap(); // safe unwrap, only error case is poisoning
db.get_mut(&store_name.to_string()).and_then(|store| {
if !store.is_empty() {
// Timestamps may have been recorded out-of-order, so sort the events
// by the timestamp.
// We can't insert events in order as-we-go, because we also append
// events to a file on disk, where this would be expensive. Best to
// handle this in every case (whether events came from disk or memory)
// in a single location.
store.sort_by(|a, b| a.timestamp.cmp(&b.timestamp));
let first_timestamp = store[0].timestamp;
Some(JsonValue::from_iter(
store.iter().map(|e| e.serialize_relative(first_timestamp)),
))
} else {
log::error!("Unexpectly got empty event store for '{}'", store_name);
None
}
})
};
if clear_store {
self.event_stores
.write()
.unwrap() // safe unwrap, only error case is poisoning
.remove(&store_name.to_string());
let _lock = self.file_lock.write().unwrap(); // safe unwrap, only error case is poisoning
if let Err(err) = fs::remove_file(self.path.join(store_name)) {
match err.kind() {
std::io::ErrorKind::NotFound => {
// silently drop this error, the file did not exist
}
_ => log::error!("Error removing events queue file '{}': {}", store_name, err),
}
}
}
result
}
/// Clears all stored events, both in memory and on-disk.
pub fn clear_all(&self) -> Result<()> {
// safe unwrap, only error case is poisoning
self.event_stores.write().unwrap().clear();
// safe unwrap, only error case is poisoning
let _lock = self.file_lock.write().unwrap();
std::fs::remove_dir_all(&self.path)?;
create_dir_all(&self.path)?;
Ok(())
}
/// **Test-only API (exported for FFI purposes).**
///
    /// Returns whether there are any events currently stored for the given event
/// metric.
///
/// This doesn't clear the stored value.
pub fn test_has_value<'a>(&'a self, meta: &'a CommonMetricData, store_name: &str) -> bool {
self.event_stores
.read()
.unwrap() // safe unwrap, only error case is poisoning
.get(&store_name.to_string())
.into_iter()
.flatten()
.any(|event| event.name == meta.name && event.category == meta.category)
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the vector of currently stored events for the given event metric in
/// the given store.
///
/// This doesn't clear the stored value.
pub fn test_get_value<'a>(
&'a self,
meta: &'a CommonMetricData,
store_name: &str,
) -> Option<Vec<RecordedEvent>> {
let value: Vec<RecordedEvent> = self
.event_stores
.read()
.unwrap() // safe unwrap, only error case is poisoning
.get(&store_name.to_string())
.into_iter()
.flatten()
.filter(|event| event.name == meta.name && event.category == meta.category)
.cloned()
.collect();
if !value.is_empty() {
Some(value)
} else {
None
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::tests::new_glean;
use crate::CommonMetricData;
#[test]
fn handle_truncated_events_on_disk() {
let t = tempfile::tempdir().unwrap();
{
let db = EventDatabase::new(&t.path().display().to_string()).unwrap();
db.write_event_to_disk("events", "{\"timestamp\": 500");
db.write_event_to_disk("events", "{\"timestamp\"");
db.write_event_to_disk(
"events",
"{\"timestamp\": 501, \"category\": \"ui\", \"name\": \"click\"}",
);
}
{
let db = EventDatabase::new(&t.path().display().to_string()).unwrap();
db.load_events_from_disk().unwrap();
let events = &db.event_stores.read().unwrap()["events"];
assert_eq!(1, events.len());
}
}
#[test]
fn stable_serialization() {
let event_empty = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: None,
};
let mut data = HashMap::new();
data.insert("a key".to_string(), "a value".to_string());
let event_data = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: Some(data),
};
let event_empty_json = ::serde_json::to_string_pretty(&event_empty).unwrap();
let event_data_json = ::serde_json::to_string_pretty(&event_data).unwrap();
assert_eq!(
event_empty,
serde_json::from_str(&event_empty_json).unwrap()
);
assert_eq!(event_data, serde_json::from_str(&event_data_json).unwrap());
}
#[test]
fn deserialize_existing_data() {
let event_empty_json = r#"
{
"timestamp": 2,
"category": "cat",
"name": "name"
}
"#;
let event_data_json = r#"
{
"timestamp": 2,
"category": "cat",
"name": "name",
"extra": {
"a key": "a value"
}
}
"#;
let event_empty = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: None,
};
let mut data = HashMap::new();
data.insert("a key".to_string(), "a value".to_string());
let event_data = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: Some(data),
};
assert_eq!(
event_empty,
serde_json::from_str(&event_empty_json).unwrap()
);
assert_eq!(event_data, serde_json::from_str(&event_data_json).unwrap());
}
#[test]
fn doesnt_record_when_upload_is_disabled() {
let (mut glean, dir) = new_glean(None);
let db = EventDatabase::new(dir.path().to_str().unwrap()).unwrap();
let test_storage = "test-storage";
let test_category = "category";
let test_name = "name";
let test_timestamp = 2;
let test_meta = CommonMetricData::new(test_category, test_name, test_storage);
let event_data = RecordedEvent {
timestamp: test_timestamp,
category: test_category.to_string(),
name: test_name.to_string(),
extra: None,
};
// Upload is not yet disabled,
// so let's check that everything is getting recorded as expected.
db.record(&glean, &test_meta, 2, None);
{
let event_stores = db.event_stores.read().unwrap();
assert_eq!(&event_data, &event_stores.get(test_storage).unwrap()[0]);
assert_eq!(event_stores.get(test_storage).unwrap().len(), 1);
}
glean.set_upload_enabled(false);
// Now that upload is disabled, let's check nothing is recorded.
db.record(&glean, &test_meta, 2, None);
{
let event_stores = db.event_stores.read().unwrap();
assert_eq!(event_stores.get(test_storage).unwrap().len(), 1);
}
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use std::fs;
use std::fs::{create_dir_all, File, OpenOptions};
use std::io::BufRead;
use std::io::BufReader;
use std::io::Write;
use std::iter::FromIterator;
use std::path::{Path, PathBuf};
use std::sync::RwLock;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value as JsonValue};
use crate::CommonMetricData;
use crate::Glean;
use crate::Result;
/// Represents the recorded data for a single event.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct RecordedEvent {
/// The timestamp of when the event was recorded.
///
    /// This allows ordering events from a single process run.
pub timestamp: u64,
/// The event's category.
///
/// This is defined by users in the metrics file.
pub category: String,
/// The event's name.
///
/// This is defined by users in the metrics file.
pub name: String,
/// A map of all extra data values.
///
/// The set of allowed extra keys is defined by users in the metrics file.
#[serde(skip_serializing_if = "Option::is_none")]
pub extra: Option<HashMap<String, String>>,
}
impl RecordedEvent {
/// Serialize an event to JSON, adjusting its timestamp relative to a base timestamp
fn serialize_relative(&self, timestamp_offset: u64) -> JsonValue {
json!(&RecordedEvent {
timestamp: self.timestamp - timestamp_offset,
category: self.category.clone(),
name: self.name.clone(),
extra: self.extra.clone(),
})
}
}
/// This struct handles the in-memory and on-disk storage logic for events.
///
/// So that the data survives application shutdown, events are stored
/// in an append-only file on disk, in addition to the store in memory. Each line
/// of this file records a single event in JSON, exactly as it will be sent in the
/// ping. There is one file per store.
///
/// When restarting the application, these on-disk files are checked, and if any are
/// found, they are loaded, queued for sending and flushed immediately before any
/// further events are collected. This is because the timestamps for these events
/// may have come from a previous boot of the device, and therefore will not be
/// compatible with any newly-collected events.
#[derive(Debug)]
pub struct EventDatabase {
/// Path to directory of on-disk event files
pub path: PathBuf,
/// The in-memory list of events
event_stores: RwLock<HashMap<String, Vec<RecordedEvent>>>,
/// A lock to be held when doing operations on the filesystem
file_lock: RwLock<()>,
}
impl EventDatabase {
/// Create a new event database.
///
/// # Arguments
///
/// * `data_path` - The directory to store events in. A new directory
/// `events` will be created inside of this directory.
pub fn new(data_path: &str) -> Result<Self> {
let path = Path::new(data_path).join("events");
create_dir_all(&path)?;
Ok(Self {
path,
event_stores: RwLock::new(HashMap::new()),
file_lock: RwLock::new(()),
})
}
/// Initialize events storage after Glean is fully initialized and ready to
/// send pings. This must be called once on application startup, e.g. from
/// [Glean.initialize], but after we are ready to send pings, since this
/// could potentially collect and send pings.
///
/// If there are any events queued on disk, it loads them into memory so
/// that the memory and disk representations are in sync.
///
/// Secondly, if this is the first time the application has been run since
/// rebooting, any pings containing events are assembled into pings and cleared
/// immediately, since their timestamps won't be compatible with the timestamps
/// we would create during this boot of the device.
///
/// # Arguments
///
/// * `glean` - The Glean instance.
///
/// # Return value
///
/// `true` if at least one ping was generated, `false` otherwise.
pub fn flush_pending_events_on_startup(&self, glean: &Glean) -> bool {
match self.load_events_from_disk() {
Ok(_) => self.send_all_events(glean),
Err(err) => {
log::error!("Error loading events from disk: {}", err);
false
}
}
}
fn load_events_from_disk(&self) -> Result<()> {
let _lock = self.file_lock.read().unwrap(); // safe unwrap, only error case is poisoning
let mut db = self.event_stores.write().unwrap(); // safe unwrap, only error case is poisoning
for entry in fs::read_dir(&self.path)? {
let entry = entry?;
if entry.file_type()?.is_file() {
let store_name = entry.file_name().into_string()?;
let file = BufReader::new(File::open(entry.path())?);
db.insert(
store_name,
file.lines()
.filter_map(|line| line.ok())
.filter_map(|line| serde_json::from_str::<RecordedEvent>(&line).ok())
.collect(),
);
}
}
Ok(())
}
fn send_all_events(&self, glean: &Glean) -> bool {
let store_names = {
let db = self.event_stores.read().unwrap(); // safe unwrap, only error case is poisoning
db.keys().cloned().collect::<Vec<String>>()
};
let mut ping_sent = false;
for store_name in store_names {
if let Err(err) = glean.submit_ping_by_name(&store_name, Some("startup")) {
log::error!(
"Error flushing existing events to the '{}' ping: {}",
store_name,
err
);
} else {
ping_sent = true;
}
}
ping_sent
}
/// Record an event in the desired stores.
///
/// # Arguments
///
/// * `glean` - The Glean instance.
/// * `meta` - The metadata about the event metric. Used to get the category,
/// name and stores for the metric.
/// * `timestamp` - The timestamp of the event, in milliseconds. Must use a
/// monotonically increasing timer (this value is obtained on the
/// platform-specific side).
/// * `extra` - Extra data values, mapping strings to strings.
pub fn record(
&self,
glean: &Glean,
meta: &CommonMetricData,
timestamp: u64,
extra: Option<HashMap<String, String>>,
) {
// Create RecordedEvent object, and its JSON form for serialization
// on disk.
let event = RecordedEvent {
timestamp,
category: meta.category.to_string(),
name: meta.name.to_string(),
extra,
};
let event_json = serde_json::to_string(&event).unwrap(); // safe unwrap, event can always be serialized
// Store the event in memory and on disk to each of the stores.
let mut stores_to_submit: Vec<&str> = Vec::new();
{
let mut db = self.event_stores.write().unwrap(); // safe unwrap, only error case is poisoning
for store_name in meta.send_in_pings.iter() {
let store = db.entry(store_name.to_string()).or_insert_with(Vec::new);
store.push(event.clone());
self.write_event_to_disk(store_name, &event_json);
if store.len() == glean.get_max_events() {
stores_to_submit.push(&store_name);
}
}
}
// If any of the event stores reached maximum size, submit the pings
// containing those events immediately.
for store_name in stores_to_submit {
if let Err(err) = glean.submit_ping_by_name(store_name, Some("max_capacity")) {
log::error!(
"Got more than {} events, but could not send {} ping: {}",
glean.get_max_events(),
store_name,
err
);
}
}
}
/// Writes an event to a single store on disk.
///
/// # Arguments
///
/// * `store_name` - The name of the store.
/// * `event_json` - The event content, as a single-line JSON-encoded string.
fn write_event_to_disk(&self, store_name: &str, event_json: &str) {
let _lock = self.file_lock.write().unwrap(); // safe unwrap, only error case is poisoning
if let Err(err) = OpenOptions::new()
.create(true)
.append(true)
.open(self.path.join(store_name))
.and_then(|mut file| writeln!(file, "{}", event_json))
{
log::error!("IO error writing event to store '{}': {}", store_name, err);
}
}
/// Get a snapshot of the stored event data as a JsonValue.
///
/// # Arguments
///
/// * `store_name` - The name of the desired store.
/// * `clear_store` - Whether to clear the store after snapshotting.
///
/// # Returns
///
    /// An array of events, JSON encoded, if any.
pub fn snapshot_as_json(&self, store_name: &str, clear_store: bool) -> Option<JsonValue> {
let result = {
let mut db = self.event_stores.write().unwrap(); // safe unwrap, only error case is poisoning
db.get_mut(&store_name.to_string()).and_then(|store| {
if !store.is_empty() {
// Timestamps may have been recorded out-of-order, so sort the events
// by the timestamp.
// We can't insert events in order as-we-go, because we also append
// events to a file on disk, where this would be expensive. Best to
// handle this in every case (whether events came from disk or memory)
// in a single location.
store.sort_by(|a, b| a.timestamp.cmp(&b.timestamp));
let first_timestamp = store[0].timestamp;
Some(JsonValue::from_iter(
store.iter().map(|e| e.serialize_relative(first_timestamp)),
))
} else {
log::error!("Unexpectly got empty event store for '{}'", store_name);
None
}
})
};
if clear_store {
self.event_stores
.write()
.unwrap() // safe unwrap, only error case is poisoning
.remove(&store_name.to_string());
let _lock = self.file_lock.write().unwrap(); // safe unwrap, only error case is poisoning
if let Err(err) = fs::remove_file(self.path.join(store_name)) {
match err.kind() {
std::io::ErrorKind::NotFound => {
// silently drop this error, the file did not exist
}
_ => log::error!("Error removing events queue file '{}': {}", store_name, err),
}
}
}
result
}
/// Clear all stored events, both in memory and on-disk.
pub fn clear_all(&self) -> Result<()> {
// safe unwrap, only error case is poisoning
self.event_stores.write().unwrap().clear();
// safe unwrap, only error case is poisoning
let _lock = self.file_lock.write().unwrap();
std::fs::remove_dir_all(&self.path)?;
create_dir_all(&self.path)?;
Ok(())
}
/// **Test-only API (exported for FFI purposes).**
///
    /// Return whether there are any events currently stored for the given event
/// metric.
///
/// This doesn't clear the stored value.
pub fn test_has_value<'a>(&'a self, meta: &'a CommonMetricData, store_name: &str) -> bool {
self.event_stores
.read()
.unwrap() // safe unwrap, only error case is poisoning
.get(&store_name.to_string())
.into_iter()
.flatten()
.any(|event| event.name == meta.name && event.category == meta.category)
}
/// **Test-only API (exported for FFI purposes).**
///
/// Get the vector of currently stored events for the given event metric in
/// the given store.
///
/// This doesn't clear the stored value.
pub fn test_get_value<'a>(
&'a self,
meta: &'a CommonMetricData,
store_name: &str,
) -> Option<Vec<RecordedEvent>> {
let value: Vec<RecordedEvent> = self
.event_stores
.read()
.unwrap() // safe unwrap, only error case is poisoning
.get(&store_name.to_string())
.into_iter()
.flatten()
.filter(|event| event.name == meta.name && event.category == meta.category)
.cloned()
.collect();
if !value.is_empty() {
Some(value)
} else {
None
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn handle_truncated_events_on_disk() {
let t = tempfile::tempdir().unwrap();
{
let db = EventDatabase::new(&t.path().display().to_string()).unwrap();
db.write_event_to_disk("events", "{\"timestamp\": 500");
db.write_event_to_disk("events", "{\"timestamp\"");
db.write_event_to_disk(
"events",
"{\"timestamp\": 501, \"category\": \"ui\", \"name\": \"click\"}",
);
}
{
let db = EventDatabase::new(&t.path().display().to_string()).unwrap();
db.load_events_from_disk().unwrap();
let events = &db.event_stores.read().unwrap()["events"];
assert_eq!(1, events.len());
}
}
#[test]
fn stable_serialization() {
let event_empty = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: None,
};
let mut data = HashMap::new();
data.insert("a key".to_string(), "a value".to_string());
let event_data = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: Some(data),
};
let event_empty_json = ::serde_json::to_string_pretty(&event_empty).unwrap();
let event_data_json = ::serde_json::to_string_pretty(&event_data).unwrap();
assert_eq!(
event_empty,
serde_json::from_str(&event_empty_json).unwrap()
);
assert_eq!(event_data, serde_json::from_str(&event_data_json).unwrap());
}
#[test]
fn deserialize_existing_data() {
let event_empty_json = r#"
{
"timestamp": 2,
"category": "cat",
"name": "name"
}
"#;
let event_data_json = r#"
{
"timestamp": 2,
"category": "cat",
"name": "name",
"extra": {
"a key": "a value"
}
}
"#;
let event_empty = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: None,
};
let mut data = HashMap::new();
data.insert("a key".to_string(), "a value".to_string());
let event_data = RecordedEvent {
timestamp: 2,
category: "cat".to_string(),
name: "name".to_string(),
extra: Some(data),
};
assert_eq!(
event_empty,
serde_json::from_str(&event_empty_json).unwrap()
);
assert_eq!(event_data, serde_json::from_str(&event_data_json).unwrap());
}
}

@@ -1,206 +1,206 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use once_cell::sync::OnceCell;
use serde::{Deserialize, Serialize};
use super::{Bucketing, Histogram};
use crate::util::floating_point_context::FloatingPointContext;
/// Create the possible ranges in an exponential distribution from `min` to `max` with
/// `bucket_count` buckets.
///
/// This algorithm calculates the bucket sizes using a natural log approach to get `bucket_count` buckets,
/// exponentially spaced between `min` and `max`.
///
/// Bucket limits are the minimal bucket values.
/// That means values in a bucket `i` are `bucket[i] <= value < bucket[i+1]`.
/// It will always contain an underflow bucket (`< 1`).
fn exponential_range(min: u64, max: u64, bucket_count: usize) -> Vec<u64> {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
let log_max = (max as f64).ln();
let mut ranges = Vec::with_capacity(bucket_count);
let mut current = min;
if current == 0 {
current = 1;
}
    // underflow bucket
ranges.push(0);
ranges.push(current);
for i in 2..bucket_count {
let log_current = (current as f64).ln();
let log_ratio = (log_max - log_current) / (bucket_count - i) as f64;
let log_next = log_current + log_ratio;
let next_value = log_next.exp().round() as u64;
current = if next_value > current {
next_value
} else {
current + 1
};
ranges.push(current);
}
ranges
}
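// A hedged restatement, not part of the original diff, of the recurrence the
// loop above implements: given the current lower limit b_i, the next limit is
//
//     b_{i+1} = round(exp(ln(b_i) + (ln(max) - ln(b_i)) / (bucket_count - i)))
//
// i.e. the remaining distance to `max` in log space is split evenly across the
// remaining buckets, falling back to b_i + 1 whenever rounding would not
// advance past the current limit.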
/// An exponential bucketing algorithm.
///
/// Buckets are pre-computed at instantiation with an exponential distribution from `min` to `max`
/// and `bucket_count` buckets.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct PrecomputedExponential {
// Don't serialize the (potentially large) array of ranges, instead compute them on first
// access.
#[serde(skip)]
bucket_ranges: OnceCell<Vec<u64>>,
min: u64,
max: u64,
bucket_count: usize,
}
impl Bucketing for PrecomputedExponential {
/// Get the bucket for the sample.
///
/// This uses a binary search to locate the index `i` of the bucket such that:
/// bucket[i] <= sample < bucket[i+1]
fn sample_to_bucket_minimum(&self, sample: u64) -> u64 {
let limit = match self.ranges().binary_search(&sample) {
// Found an exact match on a bucket minimum
Ok(i) => i,
// Sorted after this bucket's limit, so it belongs in the previous bucket
Err(i) => i - 1,
};
self.ranges()[limit]
}
fn ranges(&self) -> &[u64] {
// Create the exponential range on first access.
self.bucket_ranges
.get_or_init(|| exponential_range(self.min, self.max, self.bucket_count))
}
}
impl Histogram<PrecomputedExponential> {
/// Creates a histogram with `count` exponential buckets in the range `min` to `max`.
pub fn exponential(
min: u64,
max: u64,
bucket_count: usize,
) -> Histogram<PrecomputedExponential> {
Histogram {
values: HashMap::new(),
count: 0,
sum: 0,
bucketing: PrecomputedExponential {
bucket_ranges: OnceCell::new(),
min,
max,
bucket_count,
},
}
}
}
#[cfg(test)]
mod test {
use super::*;
const DEFAULT_BUCKET_COUNT: usize = 100;
const DEFAULT_RANGE_MIN: u64 = 0;
const DEFAULT_RANGE_MAX: u64 = 60_000;
#[test]
fn can_count() {
let mut hist = Histogram::exponential(1, 500, 10);
assert!(hist.is_empty());
for i in 1..=10 {
hist.accumulate(i);
}
assert_eq!(10, hist.count());
assert_eq!(55, hist.sum());
}
#[test]
fn overflow_values_accumulate_in_the_last_bucket() {
let mut hist =
Histogram::exponential(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT);
hist.accumulate(DEFAULT_RANGE_MAX + 100);
assert_eq!(1, hist.values[&DEFAULT_RANGE_MAX]);
}
#[test]
fn short_exponential_buckets_are_correct() {
let test_buckets = vec![0, 1, 2, 3, 5, 9, 16, 29, 54, 100];
assert_eq!(test_buckets, exponential_range(1, 100, 10));
// There's always a zero bucket, so we increase the lower limit.
assert_eq!(test_buckets, exponential_range(0, 100, 10));
}
#[test]
fn default_exponential_buckets_are_correct() {
// Hand calculated values using current default range 0 - 60000 and bucket count of 100.
// NOTE: The final bucket, regardless of width, represents the overflow bucket to hold any
// values beyond the maximum (in this case the maximum is 60000)
let test_buckets = vec![
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 19, 21, 23, 25, 28, 31, 34,
38, 42, 46, 51, 56, 62, 68, 75, 83, 92, 101, 111, 122, 135, 149, 164, 181, 200, 221,
244, 269, 297, 328, 362, 399, 440, 485, 535, 590, 651, 718, 792, 874, 964, 1064, 1174,
1295, 1429, 1577, 1740, 1920, 2118, 2337, 2579, 2846, 3140, 3464, 3822, 4217, 4653,
5134, 5665, 6250, 6896, 7609, 8395, 9262, 10219, 11275, 12440, 13726, 15144, 16709,
18436, 20341, 22443, 24762, 27321, 30144, 33259, 36696, 40488, 44672, 49288, 54381,
60000,
];
assert_eq!(
test_buckets,
exponential_range(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT)
);
}
#[test]
fn default_buckets_correctly_accumulate() {
let mut hist =
Histogram::exponential(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT);
for i in &[1, 10, 100, 1000, 10000] {
hist.accumulate(*i);
}
assert_eq!(11111, hist.sum());
assert_eq!(5, hist.count());
assert_eq!(None, hist.values.get(&0)); // underflow is empty
assert_eq!(1, hist.values[&1]); // bucket_ranges[1] = 1
assert_eq!(1, hist.values[&10]); // bucket_ranges[10] = 10
assert_eq!(1, hist.values[&92]); // bucket_ranges[33] = 92
assert_eq!(1, hist.values[&964]); // bucket_ranges[57] = 964
assert_eq!(1, hist.values[&9262]); // bucket_ranges[80] = 9262
}
#[test]
fn accumulate_large_numbers() {
let mut hist = Histogram::exponential(1, 500, 10);
hist.accumulate(u64::max_value());
hist.accumulate(u64::max_value());
assert_eq!(2, hist.count());
// Saturate before overflowing
assert_eq!(u64::max_value(), hist.sum());
assert_eq!(2, hist.values[&500]);
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use once_cell::sync::OnceCell;
use serde::{Deserialize, Serialize};
use super::{Bucketing, Histogram};
use crate::util::floating_point_context::FloatingPointContext;
/// Create the possible ranges in an exponential distribution from `min` to `max` with
/// `bucket_count` buckets.
///
/// This algorithm calculates the bucket sizes using a natural-log approach to get `bucket_count` buckets,
/// exponentially spaced between `min` and `max`.
///
/// Bucket limits are the minimal bucket value.
/// That means values in a bucket `i` are `bucket[i] <= value < bucket[i+1]`.
/// It will always contain an underflow bucket (`< 1`).
fn exponential_range(min: u64, max: u64, bucket_count: usize) -> Vec<u64> {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
let log_max = (max as f64).ln();
let mut ranges = Vec::with_capacity(bucket_count);
let mut current = min;
if current == 0 {
current = 1;
}
// underflow bucket
ranges.push(0);
ranges.push(current);
for i in 2..bucket_count {
let log_current = (current as f64).ln();
let log_ratio = (log_max - log_current) / (bucket_count - i) as f64;
let log_next = log_current + log_ratio;
let next_value = log_next.exp().round() as u64;
current = if next_value > current {
next_value
} else {
current + 1
};
ranges.push(current);
}
ranges
}
/// An exponential bucketing algorithm.
///
/// Buckets are pre-computed at instantiation with an exponential distribution from `min` to `max`
/// and `bucket_count` buckets.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct PrecomputedExponential {
// Don't serialize the (potentially large) array of ranges, instead compute them on first
// access.
#[serde(skip)]
bucket_ranges: OnceCell<Vec<u64>>,
min: u64,
max: u64,
bucket_count: usize,
}
impl Bucketing for PrecomputedExponential {
/// Get the bucket for the sample.
///
/// This uses a binary search to locate the index `i` of the bucket such that:
/// bucket[i] <= sample < bucket[i+1]
fn sample_to_bucket_minimum(&self, sample: u64) -> u64 {
let limit = match self.ranges().binary_search(&sample) {
// Found an exact match on a bucket minimum
Ok(i) => i,
// Sorted after this bucket's limit, so it belongs in the previous bucket
Err(i) => i - 1,
};
self.ranges()[limit]
}
fn ranges(&self) -> &[u64] {
// Create the exponential range on first access.
self.bucket_ranges
.get_or_init(|| exponential_range(self.min, self.max, self.bucket_count))
}
}
impl Histogram<PrecomputedExponential> {
/// Create a histogram with `count` exponential buckets in the range `min` to `max`.
pub fn exponential(
min: u64,
max: u64,
bucket_count: usize,
) -> Histogram<PrecomputedExponential> {
Histogram {
values: HashMap::new(),
count: 0,
sum: 0,
bucketing: PrecomputedExponential {
bucket_ranges: OnceCell::new(),
min,
max,
bucket_count,
},
}
}
}
#[cfg(test)]
mod test {
use super::*;
const DEFAULT_BUCKET_COUNT: usize = 100;
const DEFAULT_RANGE_MIN: u64 = 0;
const DEFAULT_RANGE_MAX: u64 = 60_000;
#[test]
fn can_count() {
let mut hist = Histogram::exponential(1, 500, 10);
assert!(hist.is_empty());
for i in 1..=10 {
hist.accumulate(i);
}
assert_eq!(10, hist.count());
assert_eq!(55, hist.sum());
}
#[test]
fn overflow_values_accumulate_in_the_last_bucket() {
let mut hist =
Histogram::exponential(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT);
hist.accumulate(DEFAULT_RANGE_MAX + 100);
assert_eq!(1, hist.values[&DEFAULT_RANGE_MAX]);
}
#[test]
fn short_exponential_buckets_are_correct() {
let test_buckets = vec![0, 1, 2, 3, 5, 9, 16, 29, 54, 100];
assert_eq!(test_buckets, exponential_range(1, 100, 10));
// There's always a zero bucket, so we increase the lower limit.
assert_eq!(test_buckets, exponential_range(0, 100, 10));
}
#[test]
fn default_exponential_buckets_are_correct() {
// Hand calculated values using current default range 0 - 60000 and bucket count of 100.
// NOTE: The final bucket, regardless of width, represents the overflow bucket to hold any
// values beyond the maximum (in this case the maximum is 60000)
let test_buckets = vec![
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 19, 21, 23, 25, 28, 31, 34,
38, 42, 46, 51, 56, 62, 68, 75, 83, 92, 101, 111, 122, 135, 149, 164, 181, 200, 221,
244, 269, 297, 328, 362, 399, 440, 485, 535, 590, 651, 718, 792, 874, 964, 1064, 1174,
1295, 1429, 1577, 1740, 1920, 2118, 2337, 2579, 2846, 3140, 3464, 3822, 4217, 4653,
5134, 5665, 6250, 6896, 7609, 8395, 9262, 10219, 11275, 12440, 13726, 15144, 16709,
18436, 20341, 22443, 24762, 27321, 30144, 33259, 36696, 40488, 44672, 49288, 54381,
60000,
];
assert_eq!(
test_buckets,
exponential_range(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT)
);
}
#[test]
fn default_buckets_correctly_accumulate() {
let mut hist =
Histogram::exponential(DEFAULT_RANGE_MIN, DEFAULT_RANGE_MAX, DEFAULT_BUCKET_COUNT);
for i in &[1, 10, 100, 1000, 10000] {
hist.accumulate(*i);
}
assert_eq!(11111, hist.sum());
assert_eq!(5, hist.count());
assert_eq!(None, hist.values.get(&0)); // underflow is empty
assert_eq!(1, hist.values[&1]); // bucket_ranges[1] = 1
assert_eq!(1, hist.values[&10]); // bucket_ranges[10] = 10
assert_eq!(1, hist.values[&92]); // bucket_ranges[33] = 92
assert_eq!(1, hist.values[&964]); // bucket_ranges[57] = 964
assert_eq!(1, hist.values[&9262]); // bucket_ranges[80] = 9262
}
#[test]
fn accumulate_large_numbers() {
let mut hist = Histogram::exponential(1, 500, 10);
hist.accumulate(u64::max_value());
hist.accumulate(u64::max_value());
assert_eq!(2, hist.count());
// Saturate before overflowing
assert_eq!(u64::max_value(), hist.sum());
assert_eq!(2, hist.values[&500]);
}
}
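A standalone illustration of the lookup rule in `sample_to_bucket_minimum` above (a sample belongs to bucket `i` when `bucket[i] <= sample < bucket[i+1]`), reusing the hand-computed limits from `short_exponential_buckets_are_correct`:

fn main() {
    // Bucket minimums for exponential_range(1, 100, 10), per the test above.
    let ranges = [0u64, 1, 2, 3, 5, 9, 16, 29, 54, 100];
    let sample = 10u64;
    let index = match ranges.binary_search(&sample) {
        // Exact match on a bucket minimum.
        Ok(i) => i,
        // Otherwise it sorts after a minimum, so it belongs to the previous bucket.
        Err(i) => i - 1,
    };
    assert_eq!(9, ranges[index]); // 9 <= 10 < 16
}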

View file

@ -1,174 +1,174 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use super::{Bucketing, Histogram};
use crate::util::floating_point_context::FloatingPointContext;
/// A functional bucketing algorithm.
///
/// Bucketing is performed by a function, rather than pre-computed buckets.
/// The bucket index of a given sample is determined with the following function:
///
/// i = ⌊n log<sub>base</sub>(𝑥)⌋
///
/// In other words, there are n buckets for each power of `base` magnitude.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Functional {
exponent: f64,
}
impl Functional {
/// Instantiate a new functional bucketing.
fn new(log_base: f64, buckets_per_magnitude: f64) -> Functional {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
let exponent = log_base.powf(1.0 / buckets_per_magnitude);
Functional { exponent }
}
/// Maps a sample to a "bucket index" that it belongs in.
/// A "bucket index" is the consecutive integer index of each bucket, useful as a
/// mathematical concept, even though the internal representation is stored and
/// sent using the minimum value in each bucket.
fn sample_to_bucket_index(&self, sample: u64) -> u64 {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
((sample.saturating_add(1)) as f64).log(self.exponent) as u64
}
/// Determines the minimum value of a bucket, given a bucket index.
fn bucket_index_to_bucket_minimum(&self, index: u64) -> u64 {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
self.exponent.powf(index as f64) as u64
}
}
impl Bucketing for Functional {
fn sample_to_bucket_minimum(&self, sample: u64) -> u64 {
if sample == 0 {
return 0;
}
let index = self.sample_to_bucket_index(sample);
self.bucket_index_to_bucket_minimum(index)
}
fn ranges(&self) -> &[u64] {
unimplemented!("Bucket ranges for functional bucketing are not precomputed")
}
}
impl Histogram<Functional> {
/// Creates a histogram with functional buckets.
pub fn functional(log_base: f64, buckets_per_magnitude: f64) -> Histogram<Functional> {
Histogram {
values: HashMap::new(),
count: 0,
sum: 0,
bucketing: Functional::new(log_base, buckets_per_magnitude),
}
}
/// Gets a snapshot of all contiguous values.
///
/// **Caution** This is a more specific implementation of `snapshot_values` on functional
/// histograms. `snapshot_values` cannot be used with those, due to buckets not being
/// precomputed.
pub fn snapshot(&self) -> HashMap<u64, u64> {
if self.values.is_empty() {
return HashMap::new();
}
let mut min_key = None;
let mut max_key = None;
// `Iterator#min` and `Iterator#max` would do the same job independently,
// but we want to avoid iterating the keys twice, so we loop ourselves.
for key in self.values.keys() {
let key = *key;
// safe unwrap, we checked it's not none
if min_key.is_none() || key < min_key.unwrap() {
min_key = Some(key);
}
// safe unwrap, we checked it's not none
if max_key.is_none() || key > max_key.unwrap() {
max_key = Some(key);
}
}
// Non-empty values, therefore minimum/maximum exists.
// safe unwraps, we set it at least once.
let min_bucket = self.bucketing.sample_to_bucket_index(min_key.unwrap());
let max_bucket = self.bucketing.sample_to_bucket_index(max_key.unwrap()) + 1;
let mut values = self.values.clone();
for idx in min_bucket..=max_bucket {
// Fill in missing entries.
let min_bucket = self.bucketing.bucket_index_to_bucket_minimum(idx);
let _ = values.entry(min_bucket).or_insert(0);
}
values
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn can_count() {
let mut hist = Histogram::functional(2.0, 8.0);
assert!(hist.is_empty());
for i in 1..=10 {
hist.accumulate(i);
}
assert_eq!(10, hist.count());
assert_eq!(55, hist.sum());
}
#[test]
fn sample_to_bucket_minimum_correctly_rounds_down() {
let hist = Histogram::functional(2.0, 8.0);
// Check each of the first 100 integers, where the numerical accuracy of the
// round-tripping is most likely to be problematic
for value in 0..100 {
let bucket_minimum = hist.bucketing.sample_to_bucket_minimum(value);
assert!(bucket_minimum <= value);
assert_eq!(
bucket_minimum,
hist.bucketing.sample_to_bucket_minimum(bucket_minimum)
);
}
// Do an exponential sampling of higher numbers
for i in 11..500 {
let value = 1.5f64.powi(i);
let value = value as u64;
let bucket_minimum = hist.bucketing.sample_to_bucket_minimum(value);
assert!(bucket_minimum <= value);
assert_eq!(
bucket_minimum,
hist.bucketing.sample_to_bucket_minimum(bucket_minimum)
);
}
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use super::{Bucketing, Histogram};
use crate::util::floating_point_context::FloatingPointContext;
/// A functional bucketing algorithm.
///
/// Bucketing is performed by a function, rather than pre-computed buckets.
/// The bucket index of a given sample is determined with the following function:
///
/// i = ⌊n log<sub>base</sub>(𝑥)⌋
///
/// In other words, there are n buckets for each power of `base` magnitude.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Functional {
exponent: f64,
}
impl Functional {
/// Instantiate a new functional bucketing.
fn new(log_base: f64, buckets_per_magnitude: f64) -> Functional {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
let exponent = log_base.powf(1.0 / buckets_per_magnitude);
Functional { exponent }
}
/// Maps a sample to a "bucket index" that it belongs in.
/// A "bucket index" is the consecutive integer index of each bucket, useful as a
/// mathematical concept, even though the internal representation is stored and
/// sent using the minimum value in each bucket.
fn sample_to_bucket_index(&self, sample: u64) -> u64 {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
((sample + 1) as f64).log(self.exponent) as u64
}
/// Determines the minimum value of a bucket, given a bucket index.
fn bucket_index_to_bucket_minimum(&self, index: u64) -> u64 {
// Set the FPU control flag to the required state within this function
let _fpc = FloatingPointContext::new();
self.exponent.powf(index as f64) as u64
}
}
impl Bucketing for Functional {
fn sample_to_bucket_minimum(&self, sample: u64) -> u64 {
if sample == 0 {
return 0;
}
let index = self.sample_to_bucket_index(sample);
self.bucket_index_to_bucket_minimum(index)
}
fn ranges(&self) -> &[u64] {
unimplemented!("Bucket ranges for functional bucketing are not precomputed")
}
}
impl Histogram<Functional> {
/// Create a histogram with functional buckets.
pub fn functional(log_base: f64, buckets_per_magnitude: f64) -> Histogram<Functional> {
Histogram {
values: HashMap::new(),
count: 0,
sum: 0,
bucketing: Functional::new(log_base, buckets_per_magnitude),
}
}
/// Get a snapshot of all contiguous values.
///
/// **Caution** This is a more specific implementation of `snapshot_values` on functional
/// histograms. `snapshot_values` cannot be used with those, due to buckets not being
/// precomputed.
pub fn snapshot(&self) -> HashMap<u64, u64> {
if self.values.is_empty() {
return HashMap::new();
}
let mut min_key = None;
let mut max_key = None;
// `Iterator#min` and `Iterator#max` would do the same job independently,
// but we want to avoid iterating the keys twice, so we loop ourselves.
for key in self.values.keys() {
let key = *key;
// safe unwrap, we checked it's not none
if min_key.is_none() || key < min_key.unwrap() {
min_key = Some(key);
}
// safe unwrap, we checked it's not none
if max_key.is_none() || key > max_key.unwrap() {
max_key = Some(key);
}
}
// Non-empty values, therefore minimum/maximum exists.
// safe unwraps, we set it at least once.
let min_bucket = self.bucketing.sample_to_bucket_index(min_key.unwrap());
let max_bucket = self.bucketing.sample_to_bucket_index(max_key.unwrap()) + 1;
let mut values = self.values.clone();
for idx in min_bucket..=max_bucket {
// Fill in missing entries.
let min_bucket = self.bucketing.bucket_index_to_bucket_minimum(idx);
let _ = values.entry(min_bucket).or_insert(0);
}
values
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn can_count() {
let mut hist = Histogram::functional(2.0, 8.0);
assert!(hist.is_empty());
for i in 1..=10 {
hist.accumulate(i);
}
assert_eq!(10, hist.count());
assert_eq!(55, hist.sum());
}
#[test]
fn sample_to_bucket_minimum_correctly_rounds_down() {
let hist = Histogram::functional(2.0, 8.0);
// Check each of the first 100 integers, where the numerical accuracy of the
// round-tripping is most likely to be problematic
for value in 0..100 {
let bucket_minimum = hist.bucketing.sample_to_bucket_minimum(value);
assert!(bucket_minimum <= value);
assert_eq!(
bucket_minimum,
hist.bucketing.sample_to_bucket_minimum(bucket_minimum)
);
}
// Do an exponential sampling of higher numbers
for i in 11..500 {
let value = 1.5f64.powi(i);
let value = value as u64;
let bucket_minimum = hist.bucketing.sample_to_bucket_minimum(value);
assert!(bucket_minimum <= value);
assert_eq!(
bucket_minimum,
hist.bucketing.sample_to_bucket_minimum(bucket_minimum)
);
}
}
}
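The doc comment above gives the bucket index as i = ⌊n log<sub>base</sub>(𝑥)⌋, while the implementation folds `n` and `base` into the single exponent base^(1/n). A short sketch showing the two forms agree, and that a bucket minimum never exceeds the sample it came from, assuming base 2.0 and 8 buckets per magnitude as in the tests:

fn main() {
    let (log_base, buckets_per_magnitude) = (2.0f64, 8.0f64);
    let exponent = log_base.powf(1.0 / buckets_per_magnitude);
    let sample: u64 = 100;
    // Index via the folded exponent, as in sample_to_bucket_index above.
    let index = ((sample + 1) as f64).log(exponent) as u64;
    // Index via the doc-comment form: floor(n * log_base(sample + 1)).
    let index_doc = (buckets_per_magnitude * ((sample + 1) as f64).log2()) as u64;
    assert_eq!(index, index_doc);
    // The bucket minimum is never larger than the sample that produced it.
    let minimum = exponent.powf(index as f64) as u64;
    assert!(minimum <= sample);
}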

View file

@ -71,7 +71,7 @@ impl Bucketing for PrecomputedLinear {
}
impl Histogram<PrecomputedLinear> {
/// Creates a histogram with `bucket_count` linear buckets in the range `min` to `max`.
/// Create a histogram with `bucket_count` linear buckets in the range `min` to `max`.
pub fn linear(min: u64, max: u64, bucket_count: usize) -> Histogram<PrecomputedLinear> {
Histogram {
values: HashMap::new(),

View file

@ -1,139 +1,139 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! A simple histogram implementation for exponential histograms.
use std::collections::HashMap;
use std::convert::TryFrom;
use serde::{Deserialize, Serialize};
use crate::error::{Error, ErrorKind};
pub use exponential::PrecomputedExponential;
pub use functional::Functional;
pub use linear::PrecomputedLinear;
mod exponential;
mod functional;
mod linear;
/// Different kinds of histograms.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HistogramType {
/// A histogram with linearly distributed buckets.
Linear,
/// A histogram with exponentially distributed buckets.
Exponential,
}
impl TryFrom<i32> for HistogramType {
type Error = Error;
fn try_from(value: i32) -> Result<HistogramType, Self::Error> {
match value {
0 => Ok(HistogramType::Linear),
1 => Ok(HistogramType::Exponential),
e => Err(ErrorKind::HistogramType(e).into()),
}
}
}
/// A histogram.
///
/// Stores the counts per bucket and tracks the count of added samples and the total sum.
/// The bucketing algorithm can be changed.
///
/// ## Example
///
/// ```rust,ignore
/// let mut hist = Histogram::exponential(1, 500, 10);
///
/// for i in 1..=10 {
/// hist.accumulate(i);
/// }
///
/// assert_eq!(10, hist.count());
/// assert_eq!(55, hist.sum());
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Histogram<B> {
/// Mapping bucket's minimum to sample count.
values: HashMap<u64, u64>,
/// The count of samples added.
count: u64,
/// The total sum of samples.
sum: u64,
/// The bucketing algorithm used.
bucketing: B,
}
/// A bucketing algorithm for histograms.
///
/// It's responsible for calculating the bucket a sample goes into.
/// It can calculate buckets on-the-fly or pre-calculate buckets and re-use that when needed.
pub trait Bucketing {
/// Get the minimum value of the bucket the sample falls into.
fn sample_to_bucket_minimum(&self, sample: u64) -> u64;
/// The computed bucket ranges for this bucketing algorithm.
fn ranges(&self) -> &[u64];
}
impl<B: Bucketing> Histogram<B> {
/// Gets the number of buckets in this histogram.
pub fn bucket_count(&self) -> usize {
self.values.len()
}
/// Adds a single value to this histogram.
pub fn accumulate(&mut self, sample: u64) {
let bucket_min = self.bucketing.sample_to_bucket_minimum(sample);
let entry = self.values.entry(bucket_min).or_insert(0);
*entry += 1;
self.sum = self.sum.saturating_add(sample);
self.count += 1;
}
/// Gets the total sum of values recorded in this histogram.
pub fn sum(&self) -> u64 {
self.sum
}
/// Gets the total count of values recorded in this histogram.
pub fn count(&self) -> u64 {
self.count
}
/// Gets the filled values.
pub fn values(&self) -> &HashMap<u64, u64> {
&self.values
}
/// Checks if this histogram recorded any values.
pub fn is_empty(&self) -> bool {
self.count() == 0
}
/// Gets a snapshot of all values from the first bucket until one past the last filled bucket,
/// filling in empty buckets with 0.
pub fn snapshot_values(&self) -> HashMap<u64, u64> {
let mut res = self.values.clone();
let max_bucket = self.values.keys().max().cloned().unwrap_or(0);
for &min_bucket in self.bucketing.ranges() {
// Fill in missing entries.
let _ = res.entry(min_bucket).or_insert(0);
// stop one after the last filled bucket
if min_bucket > max_bucket {
break;
}
}
res
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! A simple histogram implementation for exponential histograms.
use std::collections::HashMap;
use std::convert::TryFrom;
use serde::{Deserialize, Serialize};
use crate::error::{Error, ErrorKind};
pub use exponential::PrecomputedExponential;
pub use functional::Functional;
pub use linear::PrecomputedLinear;
mod exponential;
mod functional;
mod linear;
/// Different kinds of histograms.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HistogramType {
/// A histogram with linearly distributed buckets.
Linear,
/// A histogram with exponentially distributed buckets.
Exponential,
}
impl TryFrom<i32> for HistogramType {
type Error = Error;
fn try_from(value: i32) -> Result<HistogramType, Self::Error> {
match value {
0 => Ok(HistogramType::Linear),
1 => Ok(HistogramType::Exponential),
e => Err(ErrorKind::HistogramType(e).into()),
}
}
}
/// A histogram.
///
/// Stores the counts per bucket and tracks the count of added samples and the total sum.
/// The bucketing algorithm can be changed.
///
/// ## Example
///
/// ```rust,ignore
/// let mut hist = Histogram::exponential(1, 500, 10);
///
/// for i in 1..=10 {
/// hist.accumulate(i);
/// }
///
/// assert_eq!(10, hist.count());
/// assert_eq!(55, hist.sum());
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct Histogram<B> {
/// Mapping bucket's minimum to sample count.
values: HashMap<u64, u64>,
/// The count of samples added.
count: u64,
/// The total sum of samples.
sum: u64,
/// The bucketing algorithm used.
bucketing: B,
}
/// A bucketing algorithm for histograms.
///
/// It's responsible for calculating the bucket a sample goes into.
/// It can calculate buckets on-the-fly or pre-calculate buckets and re-use that when needed.
pub trait Bucketing {
/// Get the minimum value of the bucket the sample falls into.
fn sample_to_bucket_minimum(&self, sample: u64) -> u64;
/// The computed bucket ranges for this bucketing algorithm.
fn ranges(&self) -> &[u64];
}
impl<B: Bucketing> Histogram<B> {
/// Get the number of buckets in this histogram.
pub fn bucket_count(&self) -> usize {
self.values.len()
}
/// Add a single value to this histogram.
pub fn accumulate(&mut self, sample: u64) {
let bucket_min = self.bucketing.sample_to_bucket_minimum(sample);
let entry = self.values.entry(bucket_min).or_insert(0);
*entry += 1;
self.sum = self.sum.saturating_add(sample);
self.count += 1;
}
/// Get the total sum of values recorded in this histogram.
pub fn sum(&self) -> u64 {
self.sum
}
/// Get the total count of values recorded in this histogram.
pub fn count(&self) -> u64 {
self.count
}
/// Get the filled values.
pub fn values(&self) -> &HashMap<u64, u64> {
&self.values
}
/// Check if this histogram recorded any values.
pub fn is_empty(&self) -> bool {
self.count() == 0
}
/// Get a snapshot of all values from the first bucket until one past the last filled bucket,
/// filling in empty buckets with 0.
pub fn snapshot_values(&self) -> HashMap<u64, u64> {
let mut res = self.values.clone();
let max_bucket = self.values.keys().max().cloned().unwrap_or(0);
for &min_bucket in self.bucketing.ranges() {
// Fill in missing entries.
let _ = res.entry(min_bucket).or_insert(0);
// stop one after the last filled bucket
if min_bucket > max_bucket {
break;
}
}
res
}
}
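A standalone sketch of the gap-filling loop in `snapshot_values` above: every bucket minimum gets a zero entry until one past the last filled bucket, at which point the loop stops. The range list here is made up for illustration:

use std::collections::HashMap;

fn main() {
    // Filled buckets, keyed by bucket minimum, as in Histogram::values.
    let mut values: HashMap<u64, u64> = [(1, 2), (9, 1)].iter().cloned().collect();
    let ranges = [0u64, 1, 2, 3, 5, 9, 16, 29];
    let max_bucket = *values.keys().max().unwrap();
    for &min_bucket in &ranges {
        values.entry(min_bucket).or_insert(0);
        // Stop one after the last filled bucket.
        if min_bucket > max_bucket {
            break;
        }
    }
    assert_eq!(Some(&0), values.get(&5));  // empty bucket filled in
    assert_eq!(Some(&0), values.get(&16)); // one past the last filled bucket
    assert_eq!(None, values.get(&29));     // beyond that, nothing is added
}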

View file

@ -1,146 +1,67 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use super::{metrics::*, CommonMetricData, Lifetime};
#[derive(Debug)]
pub struct CoreMetrics {
pub client_id: UuidMetric,
pub first_run_date: DatetimeMetric,
pub os: StringMetric,
}
impl CoreMetrics {
pub fn new() -> CoreMetrics {
CoreMetrics {
client_id: UuidMetric::new(CommonMetricData {
name: "client_id".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::User,
disabled: false,
dynamic_label: None,
}),
first_run_date: DatetimeMetric::new(
CommonMetricData {
name: "first_run_date".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::User,
disabled: false,
dynamic_label: None,
},
TimeUnit::Day,
),
os: StringMetric::new(CommonMetricData {
name: "os".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
}
}
}
#[derive(Debug)]
pub struct UploadMetrics {
pub ping_upload_failure: LabeledMetric<CounterMetric>,
pub discarded_exceeding_pings_size: MemoryDistributionMetric,
pub pending_pings_directory_size: MemoryDistributionMetric,
pub deleted_pings_after_quota_hit: CounterMetric,
pub pending_pings: CounterMetric,
}
impl UploadMetrics {
pub fn new() -> UploadMetrics {
UploadMetrics {
ping_upload_failure: LabeledMetric::new(
CounterMetric::new(CommonMetricData {
name: "ping_upload_failure".into(),
category: "glean.upload".into(),
send_in_pings: vec!["metrics".into()],
lifetime: Lifetime::Ping,
disabled: false,
dynamic_label: None,
}),
Some(vec![
"status_code_4xx".into(),
"status_code_5xx".into(),
"status_code_unknown".into(),
"unrecoverable".into(),
"recoverable".into(),
]),
),
discarded_exceeding_pings_size: MemoryDistributionMetric::new(
CommonMetricData {
name: "discarded_exceeding_ping_size".into(),
category: "glean.upload".into(),
send_in_pings: vec!["metrics".into()],
lifetime: Lifetime::Ping,
disabled: false,
dynamic_label: None,
},
MemoryUnit::Kilobyte,
),
pending_pings_directory_size: MemoryDistributionMetric::new(
CommonMetricData {
name: "pending_pings_directory_size".into(),
category: "glean.upload".into(),
send_in_pings: vec!["metrics".into()],
lifetime: Lifetime::Ping,
disabled: false,
dynamic_label: None,
},
MemoryUnit::Kilobyte,
),
deleted_pings_after_quota_hit: CounterMetric::new(CommonMetricData {
name: "deleted_pings_after_quota_hit".into(),
category: "glean.upload".into(),
send_in_pings: vec!["metrics".into()],
lifetime: Lifetime::Ping,
disabled: false,
dynamic_label: None,
}),
pending_pings: CounterMetric::new(CommonMetricData {
name: "pending_pings".into(),
category: "glean.upload".into(),
send_in_pings: vec!["metrics".into()],
lifetime: Lifetime::Ping,
disabled: false,
dynamic_label: None,
}),
}
}
}
#[derive(Debug)]
pub struct DatabaseMetrics {
pub size: MemoryDistributionMetric,
}
impl DatabaseMetrics {
pub fn new() -> DatabaseMetrics {
DatabaseMetrics {
size: MemoryDistributionMetric::new(
CommonMetricData {
name: "size".into(),
category: "glean.database".into(),
send_in_pings: vec!["metrics".into()],
lifetime: Lifetime::Ping,
disabled: false,
dynamic_label: None,
},
MemoryUnit::Byte,
),
}
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use super::{metrics::*, CommonMetricData, Lifetime};
#[derive(Debug)]
pub struct CoreMetrics {
pub client_id: UuidMetric,
pub first_run_date: DatetimeMetric,
pub os: StringMetric,
pub ping_upload_failure: LabeledMetric<CounterMetric>,
}
impl CoreMetrics {
pub fn new() -> CoreMetrics {
CoreMetrics {
client_id: UuidMetric::new(CommonMetricData {
name: "client_id".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::User,
disabled: false,
dynamic_label: None,
}),
first_run_date: DatetimeMetric::new(
CommonMetricData {
name: "first_run_date".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::User,
disabled: false,
dynamic_label: None,
},
TimeUnit::Day,
),
os: StringMetric::new(CommonMetricData {
name: "os".into(),
category: "".into(),
send_in_pings: vec!["glean_client_info".into()],
lifetime: Lifetime::Application,
disabled: false,
dynamic_label: None,
}),
ping_upload_failure: LabeledMetric::new(
CounterMetric::new(CommonMetricData {
name: "ping_upload_failure".into(),
category: "glean.upload".into(),
send_in_pings: vec!["metrics".into()],
lifetime: Lifetime::Ping,
disabled: false,
dynamic_label: None,
}),
Some(vec![
"status_code_4xx".into(),
"status_code_5xx".into(),
"status_code_unknown".into(),
"unrecoverable".into(),
"recoverable".into(),
]),
),
}
}
}
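The restored `CoreMetrics` above carries `ping_upload_failure` with a fixed label list. As a hedged aside on how such a list constrains recording, here is a sketch of collapsing unknown labels into a catch-all bucket; the `__other__` name is an assumption based on Glean's documented labeled-metric behavior, not something shown in this diff:

// Hypothetical helper, not part of the glean API.
fn normalize_label<'a>(allowed: &[&'a str], label: &'a str) -> &'a str {
    if allowed.contains(&label) {
        label
    } else {
        // Unknown labels are assumed to collapse into a catch-all bucket.
        "__other__"
    }
}

fn main() {
    let allowed = [
        "status_code_4xx",
        "status_code_5xx",
        "status_code_unknown",
        "unrecoverable",
        "recoverable",
    ];
    assert_eq!("recoverable", normalize_label(&allowed, "recoverable"));
    assert_eq!("__other__", normalize_label(&allowed, "no_such_label"));
}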

1778
third_party/rust/glean-core/src/lib.rs vendored

The file diff is not shown because of its large size. Load diff

The file diff is not shown because of its large size. Load diff

View file

@ -26,19 +26,15 @@ impl MetricType for BooleanMetric {
}
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl BooleanMetric {
/// Creates a new boolean metric.
/// Create a new boolean metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Sets to the specified boolean value.
/// Set to the specified boolean value.
///
/// # Arguments
/// ## Arguments
///
/// * `glean` - the Glean instance this metric belongs to.
/// * `value` - the value to set.
@ -53,7 +49,7 @@ impl BooleanMetric {
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as a boolean.
/// Get the currently stored value as a boolean.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<bool> {

View file

@ -1,92 +1,88 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A counter metric.
///
/// Used to count things.
/// The value can only be incremented, not decremented.
#[derive(Clone, Debug)]
pub struct CounterMetric {
meta: CommonMetricData,
}
impl MetricType for CounterMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
fn meta_mut(&mut self) -> &mut CommonMetricData {
&mut self.meta
}
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl CounterMetric {
/// Creates a new counter metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Increases the counter by `amount`.
///
/// # Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `amount` - The amount to increase by. Should be positive.
///
/// ## Notes
///
/// Logs an error if the `amount` is 0 or negative.
pub fn add(&self, glean: &Glean, amount: i32) {
if !self.should_record(glean) {
return;
}
if amount <= 0 {
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
format!("Added negative or zero value {}", amount),
None,
);
return;
}
glean
.storage()
.record_with(glean, &self.meta, |old_value| match old_value {
Some(Metric::Counter(old_value)) => {
Metric::Counter(old_value.saturating_add(amount))
}
_ => Metric::Counter(amount),
})
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<i32> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::Counter(i)) => Some(i),
_ => None,
}
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A counter metric.
///
/// Used to count things.
/// The value can only be incremented, not decremented.
#[derive(Clone, Debug)]
pub struct CounterMetric {
meta: CommonMetricData,
}
impl MetricType for CounterMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
fn meta_mut(&mut self) -> &mut CommonMetricData {
&mut self.meta
}
}
impl CounterMetric {
/// Create a new counter metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Increase the counter by `amount`.
///
/// ## Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `amount` - The amount to increase by. Should be positive.
///
/// ## Notes
///
/// Logs an error if the `amount` is 0 or negative.
pub fn add(&self, glean: &Glean, amount: i32) {
if !self.should_record(glean) {
return;
}
if amount <= 0 {
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
format!("Added negative or zero value {}", amount),
None,
);
return;
}
glean
.storage()
.record_with(glean, &self.meta, |old_value| match old_value {
Some(Metric::Counter(old_value)) => {
Metric::Counter(old_value.saturating_add(amount))
}
_ => Metric::Counter(amount),
})
}
/// **Test-only API (exported for FFI purposes).**
///
/// Get the currently stored value as an integer.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<i32> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::Counter(i)) => Some(i),
_ => None,
}
}
}
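`CounterMetric::add` stores `old_value.saturating_add(amount)`, so a counter pinned at the maximum stays put instead of wrapping. A minimal sketch of that arithmetic on a plain `i32`:

fn main() {
    let stored: i32 = i32::MAX - 1;
    // Saturating addition clamps at i32::MAX rather than overflowing.
    assert_eq!(i32::MAX, stored.saturating_add(5));
    assert_eq!(i32::MAX, i32::MAX.saturating_add(1));
}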

View file

@ -1,186 +1,178 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Bucketing, Histogram, HistogramType};
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A custom distribution metric.
///
/// Custom distributions are used to accumulate and store samples in user-defined buckets.
#[derive(Debug)]
pub struct CustomDistributionMetric {
meta: CommonMetricData,
range_min: u64,
range_max: u64,
bucket_count: u64,
histogram_type: HistogramType,
}
/// Create a snapshot of the histogram.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot<B: Bucketing>(hist: &Histogram<B>) -> DistributionData {
DistributionData {
values: hist.snapshot_values(),
sum: hist.sum(),
}
}
impl MetricType for CustomDistributionMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
fn meta_mut(&mut self) -> &mut CommonMetricData {
&mut self.meta
}
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl CustomDistributionMetric {
/// Creates a new custom distribution metric.
pub fn new(
meta: CommonMetricData,
range_min: u64,
range_max: u64,
bucket_count: u64,
histogram_type: HistogramType,
) -> Self {
Self {
meta,
range_min,
range_max,
bucket_count,
histogram_type,
}
}
/// Accumulates the provided signed samples in the metric.
///
/// This is required so that the platform-specific code can provide us with
/// 64 bit signed integers if no `u64` comparable type is available. This
/// will take care of filtering and reporting errors for any provided negative
/// sample.
///
/// # Arguments
///
/// - `samples` - The vector holding the samples to be recorded by the metric.
///
/// ## Notes
///
/// Discards any negative value in `samples` and reports an `ErrorType::InvalidValue`
/// for each of them.
pub fn accumulate_samples_signed(&self, glean: &Glean, samples: Vec<i64>) {
if !self.should_record(glean) {
return;
}
let mut num_negative_samples = 0;
// Generic accumulation function to handle the different histogram types and count negative
// samples.
fn accumulate<B: Bucketing, F>(
samples: &[i64],
mut hist: Histogram<B>,
metric: F,
) -> (i32, Metric)
where
F: Fn(Histogram<B>) -> Metric,
{
let mut num_negative_samples = 0;
for &sample in samples.iter() {
if sample < 0 {
num_negative_samples += 1;
} else {
let sample = sample as u64;
hist.accumulate(sample);
}
}
(num_negative_samples, metric(hist))
}
glean.storage().record_with(glean, &self.meta, |old_value| {
let (num_negative, hist) = match self.histogram_type {
HistogramType::Linear => {
let hist = if let Some(Metric::CustomDistributionLinear(hist)) = old_value {
hist
} else {
Histogram::linear(
self.range_min,
self.range_max,
self.bucket_count as usize,
)
};
accumulate(&samples, hist, Metric::CustomDistributionLinear)
}
HistogramType::Exponential => {
let hist = if let Some(Metric::CustomDistributionExponential(hist)) = old_value
{
hist
} else {
Histogram::exponential(
self.range_min,
self.range_max,
self.bucket_count as usize,
)
};
accumulate(&samples, hist, Metric::CustomDistributionExponential)
}
};
num_negative_samples = num_negative;
hist
});
if num_negative_samples > 0 {
let msg = format!("Accumulated {} negative samples", num_negative_samples);
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
msg,
num_negative_samples,
);
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored histogram.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
// Boxing the value, in order to return either of the possible buckets
Some(Metric::CustomDistributionExponential(hist)) => Some(snapshot(&hist)),
Some(Metric::CustomDistributionLinear(hist)) => Some(snapshot(&hist)),
_ => None,
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored histogram as a JSON String of the serialized value.
///
/// This doesn't clear the stored value.
pub fn test_get_value_as_json_string(
&self,
glean: &Glean,
storage_name: &str,
) -> Option<String> {
self.test_get_value(glean, storage_name)
.map(|snapshot| serde_json::to_string(&snapshot).unwrap())
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Bucketing, Histogram, HistogramType};
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
/// A custom distribution metric.
///
/// Custom distributions are used to accumulate and store samples in user-defined buckets.
#[derive(Debug)]
pub struct CustomDistributionMetric {
meta: CommonMetricData,
range_min: u64,
range_max: u64,
bucket_count: u64,
histogram_type: HistogramType,
}
/// Create a snapshot of the histogram.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot<B: Bucketing>(hist: &Histogram<B>) -> DistributionData {
DistributionData {
values: hist.snapshot_values(),
sum: hist.sum(),
}
}
impl MetricType for CustomDistributionMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
fn meta_mut(&mut self) -> &mut CommonMetricData {
&mut self.meta
}
}
impl CustomDistributionMetric {
/// Create a new custom distribution metric.
pub fn new(
meta: CommonMetricData,
range_min: u64,
range_max: u64,
bucket_count: u64,
histogram_type: HistogramType,
) -> Self {
Self {
meta,
range_min,
range_max,
bucket_count,
histogram_type,
}
}
/// Accumulates the provided signed samples in the metric.
///
/// This is required so that the platform-specific code can provide us with
/// 64 bit signed integers if no `u64` comparable type is available. This
/// will take care of filtering and reporting errors for any provided negative
/// sample.
///
/// ## Arguments
///
/// - `samples` - The vector holding the samples to be recorded by the metric.
///
/// ## Notes
///
/// Discards any negative value in `samples` and reports an `ErrorType::InvalidValue`
/// for each of them.
pub fn accumulate_samples_signed(&self, glean: &Glean, samples: Vec<i64>) {
let mut num_negative_samples = 0;
// Generic accumulation function to handle the different histogram types and count negative
// samples.
fn accumulate<B: Bucketing, F>(
samples: &[i64],
mut hist: Histogram<B>,
metric: F,
) -> (i32, Metric)
where
F: Fn(Histogram<B>) -> Metric,
{
let mut num_negative_samples = 0;
for &sample in samples.iter() {
if sample < 0 {
num_negative_samples += 1;
} else {
let sample = sample as u64;
hist.accumulate(sample);
}
}
(num_negative_samples, metric(hist))
}
glean.storage().record_with(glean, &self.meta, |old_value| {
let (num_negative, hist) = match self.histogram_type {
HistogramType::Linear => {
let hist = if let Some(Metric::CustomDistributionLinear(hist)) = old_value {
hist
} else {
Histogram::linear(
self.range_min,
self.range_max,
self.bucket_count as usize,
)
};
accumulate(&samples, hist, Metric::CustomDistributionLinear)
}
HistogramType::Exponential => {
let hist = if let Some(Metric::CustomDistributionExponential(hist)) = old_value
{
hist
} else {
Histogram::exponential(
self.range_min,
self.range_max,
self.bucket_count as usize,
)
};
accumulate(&samples, hist, Metric::CustomDistributionExponential)
}
};
num_negative_samples = num_negative;
hist
});
if num_negative_samples > 0 {
let msg = format!("Accumulated {} negative samples", num_negative_samples);
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
msg,
num_negative_samples,
);
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Get the currently stored histogram.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
// Boxing the value, in order to return either of the possible buckets
Some(Metric::CustomDistributionExponential(hist)) => Some(snapshot(&hist)),
Some(Metric::CustomDistributionLinear(hist)) => Some(snapshot(&hist)),
_ => None,
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Get the currently stored histogram as a JSON String of the serialized value.
///
/// This doesn't clear the stored value.
pub fn test_get_value_as_json_string(
&self,
glean: &Glean,
storage_name: &str,
) -> Option<String> {
self.test_get_value(glean, storage_name)
.map(|snapshot| serde_json::to_string(&snapshot).unwrap())
}
}
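`accumulate_samples_signed` accepts `i64` samples for the FFI layer's sake, so every negative value has to be counted as an error rather than recorded. A standalone sketch of just that filtering step, detached from the histogram types:

fn main() {
    let samples: Vec<i64> = vec![1, -2, 3, -4, 5];
    let mut num_negative_samples = 0;
    let mut accumulated: Vec<u64> = Vec::new();
    for &sample in samples.iter() {
        if sample < 0 {
            // Count it for error reporting instead of recording it.
            num_negative_samples += 1;
        } else {
            accumulated.push(sample as u64);
        }
    }
    assert_eq!(2, num_negative_samples);
    assert_eq!(vec![1u64, 3, 5], accumulated);
}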

View file

@ -1,163 +1,157 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
#![allow(clippy::too_many_arguments)]
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::util::{get_iso_time_string, local_now_with_offset};
use crate::CommonMetricData;
use crate::Glean;
use chrono::{DateTime, FixedOffset, TimeZone};
/// A datetime type.
///
/// Used to feed data to the `DatetimeMetric`.
pub type Datetime = DateTime<FixedOffset>;
/// A datetime metric.
///
/// Used to record an absolute date and time, such as the time the user first ran
/// the application.
#[derive(Debug)]
pub struct DatetimeMetric {
meta: CommonMetricData,
time_unit: TimeUnit,
}
impl MetricType for DatetimeMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
fn meta_mut(&mut self) -> &mut CommonMetricData {
&mut self.meta
}
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl DatetimeMetric {
/// Creates a new datetime metric.
pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
Self { meta, time_unit }
}
/// Sets the metric to a date/time including the timezone offset.
///
/// # Arguments
///
/// * `glean` - the Glean instance this metric belongs to.
/// * `year` - the year to set the metric to.
/// * `month` - the month to set the metric to (1-12).
/// * `day` - the day to set the metric to (1-based).
/// * `hour` - the hour to set the metric to.
/// * `minute` - the minute to set the metric to.
/// * `second` - the second to set the metric to.
/// * `nano` - the nanosecond fraction to the last whole second.
/// * `offset_seconds` - the timezone difference, in seconds, for the Eastern
/// Hemisphere. Negative seconds mean Western Hemisphere.
pub fn set_with_details(
&self,
glean: &Glean,
year: i32,
month: u32,
day: u32,
hour: u32,
minute: u32,
second: u32,
nano: u32,
offset_seconds: i32,
) {
if !self.should_record(glean) {
return;
}
let timezone_offset = FixedOffset::east_opt(offset_seconds);
if timezone_offset.is_none() {
let msg = format!("Invalid timezone offset {}. Not recording.", offset_seconds);
record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
return;
};
let datetime_obj = FixedOffset::east(offset_seconds)
.ymd_opt(year, month, day)
.and_hms_nano_opt(hour, minute, second, nano);
match datetime_obj.single() {
Some(d) => self.set(glean, Some(d)),
_ => {
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
"Invalid input data. Not recording.",
None,
);
}
}
}
/// Sets the metric to a date/time, including the timezone offset.
///
/// # Arguments
///
/// * `glean` - the Glean instance this metric belongs to.
/// * `value` - Some date/time value, with offset, to set the metric to.
/// If none, the current local time is used.
pub fn set(&self, glean: &Glean, value: Option<Datetime>) {
if !self.should_record(glean) {
return;
}
let value = value.unwrap_or_else(local_now_with_offset);
let value = Metric::Datetime(value, self.time_unit);
glean.storage().record(glean, &self.meta, &value)
}
/// Gets the stored datetime value.
///
/// # Arguments
///
/// * `glean` - the Glean instance this metric belongs to.
/// * `storage_name` - the storage name to look into.
///
/// # Returns
///
/// The stored value or `None` if nothing stored.
pub(crate) fn get_value(&self, glean: &Glean, storage_name: &str) -> Option<Datetime> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta().identifier(glean),
) {
Some(Metric::Datetime(dt, _)) => Some(dt),
_ => None,
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as a String.
///
/// The precision of this value is truncated to the `time_unit` precision.
///
/// This doesn't clear the stored value.
pub fn test_get_value_as_string(&self, glean: &Glean, storage_name: &str) -> Option<String> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::Datetime(d, tu)) => Some(get_iso_time_string(d, tu)),
_ => None,
}
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
#![allow(clippy::too_many_arguments)]
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::Metric;
use crate::metrics::MetricType;
use crate::storage::StorageManager;
use crate::util::{get_iso_time_string, local_now_with_offset};
use crate::CommonMetricData;
use crate::Glean;
use chrono::{DateTime, FixedOffset, TimeZone};
/// A datetime type.
///
/// Used to feed data to the `DatetimeMetric`.
pub type Datetime = DateTime<FixedOffset>;
/// A datetime metric.
///
/// Used to record an absolute date and time, such as the time the user first ran
/// the application.
#[derive(Debug)]
pub struct DatetimeMetric {
meta: CommonMetricData,
time_unit: TimeUnit,
}
impl MetricType for DatetimeMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
fn meta_mut(&mut self) -> &mut CommonMetricData {
&mut self.meta
}
}
impl DatetimeMetric {
/// Create a new datetime metric.
pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
Self { meta, time_unit }
}
/// Public facing API for setting the metric to a date/time which
/// includes the timezone offset.
///
/// ## Arguments
///
/// * `glean` - the Glean instance this metric belongs to.
/// * `year` - the year to set the metric to.
/// * `month` - the month to set the metric to (1-12).
/// * `day` - the day to set the metric to (1-based).
/// * `hour` - the hour to set the metric to.
/// * `minute` - the minute to set the metric to.
/// * `second` - the second to set the metric to.
/// * `nano` - the nanosecond fraction to the last whole second.
/// * `offset_seconds` - the timezone difference, in seconds, for the Eastern
/// Hemisphere. Negative seconds mean Western Hemisphere.
pub fn set_with_details(
&self,
glean: &Glean,
year: i32,
month: u32,
day: u32,
hour: u32,
minute: u32,
second: u32,
nano: u32,
offset_seconds: i32,
) {
let timezone_offset = FixedOffset::east_opt(offset_seconds);
if timezone_offset.is_none() {
let msg = format!("Invalid timezone offset {}. Not recording.", offset_seconds);
record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
return;
};
let datetime_obj = FixedOffset::east(offset_seconds)
.ymd_opt(year, month, day)
.and_hms_nano_opt(hour, minute, second, nano);
match datetime_obj.single() {
Some(d) => self.set(glean, Some(d)),
_ => {
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
"Invalid input data. Not recording.",
None,
);
}
}
}
/// Public facing API for setting the metric to a date/time which
/// includes the timezone offset.
///
/// ## Arguments
///
/// * `glean` - the Glean instance this metric belongs to.
/// * `value` - Some date/time value, with offset, to set the metric to.
/// If none, the current local time is used.
pub fn set(&self, glean: &Glean, value: Option<Datetime>) {
if !self.should_record(glean) {
return;
}
let value = value.unwrap_or_else(local_now_with_offset);
let value = Metric::Datetime(value, self.time_unit);
glean.storage().record(glean, &self.meta, &value)
}
/// Get the stored datetime value.
///
/// ## Arguments
///
/// * `glean` - the Glean instance this metric belongs to.
/// * `storage_name` - the storage name to look into.
///
/// ## Return value
///
/// Returns the stored value or `None` if nothing stored.
pub(crate) fn get_value(&self, glean: &Glean, storage_name: &str) -> Option<Datetime> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta().identifier(glean),
) {
Some(Metric::Datetime(dt, _)) => Some(dt),
_ => None,
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Get the currently stored value as a String.
/// The precision of this value is truncated to the `time_unit`
/// precision.
///
/// This doesn't clear the stored value.
pub fn test_get_value_as_string(&self, glean: &Glean, storage_name: &str) -> Option<String> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::Datetime(d, tu)) => Some(get_iso_time_string(d, tu)),
_ => None,
}
}
}
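`set_with_details` relies on chrono's `_opt` constructors so that invalid components surface as `None` instead of panicking. A minimal sketch against the same chrono 0.4 API used above, with made-up date values:

use chrono::{FixedOffset, TimeZone};

fn main() {
    // +02:00, expressed as seconds east of UTC.
    let tz = FixedOffset::east(2 * 3600);
    // A valid date/time under a fixed offset resolves to exactly one instant.
    let dt = tz.ymd_opt(2020, 1, 2).and_hms_nano_opt(3, 4, 5, 0).single();
    assert!(dt.is_some());
    // An impossible month yields None instead of panicking.
    let bad = tz.ymd_opt(2020, 13, 1).and_hms_nano_opt(0, 0, 0, 0).single();
    assert!(bad.is_none());
}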

View file

@ -1,139 +1,135 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use serde_json::{json, Value as JsonValue};
use crate::error_recording::{record_error, ErrorType};
use crate::event_database::RecordedEvent;
use crate::metrics::MetricType;
use crate::util::truncate_string_at_boundary_with_error;
use crate::CommonMetricData;
use crate::Glean;
const MAX_LENGTH_EXTRA_KEY_VALUE: usize = 100;
/// An event metric.
///
/// Events allow recording of individual occurrences of user actions, e.g.
/// every time a view was opened and from where. Each time you record an event, it
/// records a timestamp, the event's name and a set of custom values.
#[derive(Clone, Debug)]
pub struct EventMetric {
meta: CommonMetricData,
allowed_extra_keys: Vec<String>,
}
impl MetricType for EventMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
fn meta_mut(&mut self) -> &mut CommonMetricData {
&mut self.meta
}
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl EventMetric {
/// Creates a new event metric.
pub fn new(meta: CommonMetricData, allowed_extra_keys: Vec<String>) -> Self {
Self {
meta,
allowed_extra_keys,
}
}
/// Records an event.
///
/// # Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `timestamp` - A monotonically increasing timestamp, in milliseconds.
/// This must be provided since the actual recording of the event may
/// happen some time later than the moment the event occurred.
/// * `extra` - A HashMap of (key, value) pairs. The key is an index into
/// the metric's `allowed_extra_keys` vector where the key's string is
/// looked up. If any key index is out of range, an error is reported and
/// no event is recorded.
pub fn record<M: Into<Option<HashMap<i32, String>>>>(
&self,
glean: &Glean,
timestamp: u64,
extra: M,
) {
if !self.should_record(glean) {
return;
}
let extra = extra.into();
let extra_strings: Option<HashMap<String, String>> = if let Some(extra) = extra {
if extra.is_empty() {
None
} else {
let mut extra_strings = HashMap::new();
for (k, v) in extra.into_iter() {
match self.allowed_extra_keys.get(k as usize) {
Some(k) => extra_strings.insert(
k.to_string(),
truncate_string_at_boundary_with_error(
glean,
&self.meta,
v,
MAX_LENGTH_EXTRA_KEY_VALUE,
),
),
None => {
let msg = format!("Invalid key index {}", k);
record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
return;
}
};
}
Some(extra_strings)
}
} else {
None
};
glean
.event_storage()
.record(glean, &self.meta, timestamp, extra_strings);
}
/// **Test-only API (exported for FFI purposes).**
///
/// Tests whether there are currently stored events for this event metric.
///
/// This doesn't clear the stored value.
pub fn test_has_value(&self, glean: &Glean, store_name: &str) -> bool {
glean.event_storage().test_has_value(&self.meta, store_name)
}
/// **Test-only API (exported for FFI purposes).**
///
/// Get the vector of currently stored events for this event metric.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, store_name: &str) -> Option<Vec<RecordedEvent>> {
glean.event_storage().test_get_value(&self.meta, store_name)
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored events for this event metric as a JSON-encoded string.
///
/// This doesn't clear the stored value.
pub fn test_get_value_as_json_string(&self, glean: &Glean, store_name: &str) -> String {
match self.test_get_value(glean, store_name) {
Some(value) => json!(value),
None => json!(JsonValue::Null),
}
.to_string()
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use serde_json::{json, Value as JsonValue};
use crate::error_recording::{record_error, ErrorType};
use crate::event_database::RecordedEvent;
use crate::metrics::MetricType;
use crate::util::truncate_string_at_boundary_with_error;
use crate::CommonMetricData;
use crate::Glean;
const MAX_LENGTH_EXTRA_KEY_VALUE: usize = 100;
/// An event metric.
///
/// Events allow recording of, e.g., individual occurrences of user actions, say
/// every time a view was opened and from where. Each time you record an event, it
/// records a timestamp, the event's name and a set of custom values.
#[derive(Clone, Debug)]
pub struct EventMetric {
meta: CommonMetricData,
allowed_extra_keys: Vec<String>,
}
impl MetricType for EventMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
fn meta_mut(&mut self) -> &mut CommonMetricData {
&mut self.meta
}
}
impl EventMetric {
/// Create a new event metric.
pub fn new(meta: CommonMetricData, allowed_extra_keys: Vec<String>) -> Self {
Self {
meta,
allowed_extra_keys,
}
}
/// Record an event.
///
/// ## Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `timestamp` - A monotonically increasing timestamp, in milliseconds.
/// This must be provided since the actual recording of the event may
/// happen some time later than the moment the event occurred.
/// * `extra` - A HashMap of (key, value) pairs. The key is an index into
/// the metric's `allowed_extra_keys` vector where the key's string is
/// looked up. If any key index is out of range, an error is reported and
/// no event is recorded.
pub fn record<M: Into<Option<HashMap<i32, String>>>>(
&self,
glean: &Glean,
timestamp: u64,
extra: M,
) {
if !self.should_record(glean) {
return;
}
let extra = extra.into();
let extra_strings: Option<HashMap<String, String>> = if let Some(extra) = extra {
if extra.is_empty() {
None
} else {
let mut extra_strings = HashMap::new();
for (k, v) in extra.into_iter() {
match self.allowed_extra_keys.get(k as usize) {
Some(k) => extra_strings.insert(
k.to_string(),
truncate_string_at_boundary_with_error(
glean,
&self.meta,
v,
MAX_LENGTH_EXTRA_KEY_VALUE,
),
),
None => {
let msg = format!("Invalid key index {}", k);
record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
return;
}
};
}
Some(extra_strings)
}
} else {
None
};
glean
.event_storage()
.record(glean, &self.meta, timestamp, extra_strings);
}
/// **Test-only API (exported for FFI purposes).**
///
/// Test whether there are currently stored events for this event metric.
///
/// This doesn't clear the stored value.
pub fn test_has_value(&self, glean: &Glean, store_name: &str) -> bool {
glean.event_storage().test_has_value(&self.meta, store_name)
}
/// **Test-only API (exported for FFI purposes).**
///
/// Get the vector of currently stored events for this event metric.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, store_name: &str) -> Option<Vec<RecordedEvent>> {
glean.event_storage().test_get_value(&self.meta, store_name)
}
/// **Test-only API (exported for FFI purposes).**
///
/// Get the currently stored events for this event metric as a JSON-encoded string.
///
/// This doesn't clear the stored value.
pub fn test_get_value_as_json_string(&self, glean: &Glean, store_name: &str) -> String {
match self.test_get_value(glean, store_name) {
Some(value) => json!(value),
None => json!(JsonValue::Null),
}
.to_string()
}
}
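
A sketch of how the index-keyed extras work in practice, again assuming an initialized `Glean` instance; the metric, ping, and extra-key names are hypothetical:

use std::collections::HashMap;

use glean_core::metrics::EventMetric;
use glean_core::{CommonMetricData, Glean, Lifetime};

fn record_view_opened(glean: &Glean, timestamp: u64) {
    let metric = EventMetric::new(
        CommonMetricData {
            name: "view_opened".into(),
            category: "ui".into(),
            send_in_pings: vec!["events".into()],
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        vec!["source".into()], // allowed extra keys, addressed by index
    );
    let mut extra = HashMap::new();
    extra.insert(0, "menu".to_string()); // index 0 resolves to "source"
    // An out-of-range index (e.g. 1 here) would record an InvalidValue
    // error and drop the event entirely.
    metric.record(glean, timestamp, extra);
}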


@@ -35,12 +35,10 @@ pub struct RecordedExperimentData {
}
impl RecordedExperimentData {
/// Gets the recorded experiment data as a JSON value.
///
/// For JSON, we don't want to include `{"extra": null}` -- we just want to skip
/// `extra` entirely. Unfortunately, we can't use a serde field annotation for this,
/// since that would break bincode serialization, which doesn't support skipping
/// fields. Therefore, we use a custom serialization function just for JSON here.
// For JSON, we don't want to include {"extra": null} -- we just want to skip
// extra entirely. Unfortunately, we can't use a serde field annotation for this,
// since that would break bincode serialization, which doesn't support skipping
// fields. Therefore, we use a custom serialization function just for JSON here.
pub fn as_json(&self) -> JsonValue {
let mut value = JsonMap::new();
value.insert("branch".to_string(), json!(self.branch));
@@ -71,9 +69,9 @@ impl MetricType for ExperimentMetric {
}
impl ExperimentMetric {
/// Creates a new experiment metric.
/// Create a new experiment metric.
///
/// # Arguments
/// ## Arguments
///
/// * `id` - the id of the experiment. Please note that this will be
/// truncated to `MAX_EXPERIMENTS_IDS_LEN`, if needed.
@@ -118,9 +116,9 @@ impl ExperimentMetric {
new_experiment
}
/// Records an experiment as active.
/// Record an experiment as active.
///
/// # Arguments
/// ## Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `branch` - the active branch of the experiment. Please note that this will be
@@ -150,7 +148,7 @@ impl ExperimentMetric {
};
// Apply limits to extras
let truncated_extras = extra.map(|extra| {
let truncated_extras = extra.and_then(|extra| {
if extra.len() > MAX_EXPERIMENTS_EXTRAS_SIZE {
let msg = format!(
"Extra hash map length {} exceeds maximum of {}",
@@ -185,7 +183,7 @@ impl ExperimentMetric {
temp_map.insert(truncated_key, truncated_value);
}
temp_map
Some(temp_map)
});
let value = Metric::Experiment(RecordedExperimentData {
@@ -195,9 +193,9 @@ impl ExperimentMetric {
glean.storage().record(glean, &self.meta, &value)
}
/// Records an experiment as inactive.
/// Record an experiment as inactive.
///
/// # Arguments
/// ## Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
pub fn set_inactive(&self, glean: &Glean) {
@@ -216,7 +214,7 @@ impl ExperimentMetric {
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored experiment data as a JSON representation of
/// Get the currently stored experiment data as a JSON representation of
/// the RecordedExperimentData.
///
/// This doesn't clear the stored value.
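
Since `ExperimentMetric` is `pub(crate)`, the usual entry point for the operations above is on `Glean` itself. A hedged sketch (the experiment id and branch are hypothetical, and the id/branch/extras limits shown in the diff apply):

use std::collections::HashMap;

use glean_core::Glean;

fn tag_experiment(glean: &Glean) {
    let mut extra = HashMap::new();
    extra.insert("type".to_string(), "rollout".to_string());
    // Over-long ids and branches are truncated with an InvalidValue
    // error; oversized extras maps are trimmed as described above.
    glean.set_experiment_active(
        "button-color-experiment".to_string(),
        "treatment-blue".to_string(),
        Some(extra),
    );
    // ...later, when the experiment ends:
    glean.set_experiment_inactive("button-color-experiment".to_string());
}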

472
third_party/rust/glean-core/src/metrics/jwe.rs (vendored)

@@ -1,472 +0,0 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::fmt;
use std::str::FromStr;
use serde::Serialize;
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::{Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
const DEFAULT_MAX_CHARS_PER_VARIABLE_SIZE_ELEMENT: usize = 1024;
/// Verifies if a string is [`BASE64URL`](https://tools.ietf.org/html/rfc4648#section-5) compliant.
///
/// As such, the string must match the regex: `[a-zA-Z0-9\-\_]*`.
///
/// > **Note** As described in the [JWS specification](https://tools.ietf.org/html/rfc7515#section-2),
/// > the BASE64URL encoding used by JWE discards any padding,
/// > that is why we can ignore that for this validation.
///
/// The regex crate isn't used here because it adds to the binary size,
/// and the Glean SDK doesn't use regular expressions anywhere else.
fn validate_base64url_encoding(value: &str) -> bool {
let mut iter = value.chars();
loop {
match iter.next() {
// We are done, so the whole expression is valid.
None => return true,
// Valid characters.
Some('_') | Some('-') | Some('a'..='z') | Some('A'..='Z') | Some('0'..='9') => (),
// An invalid character.
Some(_) => return false,
}
}
}
/// Representation of a [JWE](https://tools.ietf.org/html/rfc7516).
///
/// **Note** Variable-sized elements will be constrained to a length of DEFAULT_MAX_CHARS_PER_VARIABLE_SIZE_ELEMENT;
/// this is a constraint introduced by Glean to prevent abuse and is not part of the spec.
#[derive(Serialize)]
struct Jwe {
/// A variable-size JWE protected header.
header: String,
/// A variable-size [encrypted key](https://tools.ietf.org/html/rfc7516#appendix-A.1.3).
/// This can be an empty octet sequence.
key: String,
/// A fixed-size, 96-bit, base64 encoded [JWE Initialization vector](https://tools.ietf.org/html/rfc7516#appendix-A.1.4) (e.g. “48V1_ALb6US04U3b”).
/// If not required by the encryption algorithm, can be an empty octet sequence.
init_vector: String,
/// The variable-size base64 encoded cipher text.
cipher_text: String,
/// A fixed-size, 132-bit, base64 encoded authentication tag.
/// Can be an empty octet sequence.
auth_tag: String,
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl Jwe {
/// Create a new JWE struct.
fn new<S: Into<String>>(
header: S,
key: S,
init_vector: S,
cipher_text: S,
auth_tag: S,
) -> Result<Self, (ErrorType, String)> {
let mut header = header.into();
header = Self::validate_non_empty("header", header)?;
header = Self::validate_max_size("header", header)?;
header = Self::validate_base64url_encoding("header", header)?;
let mut key = key.into();
key = Self::validate_max_size("key", key)?;
key = Self::validate_base64url_encoding("key", key)?;
let mut init_vector = init_vector.into();
init_vector = Self::validate_fixed_size_or_empty("init_vector", init_vector, 96)?;
init_vector = Self::validate_base64url_encoding("init_vector", init_vector)?;
let mut cipher_text = cipher_text.into();
cipher_text = Self::validate_non_empty("cipher_text", cipher_text)?;
cipher_text = Self::validate_max_size("cipher_text", cipher_text)?;
cipher_text = Self::validate_base64url_encoding("cipher_text", cipher_text)?;
let mut auth_tag = auth_tag.into();
auth_tag = Self::validate_fixed_size_or_empty("auth_tag", auth_tag, 128)?;
auth_tag = Self::validate_base64url_encoding("auth_tag", auth_tag)?;
Ok(Self {
header,
key,
init_vector,
cipher_text,
auth_tag,
})
}
fn validate_base64url_encoding(
name: &str,
value: String,
) -> Result<String, (ErrorType, String)> {
if !validate_base64url_encoding(&value) {
return Err((
ErrorType::InvalidValue,
format!("`{}` element in JWE value is not valid BASE64URL.", name),
));
}
Ok(value)
}
fn validate_non_empty(name: &str, value: String) -> Result<String, (ErrorType, String)> {
if value.is_empty() {
return Err((
ErrorType::InvalidValue,
format!("`{}` element in JWE value must not be empty.", name),
));
}
Ok(value)
}
fn validate_max_size(name: &str, value: String) -> Result<String, (ErrorType, String)> {
if value.len() > DEFAULT_MAX_CHARS_PER_VARIABLE_SIZE_ELEMENT {
return Err((
ErrorType::InvalidOverflow,
format!(
"`{}` element in JWE value must not exceed {} characters.",
name, DEFAULT_MAX_CHARS_PER_VARIABLE_SIZE_ELEMENT
),
));
}
Ok(value)
}
fn validate_fixed_size_or_empty(
name: &str,
value: String,
size_in_bits: usize,
) -> Result<String, (ErrorType, String)> {
// Each Base64 digit represents exactly 6 bits of data.
// By dividing the size_in_bits by 6 and ceiling the result,
// we get the number of characters the value should have.
let num_chars = (size_in_bits as f32 / 6f32).ceil() as usize;
if !value.is_empty() && value.len() != num_chars {
return Err((
ErrorType::InvalidOverflow,
format!(
"`{}` element in JWE value must have exactly {}-bits or be empty.",
name, size_in_bits
),
));
}
Ok(value)
}
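// Worked examples of the arithmetic above (sketch, using the test
// vectors at the bottom of this file): a 96-bit init_vector needs
// ceil(96 / 6) = 16 base64url characters ("48V1_ALb6US04U3b" is exactly
// 16), and a 128-bit auth_tag needs ceil(128 / 6) = 22 characters
// ("XFBoMYUZodetZdvTiFvSkQ" is exactly 22).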
}
/// Trait implementation to convert a JWE [`compact representation`](https://tools.ietf.org/html/rfc7516#appendix-A.2.7)
/// string into a Jwe struct.
impl FromStr for Jwe {
type Err = (ErrorType, String);
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut elements: Vec<&str> = s.split('.').collect();
if elements.len() != 5 {
return Err((
ErrorType::InvalidValue,
"JWE value is not formatted as expected.".into(),
));
}
// Consume the vector extracting each part of the JWE from it.
//
// Safe unwraps, we already defined that the slice has five elements.
let auth_tag = elements.pop().unwrap();
let cipher_text = elements.pop().unwrap();
let init_vector = elements.pop().unwrap();
let key = elements.pop().unwrap();
let header = elements.pop().unwrap();
Self::new(header, key, init_vector, cipher_text, auth_tag)
}
}
/// Trait implementation to print the Jwe struct as the proper JWE [`compact representation`](https://tools.ietf.org/html/rfc7516#appendix-A.2.7).
impl fmt::Display for Jwe {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}.{}.{}.{}.{}",
self.header, self.key, self.init_vector, self.cipher_text, self.auth_tag
)
}
}
/// A JWE metric.
///
/// This metric works as a "transport" for JWE encrypted data.
///
/// The actual encryption is done somewhere else;
/// Glean must only make sure the data is valid JWE.
#[derive(Clone, Debug)]
pub struct JweMetric {
meta: CommonMetricData,
}
impl MetricType for JweMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
fn meta_mut(&mut self) -> &mut CommonMetricData {
&mut self.meta
}
}
impl JweMetric {
/// Creates a new JWE metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Sets the metric to the specified JWE value.
///
/// # Arguments
///
/// * `glean` - the Glean instance this metric belongs to.
/// * `value` - the [`compact representation`](https://tools.ietf.org/html/rfc7516#appendix-A.2.7) of a JWE value.
pub fn set_with_compact_representation<S: Into<String>>(&self, glean: &Glean, value: S) {
if !self.should_record(glean) {
return;
}
let value = value.into();
match Jwe::from_str(&value) {
Ok(_) => glean
.storage()
.record(glean, &self.meta, &Metric::Jwe(value)),
Err((error_type, msg)) => record_error(glean, &self.meta, error_type, msg, None),
};
}
/// Builds a JWE value from its elements and sets the metric to it.
///
/// # Arguments
///
/// * `glean` - the Glean instance this metric belongs to.
/// * `header` - the JWE Protected Header element.
/// * `key` - the JWE Encrypted Key element.
/// * `init_vector` - the JWE Initialization Vector element.
/// * `cipher_text` - the JWE Ciphertext element.
/// * `auth_tag` - the JWE Authentication Tag element.
pub fn set<S: Into<String>>(
&self,
glean: &Glean,
header: S,
key: S,
init_vector: S,
cipher_text: S,
auth_tag: S,
) {
if !self.should_record(glean) {
return;
}
match Jwe::new(header, key, init_vector, cipher_text, auth_tag) {
Ok(jwe) => glean
.storage()
.record(glean, &self.meta, &Metric::Jwe(jwe.to_string())),
Err((error_type, msg)) => record_error(glean, &self.meta, error_type, msg, None),
};
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as a string.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<String> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::Jwe(b)) => Some(b),
_ => None,
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored JWE as a JSON String of the serialized value.
///
/// This doesn't clear the stored value.
pub fn test_get_value_as_json_string(
&self,
glean: &Glean,
storage_name: &str,
) -> Option<String> {
self.test_get_value(glean, storage_name).map(|snapshot| {
serde_json::to_string(
&Jwe::from_str(&snapshot).expect("Stored JWE metric should be valid JWE value."),
)
.unwrap()
})
}
}
#[cfg(test)]
mod test {
use super::*;
const HEADER: &str = "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ";
const KEY: &str = "OKOawDo13gRp2ojaHV7LFpZcgV7T6DVZKTyKOMTYUmKoTCVJRgckCL9kiMT03JGeipsEdY3mx_etLbbWSrFr05kLzcSr4qKAq7YN7e9jwQRb23nfa6c9d-StnImGyFDbSv04uVuxIp5Zms1gNxKKK2Da14B8S4rzVRltdYwam_lDp5XnZAYpQdb76FdIKLaVmqgfwX7XWRxv2322i-vDxRfqNzo_tETKzpVLzfiwQyeyPGLBIO56YJ7eObdv0je81860ppamavo35UgoRdbYaBcoh9QcfylQr66oc6vFWXRcZ_ZT2LawVCWTIy3brGPi6UklfCpIMfIjf7iGdXKHzg";
const INIT_VECTOR: &str = "48V1_ALb6US04U3b";
const CIPHER_TEXT: &str =
"5eym8TW_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFShS8iB7j6jiSdiwkIr3ajwQzaBtQD_A";
const AUTH_TAG: &str = "XFBoMYUZodetZdvTiFvSkQ";
const JWE: &str = "eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ.OKOawDo13gRp2ojaHV7LFpZcgV7T6DVZKTyKOMTYUmKoTCVJRgckCL9kiMT03JGeipsEdY3mx_etLbbWSrFr05kLzcSr4qKAq7YN7e9jwQRb23nfa6c9d-StnImGyFDbSv04uVuxIp5Zms1gNxKKK2Da14B8S4rzVRltdYwam_lDp5XnZAYpQdb76FdIKLaVmqgfwX7XWRxv2322i-vDxRfqNzo_tETKzpVLzfiwQyeyPGLBIO56YJ7eObdv0je81860ppamavo35UgoRdbYaBcoh9QcfylQr66oc6vFWXRcZ_ZT2LawVCWTIy3brGPi6UklfCpIMfIjf7iGdXKHzg.48V1_ALb6US04U3b.5eym8TW_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFShS8iB7j6jiSdiwkIr3ajwQzaBtQD_A.XFBoMYUZodetZdvTiFvSkQ";
#[test]
fn generates_jwe_from_correct_input() {
let jwe = Jwe::from_str(JWE).unwrap();
assert_eq!(jwe.header, HEADER);
assert_eq!(jwe.key, KEY);
assert_eq!(jwe.init_vector, INIT_VECTOR);
assert_eq!(jwe.cipher_text, CIPHER_TEXT);
assert_eq!(jwe.auth_tag, AUTH_TAG);
assert!(Jwe::new(HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG).is_ok());
}
#[test]
fn jwe_validates_header_value_correctly() {
// When header is empty, correct error is returned
match Jwe::new("", KEY, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
// When header is bigger than max size, correct error is returned
let too_long = (0..1025).map(|_| "X").collect::<String>();
match Jwe::new(
too_long,
KEY.into(),
INIT_VECTOR.into(),
CIPHER_TEXT.into(),
AUTH_TAG.into(),
) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
}
// When header is not valid BASE64URL, correct error is returned
let not64 = "inv@alid value!";
match Jwe::new(not64, KEY, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
}
#[test]
fn jwe_validates_key_value_correctly() {
// When key is empty, JWE is created
assert!(Jwe::new(HEADER, "", INIT_VECTOR, CIPHER_TEXT, AUTH_TAG).is_ok());
// When key is bigger than max size, correct error is returned
let too_long = (0..1025).map(|_| "X").collect::<String>();
match Jwe::new(HEADER, &too_long, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
}
// When key is not valid BASE64URL, correct error is returned
let not64 = "inv@alid value!";
match Jwe::new(HEADER, not64, INIT_VECTOR, CIPHER_TEXT, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
}
#[test]
fn jwe_validates_init_vector_value_correctly() {
// When init_vector is empty, JWE is created
assert!(Jwe::new(HEADER, KEY, "", CIPHER_TEXT, AUTH_TAG).is_ok());
// When init_vector is not the correct size, correct error is returned
match Jwe::new(HEADER, KEY, "foo", CIPHER_TEXT, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
}
// When init_vector is not valid BASE64URL, correct error is returned
let not64 = "inv@alid value!!";
match Jwe::new(HEADER, KEY, not64, CIPHER_TEXT, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
}
#[test]
fn jwe_validates_cipher_text_value_correctly() {
// When cipher_text is empty, correct error is returned
match Jwe::new(HEADER, KEY, INIT_VECTOR, "", AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
// When cipher_text is bigger than max size, correct error is returned
let too_long = (0..1025).map(|_| "X").collect::<String>();
match Jwe::new(HEADER, KEY, INIT_VECTOR, &too_long, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
}
// When cipher_text is not valid BASE64URL, correct error is returned
let not64 = "inv@alid value!";
match Jwe::new(HEADER, KEY, INIT_VECTOR, not64, AUTH_TAG) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
}
#[test]
fn jwe_validates_auth_tag_value_correctly() {
// When auth_tag is empty, JWE is created
assert!(Jwe::new(HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, "").is_ok());
// When auth_tag is not the correct size, correct error is returned
match Jwe::new(HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, "foo") {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidOverflow),
}
// When auth_tag is not valid BASE64URL, correct error is returned
let not64 = "inv@alid value!!!!!!!!";
match Jwe::new(HEADER, KEY, INIT_VECTOR, CIPHER_TEXT, not64) {
Ok(_) => panic!("Should not have built JWE successfully."),
Err((error_type, _)) => assert_eq!(error_type, ErrorType::InvalidValue),
}
}
#[test]
fn transforms_jwe_struct_to_string_correctly() {
let jwe = Jwe::from_str(JWE).unwrap();
assert_eq!(jwe.to_string(), JWE);
}
#[test]
fn validates_base64url_correctly() {
assert!(validate_base64url_encoding(
"0987654321AaBbCcDdEeFfGgHhIiKkLlMmNnOoPpQqRrSsTtUuVvXxWwYyZz-_"
));
assert!(validate_base64url_encoding(""));
assert!(!validate_base64url_encoding("aa aa"));
assert!(!validate_base64url_encoding("aa.aa"));
assert!(!validate_base64url_encoding("!nv@lid-val*e"));
}
}
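
A short sketch of the compact-representation flow above, assuming an initialized `Glean` instance; the metric name, category, and ping name are hypothetical:

use glean_core::metrics::JweMetric;
use glean_core::{CommonMetricData, Glean, Lifetime};

fn store_jwe(glean: &Glean, compact: &str) {
    let metric = JweMetric::new(CommonMetricData {
        name: "encrypted_payload".into(),
        category: "transport".into(),
        send_in_pings: vec!["metrics".into()],
        lifetime: Lifetime::Ping,
        ..Default::default()
    });
    // Input that does not split into the five dot-separated BASE64URL
    // elements records an error instead of a value.
    metric.set_with_compact_representation(glean, compact);
    assert_eq!(
        metric.test_get_value(glean, "metrics").as_deref(),
        Some(compact)
    );
}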


@@ -1,252 +1,253 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::common_metric_data::CommonMetricData;
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::{Metric, MetricType};
use crate::Glean;
const MAX_LABELS: usize = 16;
const OTHER_LABEL: &str = "__other__";
const MAX_LABEL_LENGTH: usize = 61;
/// Checks whether the given value matches the label regex.
///
/// This regex is used for matching against labels and should allow for dots,
/// underscores, and/or hyphens. Labels are also limited to starting with either
/// a letter or an underscore character.
///
/// The exact regex (from the pipeline schema [here](https://github.com/mozilla-services/mozilla-pipeline-schemas/blob/master/templates/include/glean/dot_separated_short_id.1.schema.json)) is:
///
/// "^[a-z_][a-z0-9_-]{0,29}(\\.[a-z_][a-z0-9_-]{0,29})*$"
///
/// The regex crate isn't used here because it adds to the binary size, and the
/// Glean SDK doesn't use regular expressions anywhere else.
///
/// Some examples of good and bad labels:
///
/// Good:
/// * `this.is.fine`
/// * `this_is_fine_too`
/// * `this.is_still_fine`
/// * `thisisfine`
/// * `_.is_fine`
/// * `this.is-fine`
/// * `this-is-fine`
/// Bad:
/// * `this.is.not_fine_due_tu_the_length_being_too_long_i_thing.i.guess`
/// * `1.not_fine`
/// * `this.$isnotfine`
/// * `-.not_fine`
fn matches_label_regex(value: &str) -> bool {
let mut iter = value.chars();
loop {
// Match the first letter in the word.
match iter.next() {
Some('_') | Some('a'..='z') => (),
_ => return false,
};
// Match subsequent letters in the word.
let mut count = 0;
loop {
match iter.next() {
// We are done, so the whole expression is valid.
None => return true,
// Valid characters.
Some('_') | Some('-') | Some('a'..='z') | Some('0'..='9') => (),
// We ended a word, so iterate over the outer loop again.
Some('.') => break,
// An invalid character
_ => return false,
}
count += 1;
// We allow 30 characters per word, but the first one is handled
// above outside of this loop, so we have a maximum of 29 here.
if count == 29 {
return false;
}
}
}
}
/// A labeled metric.
///
/// Labeled metrics allow recording multiple sub-metrics of the same type under different string labels.
#[derive(Clone, Debug)]
pub struct LabeledMetric<T> {
labels: Option<Vec<String>>,
/// Type of the underlying metric
/// We hold on to an instance of it, which is cloned to create new modified instances.
submetric: T,
}
impl<T> LabeledMetric<T>
where
T: MetricType + Clone,
{
/// Creates a new labeled metric from the given metric instance and optional list of labels.
///
/// See [`get`](#method.get) for information on how static or dynamic labels are handled.
pub fn new(submetric: T, labels: Option<Vec<String>>) -> LabeledMetric<T> {
LabeledMetric { labels, submetric }
}
/// Creates a new metric with a specific label.
///
/// This is used for static labels where we can just set the name to be `name/label`.
fn new_metric_with_name(&self, name: String) -> T {
let mut t = self.submetric.clone();
t.meta_mut().name = name;
t
}
/// Creates a new metric with a specific label.
///
/// This is used for dynamic labels where we have to actually validate and correct the
/// label later when we have a Glean object.
fn new_metric_with_dynamic_label(&self, label: String) -> T {
let mut t = self.submetric.clone();
t.meta_mut().dynamic_label = Some(label);
t
}
/// Creates a static label.
///
/// # Safety
///
/// Should only be called when static labels are available on this metric.
///
/// # Arguments
///
/// * `label` - The requested label
///
/// # Returns
///
/// The requested label if it is in the list of allowed labels.
/// Otherwise `OTHER_LABEL` is returned.
fn static_label<'a>(&self, label: &'a str) -> &'a str {
debug_assert!(self.labels.is_some());
let labels = self.labels.as_ref().unwrap();
if labels.iter().any(|l| l == label) {
label
} else {
OTHER_LABEL
}
}
/// Gets a specific metric for a given label.
///
/// If a set of acceptable labels were specified in the `metrics.yaml` file,
/// and the given label is not in the set, it will be recorded under the special `OTHER_LABEL` label.
///
/// If a set of acceptable labels was not specified in the `metrics.yaml` file,
/// only the first 16 unique labels will be used.
/// After that, any additional labels will be recorded under the special `OTHER_LABEL` label.
///
/// Labels must be `snake_case` and less than 30 characters.
/// If an invalid label is used, the metric will be recorded in the special `OTHER_LABEL` label.
pub fn get(&self, label: &str) -> T {
// We have 2 scenarios to consider:
// * Static labels. No database access needed. We just look at what is in memory.
// * Dynamic labels. We look up in the database all previously stored
// labels in order to keep a maximum of allowed labels. This is done later
// when the specific metric is actually recorded, when we are guaranteed to have
// an initialized Glean object.
match self.labels {
Some(_) => {
let label = self.static_label(label);
self.new_metric_with_name(combine_base_identifier_and_label(
&self.submetric.meta().name,
&label,
))
}
None => self.new_metric_with_dynamic_label(label.to_string()),
}
}
/// Gets the template submetric.
///
/// The template submetric is the actual metric that is cloned and modified
/// to record for a specific label.
pub fn get_submetric(&self) -> &T {
&self.submetric
}
}
/// Combines a metric's base identifier and label
pub fn combine_base_identifier_and_label(base_identifer: &str, label: &str) -> String {
format!("{}/{}", base_identifer, label)
}
/// Strips the label off of a complete identifier
pub fn strip_label(identifier: &str) -> &str {
// safe unwrap, first field of a split always valid
identifier.splitn(2, '/').next().unwrap()
}
/// Validates a dynamic label, changing it to OTHER_LABEL if it's invalid.
///
/// Checks the requested label against limitations, such as the label length and allowed
/// characters.
///
/// # Arguments
///
/// * `label` - The requested label
///
/// # Returns
///
/// The entire identifier for the metric, including the base identifier and the corrected label.
/// The errors are logged.
pub fn dynamic_label(
glean: &Glean,
meta: &CommonMetricData,
base_identifier: &str,
label: &str,
) -> String {
let key = combine_base_identifier_and_label(base_identifier, label);
for store in &meta.send_in_pings {
if glean.storage().has_metric(meta.lifetime, store, &key) {
return key;
}
}
let mut label_count = 0;
let prefix = &key[..=base_identifier.len()];
let mut snapshotter = |_: &[u8], _: &Metric| {
label_count += 1;
};
let lifetime = meta.lifetime;
for store in &meta.send_in_pings {
glean
.storage()
.iter_store_from(lifetime, store, Some(&prefix), &mut snapshotter);
}
let error = if label_count >= MAX_LABELS {
true
} else if label.len() > MAX_LABEL_LENGTH {
let msg = format!(
"label length {} exceeds maximum of {}",
label.len(),
MAX_LABEL_LENGTH
);
record_error(glean, meta, ErrorType::InvalidLabel, msg, None);
true
} else if !matches_label_regex(label) {
let msg = format!("label must be snake_case, got '{}'", label);
record_error(glean, meta, ErrorType::InvalidLabel, msg, None);
true
} else {
false
};
if error {
combine_base_identifier_and_label(base_identifier, OTHER_LABEL)
} else {
key
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::common_metric_data::CommonMetricData;
use crate::error_recording::{record_error, ErrorType};
use crate::metrics::{Metric, MetricType};
use crate::Glean;
const MAX_LABELS: usize = 16;
const OTHER_LABEL: &str = "__other__";
const MAX_LABEL_LENGTH: usize = 61;
/// Checks whether the given value matches the label regex.
///
/// This regex is used for matching against labels and should allow for dots,
/// underscores, and/or hyphens. Labels are also limited to starting with either
/// a letter or an underscore character.
///
/// The exact regex (from the pipeline schema [here](https://github.com/mozilla-services/mozilla-pipeline-schemas/blob/master/templates/include/glean/dot_separated_short_id.1.schema.json)) is:
///
/// "^[a-z_][a-z0-9_-]{0,29}(\\.[a-z_][a-z0-9_-]{0,29})*$"
///
/// The regex crate isn't used here because it adds to the binary size, and the
/// Glean SDK doesn't use regular expressions anywhere else.
///
/// Some examples of good and bad labels:
///
/// Good:
/// * `this.is.fine`
/// * `this_is_fine_too`
/// * `this.is_still_fine`
/// * `thisisfine`
/// * `_.is_fine`
/// * `this.is-fine`
/// * `this-is-fine`
/// Bad:
/// * `this.is.not_fine_due_tu_the_length_being_too_long_i_thing.i.guess`
/// * `1.not_fine`
/// * `this.$isnotfine`
/// * `-.not_fine`
fn matches_label_regex(value: &str) -> bool {
let mut iter = value.chars();
loop {
// Match the first letter in the word.
match iter.next() {
Some('_') | Some('a'..='z') => (),
_ => return false,
};
// Match subsequent letters in the word.
let mut count = 0;
loop {
match iter.next() {
// We are done, so the whole expression is valid.
None => return true,
// Valid characters.
Some('_') | Some('-') | Some('a'..='z') | Some('0'..='9') => (),
// We ended a word, so iterate over the outer loop again.
Some('.') => break,
// An invalid character
_ => return false,
}
count += 1;
// We allow 30 characters per word, but the first one is handled
// above outside of this loop, so we have a maximum of 29 here.
if count == 29 {
return false;
}
}
}
}
/// A labeled metric.
///
/// Labeled metrics allow recording multiple sub-metrics of the same type under different string labels.
#[derive(Clone, Debug)]
pub struct LabeledMetric<T> {
labels: Option<Vec<String>>,
/// Type of the underlying metric
/// We hold on to an instance of it, which is cloned to create new modified instances.
submetric: T,
}
impl<T> LabeledMetric<T>
where
T: MetricType + Clone,
{
/// Create a new labeled metric from the given metric instance and optional list of labels.
///
/// See [`get`](#method.get) for information on how static or dynamic labels are handled.
pub fn new(submetric: T, labels: Option<Vec<String>>) -> LabeledMetric<T> {
LabeledMetric { labels, submetric }
}
/// Creates a new metric with a specific label.
///
/// This is used for static labels where we can just set the name to be `name/label`.
fn new_metric_with_name(&self, name: String) -> T {
let mut t = self.submetric.clone();
t.meta_mut().name = name;
t
}
/// Creates a new metric with a specific label.
///
/// This is used for dynamic labels where we have to actually validate and correct the
/// label later when we have a Glean object.
fn new_metric_with_dynamic_label(&self, label: String) -> T {
let mut t = self.submetric.clone();
t.meta_mut().dynamic_label = Some(label);
t
}
/// Create a static label.
///
/// ## Safety
///
/// Should only be called when static labels are available on this metric.
///
/// ## Arguments
///
/// * `label` - The requested label
///
/// ## Return value
///
/// If the requested label is in the list of allowed labels, it is returned.
/// Otherwise the `OTHER_LABEL` is returned.
fn static_label<'a>(&self, label: &'a str) -> &'a str {
debug_assert!(self.labels.is_some());
let labels = self.labels.as_ref().unwrap();
if labels.iter().any(|l| l == label) {
label
} else {
OTHER_LABEL
}
}
/// Get a specific metric for a given label.
///
/// If a set of acceptable labels were specified in the `metrics.yaml` file,
/// and the given label is not in the set, it will be recorded under the special `OTHER_LABEL` label.
///
/// If a set of acceptable labels was not specified in the `metrics.yaml` file,
/// only the first 16 unique labels will be used.
/// After that, any additional labels will be recorded under the special `OTHER_LABEL` label.
///
/// Labels must be `snake_case` and less than 30 characters.
/// If an invalid label is used, the metric will be recorded in the special `OTHER_LABEL` label.
pub fn get(&self, label: &str) -> T {
// We have 2 scenarios to consider:
// * Static labels. No database access needed. We just look at what is in memory.
// * Dynamic labels. We look up in the database all previously stored
// labels in order to keep a maximum of allowed labels. This is done later
// when the specific metric is actually recorded, when we are guaranteed to have
// an initialized Glean object.
match self.labels {
Some(_) => {
let label = self.static_label(label);
self.new_metric_with_name(combine_base_identifier_and_label(
&self.submetric.meta().name,
&label,
))
}
None => self.new_metric_with_dynamic_label(label.to_string()),
}
}
/// Get the template submetric.
///
/// The template submetric is the actual metric that is cloned and modified
/// to record for a specific label.
pub fn get_submetric(&self) -> &T {
&self.submetric
}
}
/// Combines a metric's base identifier and label
pub fn combine_base_identifier_and_label(base_identifer: &str, label: &str) -> String {
format!("{}/{}", base_identifer, label)
}
/// Strips the label off of a complete identifier
pub fn strip_label(identifier: &str) -> &str {
// safe unwrap, first field of a split always valid
identifier.splitn(2, '/').next().unwrap()
}
/// Validate a dynamic label, changing it to OTHER_LABEL if it's invalid.
///
/// Checks the requested label against limitations, such as the label length and allowed
/// characters.
///
/// ## Arguments
///
/// * `label` - The requested label
///
/// ## Return value
///
/// Returns the entire identifier for the metric, including the base identifier and the
/// corrected label.
/// The errors are logged.
pub fn dynamic_label(
glean: &Glean,
meta: &CommonMetricData,
base_identifier: &str,
label: &str,
) -> String {
let key = combine_base_identifier_and_label(base_identifier, label);
for store in &meta.send_in_pings {
if glean.storage().has_metric(meta.lifetime, store, &key) {
return key;
}
}
let mut label_count = 0;
let prefix = &key[..=base_identifier.len()];
let mut snapshotter = |_: &[u8], _: &Metric| {
label_count += 1;
};
let lifetime = meta.lifetime;
for store in &meta.send_in_pings {
glean
.storage()
.iter_store_from(lifetime, store, Some(&prefix), &mut snapshotter);
}
let error = if label_count >= MAX_LABELS {
true
} else if label.len() > MAX_LABEL_LENGTH {
let msg = format!(
"label length {} exceeds maximum of {}",
label.len(),
MAX_LABEL_LENGTH
);
record_error(glean, meta, ErrorType::InvalidLabel, msg, None);
true
} else if !matches_label_regex(label) {
let msg = format!("label must be snake_case, got '{}'", label);
record_error(glean, meta, ErrorType::InvalidLabel, msg, None);
true
} else {
false
};
if error {
combine_base_identifier_and_label(base_identifier, OTHER_LABEL)
} else {
key
}
}
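
A sketch of the static-label path described above, with a counter as the submetric; the names are hypothetical and an initialized `Glean` instance is assumed:

use glean_core::metrics::{CounterMetric, LabeledMetric};
use glean_core::{CommonMetricData, Glean, Lifetime};

fn count_clicks(glean: &Glean) {
    let labeled = LabeledMetric::new(
        CounterMetric::new(CommonMetricData {
            name: "clicks".into(),
            category: "ui".into(),
            send_in_pings: vec!["metrics".into()],
            lifetime: Lifetime::Ping,
            ..Default::default()
        }),
        Some(vec!["toolbar".into(), "menu".into()]), // static labels
    );
    // Allowed label: stored under the identifier "ui.clicks/toolbar".
    labeled.get("toolbar").add(glean, 1);
    // Unknown label: falls back to "ui.clicks/__other__".
    labeled.get("sidebar").add(glean, 1);
}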


@@ -1,212 +1,204 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::memory_unit::MemoryUnit;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
// The base of the logarithm used to determine bucketing
const LOG_BASE: f64 = 2.0;
// The buckets per each order of magnitude of the logarithm.
const BUCKETS_PER_MAGNITUDE: f64 = 16.0;
// Set a maximum recordable value of 1 terabyte so the buckets aren't
// completely unbounded.
const MAX_BYTES: u64 = 1 << 40;
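// Sketch of what these constants imply: a functional histogram derives
// bucket boundaries on the fly, placing a sample at roughly bucket index
// floor(BUCKETS_PER_MAGNITUDE * log2(sample)), i.e. 16 buckets per power
// of two, so no bucket list has to be precomputed up to MAX_BYTES. The
// exact rounding lives in `Histogram<Functional>`.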
/// A memory distribution metric.
///
/// Memory distributions are used to accumulate and store memory sizes.
#[derive(Debug)]
pub struct MemoryDistributionMetric {
meta: CommonMetricData,
memory_unit: MemoryUnit,
}
/// Create a snapshot of the histogram.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
DistributionData {
// **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
// specialized snapshot function.
values: hist.snapshot(),
sum: hist.sum(),
}
}
impl MetricType for MemoryDistributionMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
fn meta_mut(&mut self) -> &mut CommonMetricData {
&mut self.meta
}
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl MemoryDistributionMetric {
/// Creates a new memory distribution metric.
pub fn new(meta: CommonMetricData, memory_unit: MemoryUnit) -> Self {
Self { meta, memory_unit }
}
/// Accumulates the provided sample in the metric.
///
/// # Arguments
///
/// * `sample` - The sample to be recorded by the metric. The sample is assumed to be in the
/// configured memory unit of the metric.
///
/// ## Notes
///
/// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
/// and an `ErrorType::InvalidValue` error is recorded.
pub fn accumulate(&self, glean: &Glean, sample: u64) {
if !self.should_record(glean) {
return;
}
let mut sample = self.memory_unit.as_bytes(sample);
if sample > MAX_BYTES {
let msg = "Sample is bigger than 1 terabyte";
record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
sample = MAX_BYTES;
}
glean
.storage()
.record_with(glean, &self.meta, |old_value| match old_value {
Some(Metric::MemoryDistribution(mut hist)) => {
hist.accumulate(sample);
Metric::MemoryDistribution(hist)
}
_ => {
let mut hist = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
hist.accumulate(sample);
Metric::MemoryDistribution(hist)
}
});
}
/// Accumulates the provided signed samples in the metric.
///
/// This is required so that the platform-specific code can provide us with
/// 64 bit signed integers if no `u64` comparable type is available. This
/// will take care of filtering and reporting errors for any provided negative
/// sample.
///
/// Please note that this assumes that the provided samples are already in the
/// "unit" declared by the instance of the implementing metric type (e.g. if the
/// implementing class is a [MemoryDistributionMetricType] and the instance this
/// method was called on is using [MemoryUnit.Kilobyte], then `samples` are assumed
/// to be in that unit).
///
/// # Arguments
///
/// * `samples` - The vector holding the samples to be recorded by the metric.
///
/// ## Notes
///
/// Discards any negative value in `samples` and reports an `ErrorType::InvalidValue`
/// for each of them.
/// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
/// and an `ErrorType::InvalidValue` error is recorded.
pub fn accumulate_samples_signed(&self, glean: &Glean, samples: Vec<i64>) {
if !self.should_record(glean) {
return;
}
let mut num_negative_samples = 0;
let mut num_too_log_samples = 0;
glean.storage().record_with(glean, &self.meta, |old_value| {
let mut hist = match old_value {
Some(Metric::MemoryDistribution(hist)) => hist,
_ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
};
for &sample in samples.iter() {
if sample < 0 {
num_negative_samples += 1;
} else {
let sample = sample as u64;
let mut sample = self.memory_unit.as_bytes(sample);
if sample > MAX_BYTES {
num_too_log_samples += 1;
sample = MAX_BYTES;
}
hist.accumulate(sample);
}
}
Metric::MemoryDistribution(hist)
});
if num_negative_samples > 0 {
let msg = format!("Accumulated {} negative samples", num_negative_samples);
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
msg,
num_negative_samples,
);
}
if num_too_log_samples > 0 {
let msg = format!(
"Accumulated {} samples larger than 1TB",
num_too_log_samples
);
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
msg,
num_too_log_samples,
);
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::MemoryDistribution(hist)) => Some(snapshot(&hist)),
_ => None,
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently-stored histogram as a JSON String of the serialized value.
///
/// This doesn't clear the stored value.
pub fn test_get_value_as_json_string(
&self,
glean: &Glean,
storage_name: &str,
) -> Option<String> {
self.test_get_value(glean, storage_name)
.map(|snapshot| serde_json::to_string(&snapshot).unwrap())
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::memory_unit::MemoryUnit;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
// The base of the logarithm used to determine bucketing
const LOG_BASE: f64 = 2.0;
// The buckets per each order of magnitude of the logarithm.
const BUCKETS_PER_MAGNITUDE: f64 = 16.0;
// Set a maximum recordable value of 1 terabyte so the buckets aren't
// completely unbounded.
const MAX_BYTES: u64 = 1 << 40;
/// A memory distribution metric.
///
/// Memory distributions are used to accumulate and store memory sizes.
#[derive(Debug)]
pub struct MemoryDistributionMetric {
meta: CommonMetricData,
memory_unit: MemoryUnit,
}
/// Create a snapshot of the histogram.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
DistributionData {
// **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
// specialized snapshot function.
values: hist.snapshot(),
sum: hist.sum(),
}
}
impl MetricType for MemoryDistributionMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
fn meta_mut(&mut self) -> &mut CommonMetricData {
&mut self.meta
}
}
impl MemoryDistributionMetric {
/// Create a new memory distribution metric.
pub fn new(meta: CommonMetricData, memory_unit: MemoryUnit) -> Self {
Self { meta, memory_unit }
}
/// Accumulates the provided sample in the metric.
///
/// ## Arguments
///
/// * `sample` - The sample to be recorded by the metric. The sample is assumed to be in the
/// configured memory unit of the metric.
///
/// ## Notes
///
/// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
/// and an `ErrorType::InvalidValue` error is recorded.
pub fn accumulate(&self, glean: &Glean, sample: u64) {
if !self.should_record(glean) {
return;
}
let mut sample = self.memory_unit.as_bytes(sample);
if sample > MAX_BYTES {
let msg = "Sample is bigger than 1 terabyte";
record_error(glean, &self.meta, ErrorType::InvalidValue, msg, None);
sample = MAX_BYTES;
}
glean
.storage()
.record_with(glean, &self.meta, |old_value| match old_value {
Some(Metric::MemoryDistribution(mut hist)) => {
hist.accumulate(sample);
Metric::MemoryDistribution(hist)
}
_ => {
let mut hist = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
hist.accumulate(sample);
Metric::MemoryDistribution(hist)
}
});
}
/// Accumulates the provided signed samples in the metric.
///
/// This is required so that the platform-specific code can provide us with
/// 64 bit signed integers if no `u64` comparable type is available. This
/// will take care of filtering and reporting errors for any provided negative
/// sample.
///
/// Please note that this assumes that the provided samples are already in the
/// "unit" declared by the instance of the implementing metric type (e.g. if the
/// implementing class is a [MemoryDistributionMetricType] and the instance this
/// method was called on is using [MemoryUnit.Kilobyte], then `samples` are assumed
/// to be in that unit).
///
/// ## Arguments
///
/// * `samples` - The vector holding the samples to be recorded by the metric.
///
/// ## Notes
///
/// Discards any negative value in `samples` and reports an `ErrorType::InvalidValue`
/// for each of them.
/// Values bigger than 1 Terabyte (2<sup>40</sup> bytes) are truncated
/// and an `ErrorType::InvalidValue` error is recorded.
pub fn accumulate_samples_signed(&self, glean: &Glean, samples: Vec<i64>) {
let mut num_negative_samples = 0;
let mut num_too_log_samples = 0;
glean.storage().record_with(glean, &self.meta, |old_value| {
let mut hist = match old_value {
Some(Metric::MemoryDistribution(hist)) => hist,
_ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
};
for &sample in samples.iter() {
if sample < 0 {
num_negative_samples += 1;
} else {
let sample = sample as u64;
let mut sample = self.memory_unit.as_bytes(sample);
if sample > MAX_BYTES {
num_too_log_samples += 1;
sample = MAX_BYTES;
}
hist.accumulate(sample);
}
}
Metric::MemoryDistribution(hist)
});
if num_negative_samples > 0 {
let msg = format!("Accumulated {} negative samples", num_negative_samples);
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
msg,
num_negative_samples,
);
}
if num_too_log_samples > 0 {
let msg = format!(
"Accumulated {} samples larger than 1TB",
num_too_log_samples
);
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
msg,
num_too_log_samples,
);
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Get the currently stored value as an integer.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::MemoryDistribution(hist)) => Some(snapshot(&hist)),
_ => None,
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Get the currently-stored histogram as a JSON String of the serialized value.
///
/// This doesn't clear the stored value.
pub fn test_get_value_as_json_string(
&self,
glean: &Glean,
storage_name: &str,
) -> Option<String> {
self.test_get_value(glean, storage_name)
.map(|snapshot| serde_json::to_string(&snapshot).unwrap())
}
}
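
A sketch of the accumulation path above; the names are hypothetical and an initialized `Glean` instance is assumed:

use glean_core::metrics::{MemoryDistributionMetric, MemoryUnit};
use glean_core::{CommonMetricData, Glean, Lifetime};

fn sample_heap(glean: &Glean, heap_kb: u64) {
    let metric = MemoryDistributionMetric::new(
        CommonMetricData {
            name: "heap_allocated".into(),
            category: "memory".into(),
            send_in_pings: vec!["metrics".into()],
            lifetime: Lifetime::Ping,
            ..Default::default()
        },
        MemoryUnit::Kilobyte,
    );
    // The sample is converted to bytes (heap_kb << 10) before bucketing;
    // anything over 1 TB is clamped and an InvalidValue error recorded.
    metric.accumulate(glean, heap_kb);
}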


@@ -1,64 +1,64 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use serde::{Deserialize, Serialize};
use crate::error::{Error, ErrorKind};
/// Different resolutions supported by the memory related metric types (e.g.
/// MemoryDistributionMetric).
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
#[repr(i32)] // use i32 to be compatible with our JNA definition
pub enum MemoryUnit {
/// 1 byte
Byte,
/// 2^10 bytes
Kilobyte,
/// 2^20 bytes
Megabyte,
/// 2^30 bytes
Gigabyte,
}
impl MemoryUnit {
/// Converts a value in the given unit to bytes.
///
/// # Arguments
///
/// * `value` - the value to convert.
///
/// # Returns
///
/// The integer representation of the byte value.
pub fn as_bytes(self, value: u64) -> u64 {
use MemoryUnit::*;
match self {
Byte => value,
Kilobyte => value << 10,
Megabyte => value << 20,
Gigabyte => value << 30,
}
}
}
/// Trait implementation for converting an integer value
/// to a MemoryUnit. This is used in the FFI code. Please
/// note that values should match the ordering of the platform
/// specific side of things (e.g. Kotlin implementation).
impl TryFrom<i32> for MemoryUnit {
type Error = Error;
fn try_from(value: i32) -> Result<MemoryUnit, Self::Error> {
match value {
0 => Ok(MemoryUnit::Byte),
1 => Ok(MemoryUnit::Kilobyte),
2 => Ok(MemoryUnit::Megabyte),
3 => Ok(MemoryUnit::Gigabyte),
e => Err(ErrorKind::MemoryUnit(e).into()),
}
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use serde::{Deserialize, Serialize};
use crate::error::{Error, ErrorKind};
/// Different resolutions supported by the memory related metric types (e.g.
/// MemoryDistributionMetric).
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
#[repr(i32)] // use i32 to be compatible with our JNA definition
pub enum MemoryUnit {
/// 1 byte
Byte,
/// 2^10 bytes
Kilobyte,
/// 2^20 bytes
Megabyte,
/// 2^30 bytes
Gigabyte,
}
impl MemoryUnit {
/// Convert a value in the given unit to bytes.
///
/// ## Arguments
///
/// * `value` - the value to convert.
///
/// ## Return value
///
/// The integer representation of the byte value.
pub fn as_bytes(self, value: u64) -> u64 {
use MemoryUnit::*;
match self {
Byte => value,
Kilobyte => value << 10,
Megabyte => value << 20,
Gigabyte => value << 30,
}
}
}
/// Trait implementation for converting an integer value
/// to a MemoryUnit. This is used in the FFI code. Please
/// note that values should match the ordering of the platform
/// specific side of things (e.g. Kotlin implementation).
impl TryFrom<i32> for MemoryUnit {
type Error = Error;
fn try_from(value: i32) -> Result<MemoryUnit, Self::Error> {
match value {
0 => Ok(MemoryUnit::Byte),
1 => Ok(MemoryUnit::Kilobyte),
2 => Ok(MemoryUnit::Megabyte),
3 => Ok(MemoryUnit::Gigabyte),
e => Err(ErrorKind::MemoryUnit(e).into()),
}
}
}
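
The conversions and the FFI mapping are easy to check in isolation; a small sketch against the API shown above:

use std::convert::TryFrom;

use glean_core::metrics::MemoryUnit;

fn demo() {
    assert_eq!(MemoryUnit::Kilobyte.as_bytes(2), 2048); // 2 << 10
    assert_eq!(MemoryUnit::Gigabyte.as_bytes(1), 1 << 30);
    // Integers map onto variants in declaration order, matching the
    // platform (e.g. Kotlin) side.
    assert!(matches!(MemoryUnit::try_from(1), Ok(MemoryUnit::Kilobyte)));
    assert!(MemoryUnit::try_from(42).is_err());
}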

368
third_party/rust/glean-core/src/metrics/mod.rs (vendored)

@@ -1,187 +1,181 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! The different metric types supported by the Glean SDK to handle data.
use std::collections::HashMap;
use chrono::{DateTime, FixedOffset};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value as JsonValue};
mod boolean;
mod counter;
mod custom_distribution;
mod datetime;
mod event;
mod experiment;
mod jwe;
mod labeled;
mod memory_distribution;
mod memory_unit;
mod ping;
mod quantity;
mod string;
mod string_list;
mod time_unit;
mod timespan;
mod timing_distribution;
mod uuid;
pub use crate::event_database::RecordedEvent;
use crate::histogram::{Functional, Histogram, PrecomputedExponential, PrecomputedLinear};
pub use crate::metrics::datetime::Datetime;
use crate::util::get_iso_time_string;
use crate::CommonMetricData;
use crate::Glean;
pub use self::boolean::BooleanMetric;
pub use self::counter::CounterMetric;
pub use self::custom_distribution::CustomDistributionMetric;
pub use self::datetime::DatetimeMetric;
pub use self::event::EventMetric;
pub(crate) use self::experiment::ExperimentMetric;
pub use crate::histogram::HistogramType;
// Note: only expose RecordedExperimentData to tests in
// the next line, so that glean-core\src\lib.rs won't fail to build.
#[cfg(test)]
pub(crate) use self::experiment::RecordedExperimentData;
pub use self::jwe::JweMetric;
pub use self::labeled::{
combine_base_identifier_and_label, dynamic_label, strip_label, LabeledMetric,
};
pub use self::memory_distribution::MemoryDistributionMetric;
pub use self::memory_unit::MemoryUnit;
pub use self::ping::PingType;
pub use self::quantity::QuantityMetric;
pub use self::string::StringMetric;
pub use self::string_list::StringListMetric;
pub use self::time_unit::TimeUnit;
pub use self::timespan::TimespanMetric;
pub use self::timing_distribution::TimerId;
pub use self::timing_distribution::TimingDistributionMetric;
pub use self::uuid::UuidMetric;
/// A snapshot of all buckets and the accumulated sum of a distribution.
#[derive(Debug, Serialize)]
pub struct DistributionData {
/// A map containing the bucket index mapped to the accumulated count.
///
/// This can contain buckets with a count of `0`.
pub values: HashMap<u64, u64>,
/// The accumulated sum of all the samples in the distribution.
pub sum: u64,
}
/// The available metrics.
///
/// This is the in-memory and persisted layout of a metric.
///
/// ## Note
///
/// The order of metrics in this enum is important, as it is used for serialization.
/// Do not reorder the variants.
///
/// **Any new metric must be added at the end.**
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum Metric {
/// A boolean metric. See [`BooleanMetric`](struct.BooleanMetric.html) for more information.
Boolean(bool),
/// A counter metric. See [`CounterMetric`](struct.CounterMetric.html) for more information.
Counter(i32),
/// A custom distribution with precomputed exponential bucketing.
/// See [`CustomDistributionMetric`](struct.CustomDistributionMetric.html) for more information.
CustomDistributionExponential(Histogram<PrecomputedExponential>),
/// A custom distribution with precomputed linear bucketing.
/// See [`CustomDistributionMetric`](struct.CustomDistributionMetric.html) for more information.
CustomDistributionLinear(Histogram<PrecomputedLinear>),
/// A datetime metric. See [`DatetimeMetric`](struct.DatetimeMetric.html) for more information.
Datetime(DateTime<FixedOffset>, TimeUnit),
/// An experiment metric. See [`ExperimentMetric`](struct.ExperimentMetric.html) for more information.
Experiment(experiment::RecordedExperimentData),
/// A quantity metric. See [`QuantityMetric`](struct.QuantityMetric.html) for more information.
Quantity(i64),
/// A string metric. See [`StringMetric`](struct.StringMetric.html) for more information.
String(String),
/// A string list metric. See [`StringListMetric`](struct.StringListMetric.html) for more information.
StringList(Vec<String>),
/// A UUID metric. See [`UuidMetric`](struct.UuidMetric.html) for more information.
Uuid(String),
/// A timespan metric. See [`TimespanMetric`](struct.TimespanMetric.html) for more information.
Timespan(std::time::Duration, TimeUnit),
/// A timing distribution. See [`TimingDistributionMetric`](struct.TimingDistributionMetric.html) for more information.
TimingDistribution(Histogram<Functional>),
/// A memory distribution. See [`MemoryDistributionMetric`](struct.MemoryDistributionMetric.html) for more information.
MemoryDistribution(Histogram<Functional>),
/// A JWE metric. See [`JweMetric`](struct.JweMetric.html) for more information.
Jwe(String),
}
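
The reordering warning above is concrete: glean-core persists metrics with `bincode` (see its Cargo dependencies), and bincode identifies an enum variant by its index, so shuffling variants silently re-maps previously stored data. A minimal sketch of the failure mode, using hypothetical `Old`/`New` enums that are not part of this file:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize)]
enum Old {
    Boolean(bool),
    Counter(i32),
}

// The same variants, reordered: a persisted variant index now points
// at a different payload type.
#[derive(Deserialize, Debug)]
enum New {
    Counter(i32),
    Boolean(bool),
}

fn main() {
    let bytes = bincode::serialize(&Old::Counter(7)).unwrap();
    // Reads variant index 1, now `New::Boolean`, and then errors out
    // (or decodes garbage) instead of round-tripping a counter of 7.
    println!("{:?}", bincode::deserialize::<New>(&bytes));
}
```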
/// A `MetricType` describes common behavior across all metrics.
pub trait MetricType {
/// Access the stored metadata
fn meta(&self) -> &CommonMetricData;
/// Access the stored metadata mutably
fn meta_mut(&mut self) -> &mut CommonMetricData;
/// Whether this metric should currently be recorded
///
/// This depends on the metric's own state, as determined by its metadata,
/// and whether upload is enabled on the Glean object.
fn should_record(&self, glean: &Glean) -> bool {
glean.is_upload_enabled() && self.meta().should_record()
}
}
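
As a sketch of this contract, an implementor only has to expose its `CommonMetricData`; the default `should_record` then combines the global upload flag with that metadata. The type below is illustrative, not part of glean-core:

```rust
struct ExampleMetric {
    meta: CommonMetricData,
}

impl MetricType for ExampleMetric {
    fn meta(&self) -> &CommonMetricData {
        &self.meta
    }

    fn meta_mut(&mut self) -> &mut CommonMetricData {
        &mut self.meta
    }

    // `should_record` is inherited from the trait: it returns true only
    // when upload is enabled on the `Glean` object and the metric's own
    // metadata allows recording.
}
```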
impl Metric {
/// Gets the ping section the metric fits into.
///
/// This determines the section of the ping to place the metric data in when
/// assembling the ping payload.
pub fn ping_section(&self) -> &'static str {
match self {
Metric::Boolean(_) => "boolean",
Metric::Counter(_) => "counter",
// Custom distributions are in the same section, no matter what bucketing.
Metric::CustomDistributionExponential(_) => "custom_distribution",
Metric::CustomDistributionLinear(_) => "custom_distribution",
Metric::Datetime(_, _) => "datetime",
Metric::Experiment(_) => panic!("Experiments should not be serialized through this"),
Metric::Quantity(_) => "quantity",
Metric::String(_) => "string",
Metric::StringList(_) => "string_list",
Metric::Timespan(..) => "timespan",
Metric::TimingDistribution(_) => "timing_distribution",
Metric::Uuid(_) => "uuid",
Metric::MemoryDistribution(_) => "memory_distribution",
Metric::Jwe(_) => "jwe",
}
}
/// The JSON representation of the metric's data
pub fn as_json(&self) -> JsonValue {
match self {
Metric::Boolean(b) => json!(b),
Metric::Counter(c) => json!(c),
Metric::CustomDistributionExponential(hist) => {
json!(custom_distribution::snapshot(hist))
}
Metric::CustomDistributionLinear(hist) => json!(custom_distribution::snapshot(hist)),
Metric::Datetime(d, time_unit) => json!(get_iso_time_string(*d, *time_unit)),
Metric::Experiment(e) => e.as_json(),
Metric::Quantity(q) => json!(q),
Metric::String(s) => json!(s),
Metric::StringList(v) => json!(v),
Metric::Timespan(time, time_unit) => {
json!({"value": time_unit.duration_convert(*time), "time_unit": time_unit})
}
Metric::TimingDistribution(hist) => json!(timing_distribution::snapshot(hist)),
Metric::Uuid(s) => json!(s),
Metric::MemoryDistribution(hist) => json!(memory_distribution::snapshot(hist)),
Metric::Jwe(s) => json!(s),
}
}
}
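
Together, these two methods are what a ping assembler needs: `ping_section` names the object the value is grouped under, and `as_json` supplies the value itself. A hedged sketch of that step (the real assembly code lives elsewhere in glean-core; note that `Metric::Experiment` must be filtered out beforehand, since `ping_section` panics on it):

```rust
use serde_json::{Map, Value as JsonValue};

// Illustrative only: nest one named metric under its ping section,
// e.g. {"counter": {"some.metric": 7}}.
fn payload_fragment(name: &str, metric: &Metric) -> JsonValue {
    let mut section = Map::new();
    section.insert(name.to_string(), metric.as_json());

    let mut fragment = Map::new();
    fragment.insert(metric.ping_section().to_string(), section.into());
    fragment.into()
}
```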
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
//! The different metric types supported by the Glean SDK to handle data.
use std::collections::HashMap;
use chrono::{DateTime, FixedOffset};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value as JsonValue};
mod boolean;
mod counter;
mod custom_distribution;
mod datetime;
mod event;
mod experiment;
mod labeled;
mod memory_distribution;
mod memory_unit;
mod ping;
mod quantity;
mod string;
mod string_list;
mod time_unit;
mod timespan;
mod timing_distribution;
mod uuid;
pub use crate::event_database::RecordedEvent;
use crate::histogram::{Functional, Histogram, PrecomputedExponential, PrecomputedLinear};
pub use crate::metrics::datetime::Datetime;
use crate::util::get_iso_time_string;
use crate::CommonMetricData;
use crate::Glean;
pub use self::boolean::BooleanMetric;
pub use self::counter::CounterMetric;
pub use self::datetime::DatetimeMetric;
pub use self::event::EventMetric;
pub(crate) use self::experiment::ExperimentMetric;
pub use crate::histogram::HistogramType;
// Note: only expose RecordedExperimentData to tests in
// the next line, so that glean-core\src\lib.rs won't fail to build.
pub use self::custom_distribution::CustomDistributionMetric;
#[cfg(test)]
pub(crate) use self::experiment::RecordedExperimentData;
pub use self::labeled::{
combine_base_identifier_and_label, dynamic_label, strip_label, LabeledMetric,
};
pub use self::memory_distribution::MemoryDistributionMetric;
pub use self::memory_unit::MemoryUnit;
pub use self::ping::PingType;
pub use self::quantity::QuantityMetric;
pub use self::string::StringMetric;
pub use self::string_list::StringListMetric;
pub use self::time_unit::TimeUnit;
pub use self::timespan::TimespanMetric;
pub use self::timing_distribution::TimerId;
pub use self::timing_distribution::TimingDistributionMetric;
pub use self::uuid::UuidMetric;
/// A snapshot of all buckets and the accumulated sum of a distribution.
#[derive(Debug, Serialize)]
pub struct DistributionData {
/// A map containing the bucket index mapped to the accumulated count.
///
/// This can contain buckets with a count of `0`.
pub values: HashMap<u64, u64>,
/// The accumulated sum of all the samples in the distribution.
pub sum: u64,
}
/// The available metrics.
///
/// This is the in-memory and persisted layout of a metric.
///
/// ## Note
///
/// The order of metrics in this enum is important, as it is used for serialization.
/// Do not reorder the variants.
///
/// **Any new metric must be added at the end.**
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
pub enum Metric {
/// A boolean metric. See [`BooleanMetric`](struct.BooleanMetric.html) for more information.
Boolean(bool),
/// A counter metric. See [`CounterMetric`](struct.CounterMetric.html) for more information.
Counter(i32),
/// A custom distribution with precomputed exponential bucketing.
/// See [`CustomDistributionMetric`](struct.CustomDistributionMetric.html) for more information.
CustomDistributionExponential(Histogram<PrecomputedExponential>),
/// A custom distribution with precomputed linear bucketing.
/// See [`CustomDistributionMetric`](struct.CustomDistributionMetric.html) for more information.
CustomDistributionLinear(Histogram<PrecomputedLinear>),
/// A datetime metric. See [`DatetimeMetric`](struct.DatetimeMetric.html) for more information.
Datetime(DateTime<FixedOffset>, TimeUnit),
/// An experiment metric. See [`ExperimentMetric`](struct.ExperimentMetric.html) for more information.
Experiment(experiment::RecordedExperimentData),
/// A quantity metric. See [`QuantityMetric`](struct.QuantityMetric.html) for more information.
Quantity(i64),
/// A string metric. See [`StringMetric`](struct.StringMetric.html) for more information.
String(String),
/// A string list metric. See [`StringListMetric`](struct.StringListMetric.html) for more information.
StringList(Vec<String>),
/// A UUID metric. See [`UuidMetric`](struct.UuidMetric.html) for more information.
Uuid(String),
/// A timespan metric. See [`TimespanMetric`](struct.TimespanMetric.html) for more information.
Timespan(std::time::Duration, TimeUnit),
/// A timing distribution. See [`TimingDistributionMetric`](struct.TimingDistributionMetric.html) for more information.
TimingDistribution(Histogram<Functional>),
/// A memory distribution. See [`MemoryDistributionMetric`](struct.MemoryDistributionMetric.html) for more information.
MemoryDistribution(Histogram<Functional>),
}
/// A `MetricType` describes common behavior across all metrics.
pub trait MetricType {
/// Access the stored metadata
fn meta(&self) -> &CommonMetricData;
/// Access the stored metadata mutably
fn meta_mut(&mut self) -> &mut CommonMetricData;
/// Whether this metric should currently be recorded
///
/// This depends on the metric's own state, as determined by its metadata,
/// and whether upload is enabled on the Glean object.
fn should_record(&self, glean: &Glean) -> bool {
glean.is_upload_enabled() && self.meta().should_record()
}
}
impl Metric {
/// The ping section the metric fits into.
///
/// This determines the section of the ping to place the metric data in when
/// assembling the ping payload.
pub fn ping_section(&self) -> &'static str {
match self {
Metric::Boolean(_) => "boolean",
Metric::Counter(_) => "counter",
// Custom distributions are in the same section, no matter what bucketing.
Metric::CustomDistributionExponential(_) => "custom_distribution",
Metric::CustomDistributionLinear(_) => "custom_distribution",
Metric::Datetime(_, _) => "datetime",
Metric::Experiment(_) => panic!("Experiments should not be serialized through this"),
Metric::Quantity(_) => "quantity",
Metric::String(_) => "string",
Metric::StringList(_) => "string_list",
Metric::Timespan(..) => "timespan",
Metric::TimingDistribution(_) => "timing_distribution",
Metric::Uuid(_) => "uuid",
Metric::MemoryDistribution(_) => "memory_distribution",
}
}
/// The JSON representation of the metric's data
pub fn as_json(&self) -> JsonValue {
match self {
Metric::Boolean(b) => json!(b),
Metric::Counter(c) => json!(c),
Metric::CustomDistributionExponential(hist) => {
json!(custom_distribution::snapshot(hist))
}
Metric::CustomDistributionLinear(hist) => json!(custom_distribution::snapshot(hist)),
Metric::Datetime(d, time_unit) => json!(get_iso_time_string(*d, *time_unit)),
Metric::Experiment(e) => e.as_json(),
Metric::Quantity(q) => json!(q),
Metric::String(s) => json!(s),
Metric::StringList(v) => json!(v),
Metric::Timespan(time, time_unit) => {
json!({"value": time_unit.duration_convert(*time), "time_unit": time_unit})
}
Metric::TimingDistribution(hist) => json!(timing_distribution::snapshot(hist)),
Metric::Uuid(s) => json!(s),
Metric::MemoryDistribution(hist) => json!(memory_distribution::snapshot(hist)),
}
}
}


@ -1,78 +1,74 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error::Result;
use crate::Glean;
/// Stores information about a ping.
///
/// This is required so that, given metric data queued on disk, we can send
/// pings with the correct settings, e.g. whether they include a client_id.
#[derive(Clone, Debug)]
pub struct PingType {
/// The name of the ping.
pub name: String,
/// Whether the ping should include the client ID.
pub include_client_id: bool,
/// Whether the ping should be sent if it is empty
pub send_if_empty: bool,
/// The "reason" codes that this ping can send
pub reason_codes: Vec<String>,
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl PingType {
/// Creates a new ping type for the given name, whether to include the client ID and whether to
/// send this ping empty.
///
/// # Arguments
///
/// * `name` - The name of the ping.
/// * `include_client_id` - Whether to include the client ID in the assembled ping when submitting.
/// * `send_if_empty` - Whether the ping should be sent empty or not.
/// * `reason_codes` - The valid reason codes for this ping.
pub fn new<A: Into<String>>(
name: A,
include_client_id: bool,
send_if_empty: bool,
reason_codes: Vec<String>,
) -> Self {
Self {
name: name.into(),
include_client_id,
send_if_empty,
reason_codes,
}
}
/// Submits the ping for eventual uploading
///
/// # Arguments
///
/// * `glean` - the Glean instance to use to send the ping.
/// * `reason` - the reason the ping was triggered. Included in the
/// `ping_info.reason` part of the payload.
///
/// # Returns
///
/// See [`Glean#submit_ping`](../struct.Glean.html#method.submit_ping) for details.
pub fn submit(&self, glean: &Glean, reason: Option<&str>) -> Result<bool> {
let corrected_reason = match reason {
Some(reason) => {
if self.reason_codes.contains(&reason.to_string()) {
Some(reason)
} else {
log::error!("Invalid reason code {} for ping {}", reason, self.name);
None
}
}
None => None,
};
glean.submit_ping(self, corrected_reason)
}
}
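
A hedged usage sketch, assuming an initialized `Glean` instance named `glean` is in scope; the ping name and reason codes are illustrative:

```rust
let ping = PingType::new(
    "prototype",                     // illustrative ping name
    true,                            // include the client ID
    false,                           // don't send if empty
    vec!["background".into(), "foreground".into()],
);

// A reason outside `reason_codes` is logged and dropped, and the ping is
// then submitted without a `ping_info.reason` field. The Ok(bool) says
// whether a ping was actually queued for upload.
match ping.submit(&glean, Some("background")) {
    Ok(true) => log::info!("ping queued for upload"),
    Ok(false) => log::info!("nothing to send"),
    Err(e) => log::error!("submission failed: {}", e),
}
```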
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use crate::error::Result;
use crate::Glean;
/// Stores information about a ping.
///
/// This is required so that, given metric data queued on disk, we can send
/// pings with the correct settings, e.g. whether they include a client_id.
#[derive(Clone, Debug)]
pub struct PingType {
/// The name of the ping.
pub name: String,
/// Whether the ping should include the client ID.
pub include_client_id: bool,
/// Whether the ping should be sent if it is empty
pub send_if_empty: bool,
/// The "reason" codes that this ping can send
pub reason_codes: Vec<String>,
}
impl PingType {
/// Create a new ping type for the given name, whether to include the client ID and whether to
/// send this ping empty.
///
/// ## Arguments
///
/// * `name` - The name of the ping.
/// * `include_client_id` - Whether to include the client ID in the assembled ping when submitting.
/// * `send_if_empty` - Whether the ping should be sent empty or not.
/// * `reason_codes` - The valid reason codes for this ping.
pub fn new<A: Into<String>>(
name: A,
include_client_id: bool,
send_if_empty: bool,
reason_codes: Vec<String>,
) -> Self {
Self {
name: name.into(),
include_client_id,
send_if_empty,
reason_codes,
}
}
/// Submit the ping for eventual uploading
///
/// ## Arguments
///
/// * `glean` - the Glean instance to use to send the ping.
/// * `reason` - the reason the ping was triggered. Included in the
/// `ping_info.reason` part of the payload.
///
/// ## Return value
///
/// See [`Glean#submit_ping`](../struct.Glean.html#method.submit_ping) for details.
pub fn submit(&self, glean: &Glean, reason: Option<&str>) -> Result<bool> {
let corrected_reason = match reason {
Some(reason) => {
if self.reason_codes.contains(&reason.to_string()) {
Some(reason)
} else {
log::error!("Invalid reason code {} for ping {}", reason, self.name);
None
}
}
None => None,
};
glean.submit_ping(self, corrected_reason)
}
}


@ -27,19 +27,15 @@ impl MetricType for QuantityMetric {
}
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl QuantityMetric {
/// Creates a new quantity metric.
/// Create a new quantity metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Sets the value. Must be non-negative.
/// Set the value. Must be non-negative.
///
/// # Arguments
/// ## Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `value` - The value. Must be non-negative.
@ -70,7 +66,7 @@ impl QuantityMetric {
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as an integer.
/// Get the currently stored value as an integer.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<i64> {


@ -30,19 +30,15 @@ impl MetricType for StringMetric {
}
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl StringMetric {
/// Creates a new string metric.
/// Create a new string metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Sets to the specified value.
/// Set to the specified value.
///
/// # Arguments
/// ## Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `value` - The string to set the metric to.
@ -63,7 +59,7 @@ impl StringMetric {
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as a string.
/// Get the currently stored value as a string.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<String> {


@ -33,19 +33,15 @@ impl MetricType for StringListMetric {
}
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl StringListMetric {
/// Creates a new string list metric.
/// Create a new string list metric.
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Adds a new string to the list.
/// Add a new string to the list.
///
/// # Arguments
/// ## Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `value` - The string to add.
@ -85,9 +81,9 @@ impl StringListMetric {
}
}
/// Sets to a specific list of strings.
/// Set to a specific list of strings.
///
/// # Arguments
/// ## Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `value` - The list of strings to set the metric to.
@ -127,7 +123,7 @@ impl StringListMetric {
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently-stored values.
/// Get the currently-stored values.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<Vec<String>> {
@ -143,7 +139,7 @@ impl StringListMetric {
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently-stored values as a JSON String of the format
/// Get the currently-stored values as a JSON String of the format
/// ["string1", "string2", ...]
///
/// This doesn't clear the stored value.


@ -1,117 +1,116 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use std::time::Duration;
use serde::{Deserialize, Serialize};
use crate::error::{Error, ErrorKind};
/// Different resolutions supported by the time related
/// metric types (e.g. DatetimeMetric).
#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "lowercase")]
#[repr(i32)] // use i32 to be compatible with our JNA definition
pub enum TimeUnit {
/// Truncate to nanosecond precision.
Nanosecond,
/// Truncate to microsecond precision.
Microsecond,
/// Truncate to millisecond precision.
Millisecond,
/// Truncate to second precision.
Second,
/// Truncate to minute precision.
Minute,
/// Truncate to hour precision.
Hour,
/// Truncate to day precision.
Day,
}
impl TimeUnit {
/// Formats the given time unit, truncating the time if needed.
pub fn format_pattern(self) -> &'static str {
use TimeUnit::*;
match self {
Nanosecond => "%Y-%m-%dT%H:%M:%S%.f%:z",
Microsecond => "%Y-%m-%dT%H:%M:%S%.6f%:z",
Millisecond => "%Y-%m-%dT%H:%M:%S%.3f%:z",
Second => "%Y-%m-%dT%H:%M:%S%:z",
Minute => "%Y-%m-%dT%H:%M%:z",
Hour => "%Y-%m-%dT%H%:z",
Day => "%Y-%m-%d%:z",
}
}
/// Converts a duration to the requested time unit.
///
/// # Arguments
///
/// * `duration` - the duration to convert.
///
/// # Returns
///
/// The integer representation of the converted duration.
pub fn duration_convert(self, duration: Duration) -> u64 {
use TimeUnit::*;
match self {
Nanosecond => duration.as_nanos() as u64,
Microsecond => duration.as_micros() as u64,
Millisecond => duration.as_millis() as u64,
Second => duration.as_secs(),
Minute => duration.as_secs() / 60,
Hour => duration.as_secs() / 60 / 60,
Day => duration.as_secs() / 60 / 60 / 24,
}
}
/// Converts a duration in the given unit to nanoseconds.
///
/// # Arguments
///
/// * `duration` - the duration to convert.
///
/// # Returns
///
/// The integer representation of the nanosecond duration.
pub fn as_nanos(self, duration: u64) -> u64 {
use TimeUnit::*;
let duration = match self {
Nanosecond => Duration::from_nanos(duration),
Microsecond => Duration::from_micros(duration),
Millisecond => Duration::from_millis(duration),
Second => Duration::from_secs(duration),
Minute => Duration::from_secs(duration * 60),
Hour => Duration::from_secs(duration * 60 * 60),
Day => Duration::from_secs(duration * 60 * 60 * 24),
};
duration.as_nanos() as u64
}
}
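
Truncation to the requested resolution happens through the format pattern itself; a small sketch, assuming chrono is in scope as elsewhere in this crate:

```rust
use chrono::DateTime;

let dt = DateTime::parse_from_rfc3339("2020-11-09T13:36:09.123456789+02:00").unwrap();

// Minute resolution drops the seconds and fractional part entirely.
assert_eq!(
    dt.format(TimeUnit::Minute.format_pattern()).to_string(),
    "2020-11-09T13:36+02:00"
);
```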
/// Trait implementation for converting an integer value to a TimeUnit.
///
/// This is used in the FFI code.
///
/// Please note that values should match the ordering of the
/// platform-specific side of things (e.g. Kotlin implementation).
impl TryFrom<i32> for TimeUnit {
type Error = Error;
fn try_from(value: i32) -> Result<TimeUnit, Self::Error> {
match value {
0 => Ok(TimeUnit::Nanosecond),
1 => Ok(TimeUnit::Microsecond),
2 => Ok(TimeUnit::Millisecond),
3 => Ok(TimeUnit::Second),
4 => Ok(TimeUnit::Minute),
5 => Ok(TimeUnit::Hour),
6 => Ok(TimeUnit::Day),
e => Err(ErrorKind::TimeUnit(e).into()),
}
}
}
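
A quick sketch of both conversion directions and the FFI mapping, with values chosen for illustration:

```rust
use std::convert::TryFrom;
use std::time::Duration;

let unit = TimeUnit::Millisecond;

// Duration -> integer in the requested unit, truncating: 2500 us -> 2 ms.
assert_eq!(unit.duration_convert(Duration::from_micros(2_500)), 2);

// Integer in the given unit -> nanoseconds: 2 ms -> 2_000_000 ns.
assert_eq!(unit.as_nanos(2), 2_000_000);

// The i32 mapping must line up with the platform bindings (e.g. Kotlin).
assert_eq!(TimeUnit::try_from(2).ok(), Some(TimeUnit::Millisecond));
```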
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::convert::TryFrom;
use std::time::Duration;
use serde::{Deserialize, Serialize};
use crate::error::{Error, ErrorKind};
/// Different resolutions supported by the time related
/// metric types (e.g. DatetimeMetric).
#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq)]
#[serde(rename_all = "lowercase")]
#[repr(i32)] // use i32 to be compatible with our JNA definition
pub enum TimeUnit {
/// Truncate to nanosecond precision.
Nanosecond,
/// Truncate to microsecond precision.
Microsecond,
/// Truncate to millisecond precision.
Millisecond,
/// Truncate to second precision.
Second,
/// Truncate to minute precision.
Minute,
/// Truncate to hour precision.
Hour,
/// Truncate to day precision.
Day,
}
impl TimeUnit {
/// How to format the given TimeUnit, truncating
/// the time if needed.
pub fn format_pattern(self) -> &'static str {
use TimeUnit::*;
match self {
Nanosecond => "%Y-%m-%dT%H:%M:%S%.f%:z",
Microsecond => "%Y-%m-%dT%H:%M:%S%.6f%:z",
Millisecond => "%Y-%m-%dT%H:%M:%S%.3f%:z",
Second => "%Y-%m-%dT%H:%M:%S%:z",
Minute => "%Y-%m-%dT%H:%M%:z",
Hour => "%Y-%m-%dT%H%:z",
Day => "%Y-%m-%d%:z",
}
}
/// Convert a duration to the requested time unit.
///
/// ## Arguments
///
/// * `duration` - the duration to convert.
///
/// ## Return value
///
/// The integer representation of the converted duration.
pub fn duration_convert(self, duration: Duration) -> u64 {
use TimeUnit::*;
match self {
Nanosecond => duration.as_nanos() as u64,
Microsecond => duration.as_micros() as u64,
Millisecond => duration.as_millis() as u64,
Second => duration.as_secs(),
Minute => duration.as_secs() / 60,
Hour => duration.as_secs() / 60 / 60,
Day => duration.as_secs() / 60 / 60 / 24,
}
}
/// Convert a duration in the given unit to nanoseconds.
///
/// ## Arguments
///
/// * `duration` - the duration to convert.
///
/// ## Return value
///
/// The integer representation of the nanosecond duration.
pub fn as_nanos(self, duration: u64) -> u64 {
use TimeUnit::*;
let duration = match self {
Nanosecond => Duration::from_nanos(duration),
Microsecond => Duration::from_micros(duration),
Millisecond => Duration::from_millis(duration),
Second => Duration::from_secs(duration),
Minute => Duration::from_secs(duration * 60),
Hour => Duration::from_secs(duration * 60 * 60),
Day => Duration::from_secs(duration * 60 * 60 * 24),
};
duration.as_nanos() as u64
}
}
/// Trait implementation for converting an integer value
/// to a TimeUnit. This is used in the FFI code. Please
/// note that values should match the ordering of the platform-specific
/// side of things (e.g. Kotlin implementation).
impl TryFrom<i32> for TimeUnit {
type Error = Error;
fn try_from(value: i32) -> Result<TimeUnit, Self::Error> {
match value {
0 => Ok(TimeUnit::Nanosecond),
1 => Ok(TimeUnit::Microsecond),
2 => Ok(TimeUnit::Millisecond),
3 => Ok(TimeUnit::Second),
4 => Ok(TimeUnit::Minute),
5 => Ok(TimeUnit::Hour),
6 => Ok(TimeUnit::Day),
e => Err(ErrorKind::TimeUnit(e).into()),
}
}
}


@ -32,12 +32,8 @@ impl MetricType for TimespanMetric {
}
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl TimespanMetric {
/// Creates a new timespan metric.
/// Create a new timespan metric.
pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
Self {
meta,
@ -46,7 +42,7 @@ impl TimespanMetric {
}
}
/// Starts tracking time for the provided metric.
/// Start tracking time for the provided metric.
///
/// This records an error if it's already tracking time (i.e. start was already
/// called with no corresponding `stop`): in that case the original
@ -70,7 +66,7 @@ impl TimespanMetric {
self.start_time = Some(start_time);
}
/// Stops tracking time for the provided metric. Sets the metric to the elapsed time.
/// Stop tracking time for the provided metric. Sets the metric to the elapsed time.
///
/// This will record an error if no `start` was called.
pub fn set_stop(&mut self, glean: &Glean, stop_time: u64) {
@ -97,12 +93,12 @@ impl TimespanMetric {
self.set_raw(glean, duration, false);
}
/// Aborts a previous `start` call. No error is recorded if no `start` was called.
/// Abort a previous `start` call. No error is recorded if no `start` was called.
pub fn cancel(&mut self) {
self.start_time = None;
}
/// Explicitly sets the timespan value.
/// Explicitly set the timespan value.
///
/// This API should only be used if your library or application requires recording
/// times in a way that can not make use of `start`/`stop`/`cancel`.
@ -111,7 +107,7 @@ impl TimespanMetric {
/// timespan measurement. To be safe, `set_raw` should generally be followed by
/// sending a custom ping containing the timespan.
///
/// # Arguments
/// ## Arguments
///
/// * `elapsed` - The elapsed time to record.
/// * `overwrite` - Whether or not to overwrite existing data.
@ -162,7 +158,7 @@ impl TimespanMetric {
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as an integer.
/// Get the currently stored value as an integer.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<u64> {


@ -1,408 +1,398 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
// The base of the logarithm used to determine bucketing
const LOG_BASE: f64 = 2.0;
// The buckets per each order of magnitude of the logarithm.
const BUCKETS_PER_MAGNITUDE: f64 = 8.0;
// Maximum time, which means we retain a maximum of 316 buckets.
// It is automatically adjusted based on the `time_unit` parameter
// so that:
//
// - `nanosecond` - 10 minutes
// - `microsecond` - ~6.94 days
// - `millisecond` - ~19 years
const MAX_SAMPLE_TIME: u64 = 1000 * 1000 * 1000 * 60 * 10;
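
The three durations quoted above fall out of the constant directly; a back-of-the-envelope check (arithmetic only, not part of this file):

```rust
fn main() {
    let max = 1000u64 * 1000 * 1000 * 60 * 10; // 600_000_000_000 raw units
    // As nanoseconds:  6e11 ns = 600 s = 10 minutes.
    assert_eq!(max / 1_000_000_000 / 60, 10);
    // As microseconds: 6e11 us = 6e5 s, about 6.94 days.
    assert!((max as f64 / 1e6 / 86_400.0 - 6.94).abs() < 0.01);
    // As milliseconds: 6e11 ms = 6e8 s, about 19 years.
    assert_eq!((max as f64 / 1e3 / (86_400.0 * 365.25)).round(), 19.0);
}
```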
/// Identifier for a running timer.
pub type TimerId = u64;
#[derive(Debug, Clone)]
struct Timings {
next_id: TimerId,
start_times: HashMap<TimerId, u64>,
}
/// Track different running timers, identified by a `TimerId`.
impl Timings {
/// Create a new timing manager.
fn new() -> Self {
Self {
next_id: 0,
start_times: HashMap::new(),
}
}
/// Start a new timer and set it to the `start_time`.
///
/// Returns a new `TimerId` identifying the timer.
fn set_start(&mut self, start_time: u64) -> TimerId {
let id = self.next_id;
self.next_id += 1;
self.start_times.insert(id, start_time);
id
}
/// Stop the timer and return the elapsed time.
///
/// Returns an error if the `id` does not correspond to a running timer.
/// Returns an error if the stop time is before the start time.
///
/// ## Note
///
/// This API exists to satisfy the FFI requirements, where the clock is handled on the
/// application side and passed in as a timestamp.
fn set_stop(&mut self, id: TimerId, stop_time: u64) -> Result<u64, (ErrorType, &str)> {
let start_time = match self.start_times.remove(&id) {
Some(start_time) => start_time,
None => return Err((ErrorType::InvalidState, "Timing not running")),
};
let duration = match stop_time.checked_sub(start_time) {
Some(duration) => duration,
None => {
return Err((
ErrorType::InvalidValue,
"Timer stopped with negative duration",
))
}
};
Ok(duration)
}
/// Cancel and remove the timer.
fn cancel(&mut self, id: TimerId) {
self.start_times.remove(&id);
}
}
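
Since `Timings` is private to this module, a sketch like the following would live in a unit test here; timestamps are assumed to come from a monotonic clock, in nanoseconds:

```rust
let mut timings = Timings::new();

let id = timings.set_start(1_000); // ids are handed out sequentially
assert_eq!(timings.set_stop(id, 4_500).ok(), Some(3_500));

// `set_stop` consumed the timer; stopping it again is an InvalidState
// error, as is stopping an id that was never started.
assert!(timings.set_stop(id, 9_000).is_err());

// `cancel` just forgets the start time and records nothing.
let id2 = timings.set_start(2_000);
timings.cancel(id2);
```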
/// A timing distribution metric.
///
/// Timing distributions are used to accumulate and store time measurements, for analyzing the distribution of the timing data.
#[derive(Debug)]
pub struct TimingDistributionMetric {
meta: CommonMetricData,
time_unit: TimeUnit,
timings: Timings,
}
/// Create a snapshot of the histogram with a time unit.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
DistributionData {
// **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
// specialized snapshot function.
values: hist.snapshot(),
sum: hist.sum(),
}
}
impl MetricType for TimingDistributionMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
fn meta_mut(&mut self) -> &mut CommonMetricData {
&mut self.meta
}
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl TimingDistributionMetric {
/// Creates a new timing distribution metric.
pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
Self {
meta,
time_unit,
timings: Timings::new(),
}
}
/// Starts tracking time for the provided metric.
///
/// This records an error if it's already tracking time (i.e. start was already
/// called with no corresponding [stop]): in that case the original
/// start time will be preserved.
///
/// # Arguments
///
/// * `start_time` - Timestamp in nanoseconds.
///
/// # Returns
///
/// A unique `TimerId` for the new timer.
pub fn set_start(&mut self, start_time: u64) -> TimerId {
self.timings.set_start(start_time)
}
/// Stops tracking time for the provided metric and associated timer id.
///
/// Adds a count to the corresponding bucket in the timing distribution.
/// This will record an error if no `start` was called.
///
/// # Arguments
///
/// * `id` - The `TimerId` to associate with this timing. This allows
/// for concurrent timing of events associated with different ids to the
/// same timespan metric.
/// * `stop_time` - Timestamp in nanoseconds.
pub fn set_stop_and_accumulate(&mut self, glean: &Glean, id: TimerId, stop_time: u64) {
// Duration is in nanoseconds.
let mut duration = match self.timings.set_stop(id, stop_time) {
Err((err_type, err_msg)) => {
record_error(glean, &self.meta, err_type, err_msg, None);
return;
}
Ok(duration) => duration,
};
let min_sample_time = self.time_unit.as_nanos(1);
let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);
duration = if duration < min_sample_time {
// If measurement is less than the minimum, just truncate. This is
// not recorded as an error.
min_sample_time
} else if duration > max_sample_time {
let msg = format!(
"Sample is longer than the max for a time_unit of {:?} ({} ns)",
self.time_unit, max_sample_time
);
record_error(glean, &self.meta, ErrorType::InvalidOverflow, msg, None);
max_sample_time
} else {
duration
};
if !self.should_record(glean) {
return;
}
glean
.storage()
.record_with(glean, &self.meta, |old_value| match old_value {
Some(Metric::TimingDistribution(mut hist)) => {
hist.accumulate(duration);
Metric::TimingDistribution(hist)
}
_ => {
let mut hist = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
hist.accumulate(duration);
Metric::TimingDistribution(hist)
}
});
}
/// Aborts a previous `set_start` call. No error is recorded if no `set_start`
/// was called.
///
/// # Arguments
///
/// * `id` - The `TimerId` to associate with this timing. This allows
/// for concurrent timing of events associated with different ids to the
/// same timing distribution metric.
pub fn cancel(&mut self, id: TimerId) {
self.timings.cancel(id);
}
/// Accumulates the provided signed samples in the metric.
///
/// This is required so that the platform-specific code can provide us with
/// 64 bit signed integers if no `u64` comparable type is available. This
/// will take care of filtering and reporting errors for any provided negative
/// sample.
///
/// Please note that this assumes that the provided samples are already in the
/// "unit" declared by the instance of the implementing metric type (e.g. if the
/// implementing class is a [TimingDistributionMetricType] and the instance this
/// method was called on is using [TimeUnit.Second], then `samples` are assumed
/// to be in that unit).
///
/// # Arguments
///
/// * `samples` - The vector holding the samples to be recorded by the metric.
///
/// ## Notes
///
/// Discards any negative value in `samples` and reports an `ErrorType::InvalidValue`
/// for each of them. Reports an `ErrorType::InvalidOverflow` error for samples that
/// are longer than `MAX_SAMPLE_TIME`.
pub fn accumulate_samples_signed(&mut self, glean: &Glean, samples: Vec<i64>) {
if !self.should_record(glean) {
return;
}
let mut num_negative_samples = 0;
let mut num_too_long_samples = 0;
let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);
glean.storage().record_with(glean, &self.meta, |old_value| {
let mut hist = match old_value {
Some(Metric::TimingDistribution(hist)) => hist,
_ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
};
for &sample in samples.iter() {
if sample < 0 {
num_negative_samples += 1;
} else {
let mut sample = sample as u64;
// Check the range prior to converting the incoming unit to
// nanoseconds, so we can compare against the constant
// MAX_SAMPLE_TIME.
if sample == 0 {
sample = 1;
} else if sample > MAX_SAMPLE_TIME {
num_too_long_samples += 1;
sample = MAX_SAMPLE_TIME;
}
sample = self.time_unit.as_nanos(sample);
hist.accumulate(sample);
}
}
Metric::TimingDistribution(hist)
});
if num_negative_samples > 0 {
let msg = format!("Accumulated {} negative samples", num_negative_samples);
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
msg,
num_negative_samples,
);
}
if num_too_long_samples > 0 {
let msg = format!(
"{} samples are longer than the maximum of {}",
num_too_long_samples, max_sample_time
);
record_error(
glean,
&self.meta,
ErrorType::InvalidOverflow,
msg,
num_too_long_samples,
);
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as a `DistributionData`.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::TimingDistribution(hist)) => Some(snapshot(&hist)),
_ => None,
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently-stored histogram as a JSON String of the serialized value.
///
/// This doesn't clear the stored value.
pub fn test_get_value_as_json_string(
&self,
glean: &Glean,
storage_name: &str,
) -> Option<String> {
self.test_get_value(glean, storage_name)
.map(|snapshot| serde_json::to_string(&snapshot).unwrap())
}
}
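
The clamping performed in `set_stop_and_accumulate` can be restated as a pure function; this is a sketch for clarity, where `min` and `max` stand for `time_unit.as_nanos(1)` and `time_unit.as_nanos(MAX_SAMPLE_TIME)`, and the returned flag says whether an `InvalidOverflow` error would be recorded:

```rust
fn clamp_duration(duration: u64, min: u64, max: u64) -> (u64, bool) {
    if duration < min {
        (min, false) // truncated silently, no error recorded
    } else if duration > max {
        (max, true) // clamped and reported as InvalidOverflow
    } else {
        (duration, false)
    }
}
```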
#[cfg(test)]
mod test {
use super::*;
#[test]
fn can_snapshot() {
use serde_json::json;
let mut hist = Histogram::functional(2.0, 8.0);
for i in 1..=10 {
hist.accumulate(i);
}
let snap = snapshot(&hist);
let expected_json = json!({
"sum": 55,
"values": {
"1": 1,
"2": 1,
"3": 1,
"4": 1,
"5": 1,
"6": 1,
"7": 1,
"8": 1,
"9": 1,
"10": 1,
"11": 0,
},
});
assert_eq!(expected_json, json!(snap));
}
#[test]
fn can_snapshot_sparse() {
use serde_json::json;
let mut hist = Histogram::functional(2.0, 8.0);
hist.accumulate(1024);
hist.accumulate(1024);
hist.accumulate(1116);
hist.accumulate(1448);
let snap = snapshot(&hist);
let expected_json = json!({
"sum": 4612,
"values": {
"1024": 2,
"1116": 1,
"1217": 0,
"1327": 0,
"1448": 1,
"1579": 0,
},
});
assert_eq!(expected_json, json!(snap));
}
}
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
use std::collections::HashMap;
use crate::error_recording::{record_error, ErrorType};
use crate::histogram::{Functional, Histogram};
use crate::metrics::time_unit::TimeUnit;
use crate::metrics::{DistributionData, Metric, MetricType};
use crate::storage::StorageManager;
use crate::CommonMetricData;
use crate::Glean;
// The base of the logarithm used to determine bucketing
const LOG_BASE: f64 = 2.0;
// The buckets per each order of magnitude of the logarithm.
const BUCKETS_PER_MAGNITUDE: f64 = 8.0;
// Maximum time, which means we retain a maximum of 316 buckets.
// It is automatically adjusted based on the `time_unit` parameter
// so that:
//
// - `nanosecond`: 10 minutes
// - `microsecond`: ~6.94 days
// - `millisecond`: ~19 years
const MAX_SAMPLE_TIME: u64 = 1000 * 1000 * 1000 * 60 * 10;
/// Identifier for a running timer.
pub type TimerId = u64;
#[derive(Debug, Clone)]
struct Timings {
next_id: TimerId,
start_times: HashMap<TimerId, u64>,
}
/// Track different running timers, identified by a `TimerId`.
impl Timings {
/// Create a new timing manager.
fn new() -> Self {
Self {
next_id: 0,
start_times: HashMap::new(),
}
}
/// Start a new timer and set it to the `start_time`.
///
/// Returns a new `TimerId` identifying the timer.
fn set_start(&mut self, start_time: u64) -> TimerId {
let id = self.next_id;
self.next_id += 1;
self.start_times.insert(id, start_time);
id
}
/// Stop the timer and return the elapsed time.
///
/// Returns an error if the `id` does not correspond to a running timer.
/// Returns an error if the stop time is before the start time.
///
/// ## Note
///
/// This API exists to satisfy the FFI requirements, where the clock is handled on the
/// application side and passed in as a timestamp.
fn set_stop(&mut self, id: TimerId, stop_time: u64) -> Result<u64, (ErrorType, &str)> {
let start_time = match self.start_times.remove(&id) {
Some(start_time) => start_time,
None => return Err((ErrorType::InvalidState, "Timing not running")),
};
let duration = match stop_time.checked_sub(start_time) {
Some(duration) => duration,
None => {
return Err((
ErrorType::InvalidValue,
"Timer stopped with negative duration",
))
}
};
Ok(duration)
}
/// Cancel and remove the timer.
fn cancel(&mut self, id: TimerId) {
self.start_times.remove(&id);
}
}
/// A timing distribution metric.
///
/// Timing distributions are used to accumulate and store time measurements, for analyzing the distribution of the timing data.
#[derive(Debug)]
pub struct TimingDistributionMetric {
meta: CommonMetricData,
time_unit: TimeUnit,
timings: Timings,
}
/// Create a snapshot of the histogram with a time unit.
///
/// The snapshot can be serialized into the payload format.
pub(crate) fn snapshot(hist: &Histogram<Functional>) -> DistributionData {
DistributionData {
// **Caution**: This cannot use `Histogram::snapshot_values` and needs to use the more
// specialized snapshot function.
values: hist.snapshot(),
sum: hist.sum(),
}
}
impl MetricType for TimingDistributionMetric {
fn meta(&self) -> &CommonMetricData {
&self.meta
}
fn meta_mut(&mut self) -> &mut CommonMetricData {
&mut self.meta
}
}
impl TimingDistributionMetric {
/// Create a new timing distribution metric.
pub fn new(meta: CommonMetricData, time_unit: TimeUnit) -> Self {
Self {
meta,
time_unit,
timings: Timings::new(),
}
}
/// Start tracking time for the provided metric.
///
/// This records an error if it's already tracking time (i.e. start was already
/// called with no corresponding [stop]): in that case the original
/// start time will be preserved.
///
/// ## Arguments
///
/// * `start_time` - Timestamp in nanoseconds.
///
/// ## Return value
///
/// Returns a unique `TimerId` for the new timer.
pub fn set_start(&mut self, start_time: u64) -> TimerId {
self.timings.set_start(start_time)
}
/// Stop tracking time for the provided metric and associated timer id.
/// Add a count to the corresponding bucket in the timing distribution.
/// This will record an error if no `start` was called.
///
/// ## Arguments
///
/// * `id` - The `TimerId` to associate with this timing. This allows
/// for concurrent timing of events associated with different ids to the
/// same timespan metric.
/// * `stop_time` - Timestamp in nanoseconds.
pub fn set_stop_and_accumulate(&mut self, glean: &Glean, id: TimerId, stop_time: u64) {
// Duration is in nanoseconds.
let mut duration = match self.timings.set_stop(id, stop_time) {
Err((err_type, err_msg)) => {
record_error(glean, &self.meta, err_type, err_msg, None);
return;
}
Ok(duration) => duration,
};
let min_sample_time = self.time_unit.as_nanos(1);
let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);
duration = if duration < min_sample_time {
// If measurement is less than the minimum, just truncate. This is
// not recorded as an error.
min_sample_time
} else if duration > max_sample_time {
let msg = format!(
"Sample is longer than the max for a time_unit of {:?} ({} ns)",
self.time_unit, max_sample_time
);
record_error(glean, &self.meta, ErrorType::InvalidOverflow, msg, None);
max_sample_time
} else {
duration
};
if !self.should_record(glean) {
return;
}
glean
.storage()
.record_with(glean, &self.meta, |old_value| match old_value {
Some(Metric::TimingDistribution(mut hist)) => {
hist.accumulate(duration);
Metric::TimingDistribution(hist)
}
_ => {
let mut hist = Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE);
hist.accumulate(duration);
Metric::TimingDistribution(hist)
}
});
}
/// Abort a previous `set_start` call. No error is recorded if no `set_start`
/// was called.
///
/// ## Arguments
///
/// * `id` - The `TimerId` to associate with this timing. This allows
/// for concurrent timing of events associated with different ids to the
/// same timing distribution metric.
pub fn cancel(&mut self, id: TimerId) {
self.timings.cancel(id);
}
/// Accumulates the provided signed samples in the metric.
///
/// This is required so that the platform-specific code can provide us with
/// 64 bit signed integers if no `u64` comparable type is available. This
/// will take care of filtering and reporting errors for any provided negative
/// sample.
///
/// Please note that this assumes that the provided samples are already in the
/// "unit" declared by the instance of the implementing metric type (e.g. if the
/// implementing class is a [TimingDistributionMetricType] and the instance this
/// method was called on is using [TimeUnit.Second], then `samples` are assumed
/// to be in that unit).
///
/// ## Arguments
///
/// * `samples` - The vector holding the samples to be recorded by the metric.
///
/// ## Notes
///
/// Discards any negative value in `samples` and reports an `ErrorType::InvalidValue`
/// for each of them. Reports an `ErrorType::InvalidOverflow` error for samples that
/// are longer than `MAX_SAMPLE_TIME`.
pub fn accumulate_samples_signed(&mut self, glean: &Glean, samples: Vec<i64>) {
let mut num_negative_samples = 0;
let mut num_too_long_samples = 0;
let max_sample_time = self.time_unit.as_nanos(MAX_SAMPLE_TIME);
glean.storage().record_with(glean, &self.meta, |old_value| {
let mut hist = match old_value {
Some(Metric::TimingDistribution(hist)) => hist,
_ => Histogram::functional(LOG_BASE, BUCKETS_PER_MAGNITUDE),
};
for &sample in samples.iter() {
if sample < 0 {
num_negative_samples += 1;
} else {
let mut sample = sample as u64;
// Check the range prior to converting the incoming unit to
// nanoseconds, so we can compare against the constant
// MAX_SAMPLE_TIME.
if sample == 0 {
sample = 1;
} else if sample > MAX_SAMPLE_TIME {
num_too_long_samples += 1;
sample = MAX_SAMPLE_TIME;
}
sample = self.time_unit.as_nanos(sample);
hist.accumulate(sample);
}
}
Metric::TimingDistribution(hist)
});
if num_negative_samples > 0 {
let msg = format!("Accumulated {} negative samples", num_negative_samples);
record_error(
glean,
&self.meta,
ErrorType::InvalidValue,
msg,
num_negative_samples,
);
}
if num_too_long_samples > 0 {
let msg = format!(
"{} samples are longer than the maximum of {}",
num_too_long_samples, max_sample_time
);
record_error(
glean,
&self.meta,
ErrorType::InvalidOverflow,
msg,
num_too_long_samples,
);
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Get the currently stored value as a `DistributionData`.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<DistributionData> {
match StorageManager.snapshot_metric(
glean.storage(),
storage_name,
&self.meta.identifier(glean),
) {
Some(Metric::TimingDistribution(hist)) => Some(snapshot(&hist)),
_ => None,
}
}
/// **Test-only API (exported for FFI purposes).**
///
/// Get the currently-stored histogram as a JSON String of the serialized value.
///
/// This doesn't clear the stored value.
pub fn test_get_value_as_json_string(
&self,
glean: &Glean,
storage_name: &str,
) -> Option<String> {
self.test_get_value(glean, storage_name)
.map(|snapshot| serde_json::to_string(&snapshot).unwrap())
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn can_snapshot() {
use serde_json::json;
let mut hist = Histogram::functional(2.0, 8.0);
for i in 1..=10 {
hist.accumulate(i);
}
let snap = snapshot(&hist);
let expected_json = json!({
"sum": 55,
"values": {
"1": 1,
"2": 1,
"3": 1,
"4": 1,
"5": 1,
"6": 1,
"7": 1,
"8": 1,
"9": 1,
"10": 1,
"11": 0,
},
});
assert_eq!(expected_json, json!(snap));
}
#[test]
fn can_snapshot_sparse() {
use serde_json::json;
let mut hist = Histogram::functional(2.0, 8.0);
hist.accumulate(1024);
hist.accumulate(1024);
hist.accumulate(1116);
hist.accumulate(1448);
let snap = snapshot(&hist);
let expected_json = json!({
"sum": 4612,
"values": {
"1024": 2,
"1116": 1,
"1217": 0,
"1327": 0,
"1448": 1,
"1579": 0,
},
});
assert_eq!(expected_json, json!(snap));
}
}


@ -28,19 +28,15 @@ impl MetricType for UuidMetric {
}
}
// IMPORTANT:
//
// When changing this implementation, make sure all the operations are
// also declared in the related trait in `../traits/`.
impl UuidMetric {
/// Creates a new UUID metric
/// Create a new UUID metric
pub fn new(meta: CommonMetricData) -> Self {
Self { meta }
}
/// Sets to the specified value.
/// Set to the specified value.
///
/// # Arguments
/// ## Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
/// * `value` - The UUID to set the metric to.
@ -54,9 +50,9 @@ impl UuidMetric {
glean.storage().record(glean, &self.meta, &value)
}
/// Generates a new random UUID and set the metric to it.
/// Generate a new random UUID and set the metric to it.
///
/// # Arguments
/// ## Arguments
///
/// * `glean` - The Glean instance this metric belongs to.
pub fn generate_and_set(&self, storage: &Glean) -> Uuid {
@ -65,16 +61,16 @@ impl UuidMetric {
uuid
}
/// Gets the stored Uuid value.
/// Get the stored Uuid value.
///
/// # Arguments
/// ## Arguments
///
/// * `glean` - the Glean instance this metric belongs to.
/// * `storage_name` - the storage name to look into.
///
/// # Returns
/// ## Return value
///
/// The stored value or `None` if nothing stored.
/// Returns the stored value or `None` if nothing stored.
pub(crate) fn get_value(&self, glean: &Glean, storage_name: &str) -> Option<Uuid> {
match StorageManager.snapshot_metric(
glean.storage(),
@ -88,7 +84,7 @@ impl UuidMetric {
/// **Test-only API (exported for FFI purposes).**
///
/// Gets the currently stored value as a string.
/// Get the currently stored value as a string.
///
/// This doesn't clear the stored value.
pub fn test_get_value(&self, glean: &Glean, storage_name: &str) -> Option<String> {
