Mirror of https://github.com/mozilla/gecko-dev.git

Bug 1626125 - Vendor sync15-traits into mozilla-central. r=lina

Vendor sync15-traits from application-services.

Differential Revision: https://phabricator.services.mozilla.com/D69099

--HG--
extra : moz-landing-system : lando

This commit is contained in:
Parent: e84dae97d8
Commit: 5265a52464
@@ -17,6 +17,11 @@ git = "https://github.com/mozilla/neqo"
replace-with = "vendored-sources"
tag = "v0.2.2"

[source."https://github.com/mozilla/application-services"]
git = "https://github.com/mozilla/application-services"
replace-with = "vendored-sources"
rev = "120e51dd5f2aab4194cf0f7e93b2a8923f4504bb"

[source."https://github.com/mozilla-spidermonkey/jsparagus"]
git = "https://github.com/mozilla-spidermonkey/jsparagus"
replace-with = "vendored-sources"
@@ -1700,6 +1700,7 @@ dependencies = [
"shift_or_euc_c",
"static_prefs",
"storage",
"sync15-traits",
"unic-langid",
"unic-langid-ffi",
"webrender_bindings",
@@ -4201,6 +4202,28 @@ dependencies = [
"unicode-xid",
]

[[package]]
name = "sync-guid"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=120e51dd5f2aab4194cf0f7e93b2a8923f4504bb#120e51dd5f2aab4194cf0f7e93b2a8923f4504bb"
dependencies = [
"serde",
]

[[package]]
name = "sync15-traits"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=120e51dd5f2aab4194cf0f7e93b2a8923f4504bb#120e51dd5f2aab4194cf0f7e93b2a8923f4504bb"
dependencies = [
"failure",
"ffi-support",
"log",
"serde",
"serde_json",
"sync-guid",
"url",
]

[[package]]
name = "synstructure"
version = "0.12.1"
File diffs are hidden because one or more lines are too long.
|
@@ -0,0 +1,101 @@
|
|||
language: rust
|
||||
# sudo is required to enable kcov to use the personality syscall
|
||||
sudo: required
|
||||
dist: trusty
|
||||
cache: cargo
|
||||
|
||||
rust:
|
||||
- nightly
|
||||
- beta
|
||||
- stable
|
||||
- 1.31.0
|
||||
|
||||
env:
|
||||
matrix:
|
||||
- FEATURES='--features "regexp regexp_macros"'
|
||||
|
||||
before_script:
|
||||
- eval git pull --rebase https://github.com/Geal/nom master
|
||||
- eval git log --pretty=oneline HEAD~5..HEAD
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- rust: nightly
|
||||
env: FEATURES='--no-default-features'
|
||||
- rust: nightly
|
||||
env: FEATURES='--no-default-features --features "alloc"'
|
||||
- rust: stable
|
||||
env: FEATURES=''
|
||||
- rust: nightly
|
||||
env: DOC_FEATURES='--features "std lexical regexp regexp_macros" --no-default-features'
|
||||
before_script:
|
||||
- export PATH=$HOME/.cargo/bin:$PATH
|
||||
script:
|
||||
- eval cargo doc --verbose $DOC_FEATURES
|
||||
- rust: nightly
|
||||
env: FEATURES=''
|
||||
before_script:
|
||||
- export PATH=$HOME/.cargo/bin:$PATH
|
||||
- cargo install cargo-update || echo "cargo-update already installed"
|
||||
- cargo install cargo-travis || echo "cargo-travis already installed"
|
||||
- cargo install-update -a
|
||||
- mkdir -p target/kcov-master
|
||||
script:
|
||||
cargo coveralls --verbose --all-features
|
||||
allow_failures:
|
||||
- rust: stable
|
||||
env: FEATURES=''
|
||||
before_script:
|
||||
- export PATH=$HOME/.cargo/bin:$PATH
|
||||
- rustup component add rustfmt-preview
|
||||
script:
|
||||
- eval cargo fmt -- --write-mode=diff
|
||||
|
||||
notifications:
|
||||
webhooks:
|
||||
urls:
|
||||
- https://webhooks.gitter.im/e/9c035a194ac4fd4cc061
|
||||
on_success: change
|
||||
on_failure: always
|
||||
on_start: false
|
||||
|
||||
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- libcurl4-openssl-dev
|
||||
- libelf-dev
|
||||
- libdw-dev
|
||||
- binutils-dev
|
||||
- cmake
|
||||
sources:
|
||||
- kalakris-cmake
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- /home/travis/.cargo
|
||||
|
||||
before_cache:
|
||||
- rm -rf /home/travis/.cargo/registry
|
||||
|
||||
script:
|
||||
- eval cargo build --verbose $FEATURES
|
||||
- eval cargo test --verbose $FEATURES
|
||||
|
||||
after_success: |
|
||||
case "$TRAVIS_RUST_VERSION" in
|
||||
nightly)
|
||||
if [ "${TRAVIS_PULL_REQUEST_BRANCH:-$TRAVIS_BRANCH}" != "master" ]; then
|
||||
git fetch &&
|
||||
git checkout master &&
|
||||
cargo bench --verbose
|
||||
fi
|
||||
|
||||
if [ "$FEATURES" == '--features "regexp regexp_macros"' ]; then
|
||||
cargo bench --verbose
|
||||
fi
|
||||
;;
|
||||
|
||||
*)
|
||||
;;
|
||||
esac
|
|
@@ -0,0 +1 @@
|
|||
{"files":{"Cargo.toml":"b5cc525d2aa129f84cb3f729a579217591c7705e2be78dbd348a95fc354831be","src/lib.rs":"729e562be4e63ec7db2adc00753a019ae77c11ce82637a893ea18122580c3c98","src/rusqlite_support.rs":"827d314605d8c741efdf238a0780a891c88bc56026a3e6dcfa534772a4852fb3","src/serde_support.rs":"519b5eb59ca7be555d522f2186909db969069dc9586a5fe4047d4ec176b2368a"},"package":null}
|
|
@@ -0,0 +1,22 @@
[package]
name = "sync-guid"
version = "0.1.0"
authors = ["Thom Chiovoloni <tchiovoloni@mozilla.com>"]
license = "MPL-2.0"
edition = "2018"

[dependencies]
rusqlite = { version = "0.21.0", optional = true }
serde = { version = "1.0.104", optional = true }
rand = { version = "0.7", optional = true }
base64 = { version = "0.12.0", optional = true }

[features]
random = ["rand", "base64"]
rusqlite_support = ["rusqlite"]
serde_support = ["serde"]
# By default we support serde, but not rusqlite.
default = ["serde_support"]

[dev-dependencies]
serde_test = "1.0.104"
@@ -0,0 +1,466 @@
|
|||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#![allow(unknown_lints)]
|
||||
#![warn(rust_2018_idioms)]
|
||||
// (It's tempting to avoid the utf8 checks, but they're easy to get wrong, so)
|
||||
#![deny(unsafe_code)]
|
||||
#[cfg(feature = "serde_support")]
|
||||
mod serde_support;
|
||||
|
||||
#[cfg(feature = "rusqlite_support")]
|
||||
mod rusqlite_support;
|
||||
|
||||
use std::{
|
||||
cmp::Ordering,
|
||||
fmt,
|
||||
hash::{Hash, Hasher},
|
||||
ops, str,
|
||||
};
|
||||
|
||||
/// This is a type intended to be used to represent the guids used by sync. It
|
||||
/// has several benefits over using a `String`:
|
||||
///
|
||||
/// 1. It's more explicit about what is being stored, and could prevent bugs
|
||||
/// where a Guid is passed to a function expecting text.
|
||||
///
|
||||
/// 2. Guids are guaranteed to be immutable.
|
||||
///
|
||||
/// 3. It's optimized for the guids commonly used by sync. In particular, short guids
|
||||
/// (including the guids which would meet `PlacesUtils.isValidGuid`) do not incur
|
||||
/// any heap allocation, and are stored inline.
|
||||
#[derive(Clone)]
|
||||
pub struct Guid(Repr);
|
||||
|
||||
// The internal representation of a GUID. Most Sync GUIDs are 12 bytes,
|
||||
// and contain only base64url characters; we can store them on the stack
|
||||
// without a heap allocation. However, arbitrary ascii guids of up to length 64
|
||||
// are possible, in which case we fall back to a heap-allocated string.
|
||||
//
|
||||
// This is separate only because making `Guid` an enum would expose the
|
||||
// internals.
|
||||
#[derive(Clone)]
|
||||
enum Repr {
|
||||
// see FastGuid for invariants
|
||||
Fast(FastGuid),
|
||||
|
||||
// invariants:
|
||||
// - _0.len() > MAX_FAST_GUID_LEN
|
||||
Slow(String),
|
||||
}
|
||||
|
||||
/// Invariants:
|
||||
///
|
||||
/// - `len <= MAX_FAST_GUID_LEN`.
|
||||
/// - `data[0..len]` encodes valid utf8.
|
||||
/// - `data[len..].iter().all(|&b| b == b'\0')`
|
||||
///
|
||||
/// Note: None of these are required for memory safety, just correctness.
|
||||
#[derive(Clone)]
|
||||
struct FastGuid {
|
||||
len: u8,
|
||||
data: [u8; MAX_FAST_GUID_LEN],
|
||||
}
|
||||
|
||||
// This is the maximum length (experimentally determined) we can make it before
|
||||
// `Repr::Fast` is larger than `Repr::Slow` on 32 bit systems. The important
|
||||
// thing is really that it's not too big, and is above 12 bytes.
|
||||
const MAX_FAST_GUID_LEN: usize = 14;
|
||||
|
||||
impl FastGuid {
|
||||
#[inline]
|
||||
fn from_slice(bytes: &[u8]) -> Self {
|
||||
// Checked by the caller, so debug_assert is fine.
|
||||
debug_assert!(
|
||||
can_use_fast(bytes),
|
||||
"Bug: Caller failed to check can_use_fast: {:?}",
|
||||
bytes
|
||||
);
|
||||
let mut data = [0u8; MAX_FAST_GUID_LEN];
|
||||
data[0..bytes.len()].copy_from_slice(bytes);
|
||||
FastGuid {
|
||||
len: bytes.len() as u8,
|
||||
data,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn as_str(&self) -> &str {
|
||||
// Note: we only use debug_assert! to ensure valid utf8-ness, so this needs to stay a checked conversion.
|
||||
str::from_utf8(self.bytes()).expect("Invalid fast guid bytes!")
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn len(&self) -> usize {
|
||||
self.len as usize
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn bytes(&self) -> &[u8] {
|
||||
&self.data[0..self.len()]
|
||||
}
|
||||
}
|
||||
|
||||
// Returns:
|
||||
// - true to use Repr::Fast
|
||||
// - false to use Repr::Slow
|
||||
#[inline]
|
||||
fn can_use_fast<T: ?Sized + AsRef<[u8]>>(bytes: &T) -> bool {
|
||||
let bytes = bytes.as_ref();
|
||||
// This is fine as a debug_assert since we'll still panic if it's ever used
|
||||
// in such a way where it would matter.
|
||||
debug_assert!(str::from_utf8(bytes).is_ok());
|
||||
bytes.len() <= MAX_FAST_GUID_LEN
|
||||
}
|
||||
|
||||
impl Guid {
|
||||
/// Create a guid from a `str`.
|
||||
#[inline]
|
||||
pub fn new(s: &str) -> Self {
|
||||
Guid::from_slice(s.as_ref())
|
||||
}
|
||||
|
||||
/// Create an empty guid. Usable as a constant.
|
||||
#[inline]
|
||||
pub const fn empty() -> Self {
|
||||
Guid(Repr::Fast(FastGuid {
|
||||
len: 0,
|
||||
data: [0u8; MAX_FAST_GUID_LEN],
|
||||
}))
|
||||
}
|
||||
|
||||
/// Create a random guid (of 12 base64url characters). Requires the `random`
|
||||
/// feature.
|
||||
#[cfg(feature = "random")]
|
||||
pub fn random() -> Self {
|
||||
let bytes: [u8; 9] = rand::random();
|
||||
|
||||
// Note: only first 12 bytes are used, but remaining are required to
|
||||
// build the FastGuid
|
||||
let mut output = [0u8; MAX_FAST_GUID_LEN];
|
||||
|
||||
let bytes_written =
|
||||
base64::encode_config_slice(&bytes, base64::URL_SAFE_NO_PAD, &mut output[..12]);
|
||||
|
||||
debug_assert!(bytes_written == 12);
|
||||
|
||||
Guid(Repr::Fast(FastGuid {
|
||||
len: 12,
|
||||
data: output,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Convert `b` into a `Guid`.
|
||||
#[inline]
|
||||
pub fn from_string(s: String) -> Self {
|
||||
Guid::from_vec(s.into_bytes())
|
||||
}
|
||||
|
||||
/// Convert `b` into a `Guid`.
|
||||
#[inline]
|
||||
pub fn from_slice(b: &[u8]) -> Self {
|
||||
if can_use_fast(b) {
|
||||
Guid(Repr::Fast(FastGuid::from_slice(b)))
|
||||
} else {
|
||||
Guid::new_slow(b.into())
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert `v` to a `Guid`, consuming it.
|
||||
#[inline]
|
||||
pub fn from_vec(v: Vec<u8>) -> Self {
|
||||
if can_use_fast(&v) {
|
||||
Guid(Repr::Fast(FastGuid::from_slice(&v)))
|
||||
} else {
|
||||
Guid::new_slow(v)
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the data backing this `Guid` as a `&[u8]`.
|
||||
#[inline]
|
||||
pub fn as_bytes(&self) -> &[u8] {
|
||||
match &self.0 {
|
||||
Repr::Fast(rep) => rep.bytes(),
|
||||
Repr::Slow(rep) => rep.as_ref(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the data backing this `Guid` as a `&str`.
|
||||
#[inline]
|
||||
pub fn as_str(&self) -> &str {
|
||||
match &self.0 {
|
||||
Repr::Fast(rep) => rep.as_str(),
|
||||
Repr::Slow(rep) => rep.as_ref(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert this `Guid` into a `String`, consuming it in the process.
|
||||
#[inline]
|
||||
pub fn into_string(self) -> String {
|
||||
match self.0 {
|
||||
Repr::Fast(rep) => rep.as_str().into(),
|
||||
Repr::Slow(rep) => rep,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true for Guids that are deemed valid by the sync server.
|
||||
/// See https://github.com/mozilla-services/server-syncstorage/blob/d92ef07877aebd05b92f87f6ade341d6a55bffc8/syncstorage/bso.py#L24
|
||||
pub fn is_valid_for_sync_server(&self) -> bool {
|
||||
!self.is_empty()
|
||||
&& self.len() <= 64
|
||||
&& self.bytes().all(|b| b >= b' ' && b <= b'~' && b != b',')
|
||||
}
|
||||
|
||||
/// Returns true for Guids that are valid places guids, and false for all others.
|
||||
pub fn is_valid_for_places(&self) -> bool {
|
||||
self.len() == 12 && self.bytes().all(Guid::is_valid_places_byte)
|
||||
}
|
||||
|
||||
/// Returns true if the byte `b` is a valid base64url byte.
|
||||
#[inline]
|
||||
pub fn is_valid_places_byte(b: u8) -> bool {
|
||||
BASE64URL_BYTES[b as usize] == 1
|
||||
}
|
||||
|
||||
#[cold]
|
||||
fn new_slow(v: Vec<u8>) -> Self {
|
||||
assert!(
|
||||
!can_use_fast(&v),
|
||||
"Could use fast for guid (len = {})",
|
||||
v.len()
|
||||
);
|
||||
Guid(Repr::Slow(
|
||||
String::from_utf8(v).expect("Invalid slow guid bytes!"),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
// This is used to implement the places tests.
|
||||
const BASE64URL_BYTES: [u8; 256] = [
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
|
||||
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
|
||||
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
];
|
||||
|
||||
impl Ord for Guid {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
self.as_bytes().cmp(&other.as_bytes())
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for Guid {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for Guid {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.as_bytes() == other.as_bytes()
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for Guid {}
|
||||
|
||||
impl Hash for Guid {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.as_bytes().hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<&'a str> for Guid {
|
||||
#[inline]
|
||||
fn from(s: &'a str) -> Guid {
|
||||
Guid::from_slice(s.as_ref())
|
||||
}
|
||||
}
|
||||
impl<'a> From<&'a &str> for Guid {
|
||||
#[inline]
|
||||
fn from(s: &'a &str) -> Guid {
|
||||
Guid::from_slice(s.as_ref())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<&'a [u8]> for Guid {
|
||||
#[inline]
|
||||
fn from(s: &'a [u8]) -> Guid {
|
||||
Guid::from_slice(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for Guid {
|
||||
#[inline]
|
||||
fn from(s: String) -> Guid {
|
||||
Guid::from_string(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Vec<u8>> for Guid {
|
||||
#[inline]
|
||||
fn from(v: Vec<u8>) -> Guid {
|
||||
Guid::from_vec(v)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Guid> for String {
|
||||
#[inline]
|
||||
fn from(guid: Guid) -> String {
|
||||
guid.into_string()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Guid> for Vec<u8> {
|
||||
#[inline]
|
||||
fn from(guid: Guid) -> Vec<u8> {
|
||||
guid.into_string().into_bytes()
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for Guid {
|
||||
#[inline]
|
||||
fn as_ref(&self) -> &str {
|
||||
self.as_str()
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<[u8]> for Guid {
|
||||
#[inline]
|
||||
fn as_ref(&self) -> &[u8] {
|
||||
self.as_bytes()
|
||||
}
|
||||
}
|
||||
|
||||
impl ops::Deref for Guid {
|
||||
type Target = str;
|
||||
#[inline]
|
||||
fn deref(&self) -> &str {
|
||||
self.as_str()
|
||||
}
|
||||
}
|
||||
|
||||
// The default Debug impl is pretty unhelpful here.
|
||||
impl fmt::Debug for Guid {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "Guid({:?})", self.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Guid {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
fmt::Display::fmt(self.as_str(), f)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::default::Default for Guid {
|
||||
/// Create a default guid by calling `Guid::empty()`
|
||||
#[inline]
|
||||
fn default() -> Self {
|
||||
Guid::empty()
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! impl_guid_eq {
|
||||
($($other: ty),+) => {$(
|
||||
impl<'a> PartialEq<$other> for Guid {
|
||||
#[inline]
|
||||
fn eq(&self, other: &$other) -> bool {
|
||||
PartialEq::eq(AsRef::<[u8]>::as_ref(self), AsRef::<[u8]>::as_ref(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> PartialEq<Guid> for $other {
|
||||
#[inline]
|
||||
fn eq(&self, other: &Guid) -> bool {
|
||||
PartialEq::eq(AsRef::<[u8]>::as_ref(self), AsRef::<[u8]>::as_ref(other))
|
||||
}
|
||||
}
|
||||
)+}
|
||||
}
|
||||
|
||||
// Implement direct comparison with some common types from the stdlib.
|
||||
impl_guid_eq![str, &'a str, String, [u8], &'a [u8], Vec<u8>];
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_base64url_bytes() {
|
||||
let mut expect = [0u8; 256];
|
||||
for b in b'0'..=b'9' {
|
||||
expect[b as usize] = 1;
|
||||
}
|
||||
for b in b'a'..=b'z' {
|
||||
expect[b as usize] = 1;
|
||||
}
|
||||
for b in b'A'..=b'Z' {
|
||||
expect[b as usize] = 1;
|
||||
}
|
||||
expect[b'_' as usize] = 1;
|
||||
expect[b'-' as usize] = 1;
|
||||
assert_eq!(&BASE64URL_BYTES[..], &expect[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_valid_for_places() {
|
||||
assert!(Guid::from("aaaabbbbcccc").is_valid_for_places());
|
||||
assert!(Guid::from_slice(b"09_az-AZ_09-").is_valid_for_places());
|
||||
assert!(!Guid::from("aaaabbbbccccd").is_valid_for_places()); // too long
|
||||
assert!(!Guid::from("aaaabbbbccc").is_valid_for_places()); // too short
|
||||
assert!(!Guid::from("aaaabbbbccc=").is_valid_for_places()); // right length, bad character
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_comparison() {
|
||||
assert_eq!(Guid::from("abcdabcdabcd"), "abcdabcdabcd");
|
||||
assert_ne!(Guid::from("abcdabcdabcd".to_string()), "ABCDabcdabcd");
|
||||
|
||||
assert_eq!(Guid::from("abcdabcdabcd"), &b"abcdabcdabcd"[..]); // b"abcdabcdabcd" has type &[u8; 12]...
|
||||
assert_ne!(Guid::from(&b"abcdabcdabcd"[..]), &b"ABCDabcdabcd"[..]);
|
||||
|
||||
assert_eq!(
|
||||
Guid::from(b"abcdabcdabcd"[..].to_owned()),
|
||||
"abcdabcdabcd".to_string()
|
||||
);
|
||||
assert_ne!(Guid::from("abcdabcdabcd"), "ABCDabcdabcd".to_string());
|
||||
|
||||
assert_eq!(
|
||||
Guid::from("abcdabcdabcd1234"),
|
||||
Vec::from(b"abcdabcdabcd1234".as_ref())
|
||||
);
|
||||
assert_ne!(
|
||||
Guid::from("abcdabcdabcd4321"),
|
||||
Vec::from(b"ABCDabcdabcd4321".as_ref())
|
||||
);
|
||||
|
||||
// order by data instead of length
|
||||
assert!(Guid::from("zzz") > Guid::from("aaaaaa"));
|
||||
assert!(Guid::from("ThisIsASolowGuid") < Guid::from("zzz"));
|
||||
assert!(Guid::from("ThisIsASolowGuid") > Guid::from("AnotherSlowGuid"));
|
||||
}
|
||||
|
||||
#[cfg(feature = "random")]
|
||||
#[test]
|
||||
fn test_random() {
|
||||
use std::collections::HashSet;
|
||||
// Used to verify uniqueness within our sample of 1000. Could cause
|
||||
// random failures, but desktop has the same test, and it's never caused
|
||||
// a problem AFAIK.
|
||||
let mut seen: HashSet<String> = HashSet::new();
|
||||
for _ in 0..1000 {
|
||||
let g = Guid::random();
|
||||
assert_eq!(g.len(), 12);
|
||||
assert!(g.is_valid_for_places());
|
||||
let decoded = base64::decode_config(&g, base64::URL_SAFE_NO_PAD).unwrap();
|
||||
assert_eq!(decoded.len(), 9);
|
||||
let no_collision = seen.insert(g.clone().into_string());
|
||||
assert!(no_collision, "{}", g);
|
||||
}
|
||||
}
|
||||
}
|
|
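The `Guid` type above stores guids of up to 14 bytes inline and falls back to a heap-allocated `String` for longer ones. A minimal usage sketch (not part of the commit), assuming a consumer crate that depends on the vendored `sync-guid` package; the function name and guid values are made up:

```rust
// Illustrative sketch only; the guid values below are hypothetical.
use sync_guid::Guid;

fn guid_example() {
    // A 12-character base64url guid uses the inline (stack) representation
    // and is a valid places guid.
    let g = Guid::new("aaaabbbbcccc");
    assert!(g.is_valid_for_places());
    assert!(g.is_valid_for_sync_server());

    // Direct comparison with ordinary string types goes through impl_guid_eq.
    assert_eq!(g, "aaaabbbbcccc");

    // Anything longer than MAX_FAST_GUID_LEN transparently falls back to the
    // heap-allocated representation; it is still acceptable to the sync server.
    let long = Guid::from("ThisGuidIsLongerThanFourteenBytes");
    assert!(!long.is_valid_for_places());
    assert!(long.is_valid_for_sync_server());
}
```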
@@ -0,0 +1,23 @@
|
|||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#![cfg(feature = "rusqlite_support")]
|
||||
|
||||
use crate::Guid;
|
||||
use rusqlite::{
|
||||
self,
|
||||
types::{FromSql, FromSqlResult, ToSql, ToSqlOutput, ValueRef},
|
||||
};
|
||||
|
||||
impl ToSql for Guid {
|
||||
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
|
||||
Ok(ToSqlOutput::from(self.as_str()))
|
||||
}
|
||||
}
|
||||
|
||||
impl FromSql for Guid {
|
||||
fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
|
||||
value.as_str().map(Guid::from)
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,61 @@
|
|||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#![cfg(feature = "serde_support")]
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use serde::{
|
||||
de::{self, Deserialize, Deserializer, Visitor},
|
||||
ser::{Serialize, Serializer},
|
||||
};
|
||||
|
||||
use crate::Guid;
|
||||
|
||||
struct GuidVisitor;
|
||||
impl<'de> Visitor<'de> for GuidVisitor {
|
||||
type Value = Guid;
|
||||
#[inline]
|
||||
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
formatter.write_str("a sync guid")
|
||||
}
|
||||
#[inline]
|
||||
fn visit_str<E: de::Error>(self, s: &str) -> Result<Self::Value, E> {
|
||||
Ok(Guid::from_slice(s.as_ref()))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for Guid {
|
||||
#[inline]
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
deserializer.deserialize_str(GuidVisitor)
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for Guid {
|
||||
#[inline]
|
||||
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
|
||||
serializer.serialize_str(self.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use serde_test::{assert_tokens, Token};
|
||||
#[test]
|
||||
fn test_ser_de() {
|
||||
let guid = Guid::from("asdffdsa12344321");
|
||||
assert_tokens(&guid, &[Token::Str("asdffdsa12344321")]);
|
||||
|
||||
let guid = Guid::from("");
|
||||
assert_tokens(&guid, &[Token::Str("")]);
|
||||
|
||||
let guid = Guid::from(&b"abcd43211234"[..]);
|
||||
assert_tokens(&guid, &[Token::Str("abcd43211234")]);
|
||||
}
|
||||
}
|
|
@@ -0,0 +1 @@
|
|||
{"files":{"Cargo.toml":"326b1c017a76b1987e34c6dde0fa57f2c85d5de23a9f0cf1dfb029cc99d34471","README.md":"396105211d8ce7f40b05d8062d7ab55d99674555f3ac81c061874ae26656ed7e","src/changeset.rs":"442aa92b5130ec0f8f2b0054acb399c547380e0060015cbf4ca7a72027440d54","src/client.rs":"6be4f550ade823fafc350c5490e031f90a4af833a9bba9739b05568464255a74","src/lib.rs":"9abce82e0248c8aa7e3d55b7db701b95e8f337f6e5d1319381f995a0b708400d","src/payload.rs":"09db1a444e7893990a4f03cb16263b9c15abc9e48ec4f1343227be1b490865a5","src/request.rs":"9e656ec487e53c7485643687e605d73bb25e138056e920d6f4b7d63fc6a8c460","src/server_timestamp.rs":"43d1b98a90e55e49380a0b66c209c9eb393e2aeaa27d843a4726d93cdd4cea02","src/store.rs":"10e215dd24270b6bec10903ac1d5274ce997eb437134f43be7de44e36fb9d1e4","src/telemetry.rs":"027befb099a6fcded3457f7e566296548a0898ff613267190621856b9ef288f6"},"package":null}
|
|
@@ -0,0 +1,18 @@
[package]
name = "sync15-traits"
version = "0.1.0"
authors = ["Thom Chiovoloni <tchiovoloni@mozilla.com>"]
license = "MPL-2.0"
edition = "2018"

[features]
random-guid = ["sync-guid/random"]

[dependencies]
sync-guid = { path = "../guid" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
log = "0.4"
ffi-support = "0.4"
url = "2.1"
failure = "0.1.6"
@@ -0,0 +1,4 @@
# sync15-traits

Extracted types and traits from sync15. Usable for cases where depending on the
sync15 crate is impossible (like in remerge).
@@ -0,0 +1,33 @@
|
|||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
use crate::{Payload, ServerTimestamp};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RecordChangeset<P> {
|
||||
pub changes: Vec<P>,
|
||||
/// For GETs, the last sync timestamp that should be persisted after
|
||||
/// applying the records.
|
||||
/// For POSTs, this is the XIUS timestamp.
|
||||
pub timestamp: ServerTimestamp,
|
||||
pub collection: std::borrow::Cow<'static, str>,
|
||||
}
|
||||
|
||||
pub type IncomingChangeset = RecordChangeset<(Payload, ServerTimestamp)>;
|
||||
pub type OutgoingChangeset = RecordChangeset<Payload>;
|
||||
|
||||
// TODO: use a trait to unify this with the non-json versions
|
||||
impl<T> RecordChangeset<T> {
|
||||
#[inline]
|
||||
pub fn new(
|
||||
collection: impl Into<std::borrow::Cow<'static, str>>,
|
||||
timestamp: ServerTimestamp,
|
||||
) -> RecordChangeset<T> {
|
||||
RecordChangeset {
|
||||
changes: vec![],
|
||||
timestamp,
|
||||
collection: collection.into(),
|
||||
}
|
||||
}
|
||||
}
|
|
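A small sketch of how an engine might build an outgoing changeset with the types defined in changeset.rs above (illustrative only; the collection name and record id are made up):

```rust
// Illustrative sketch only; "bookmarks" and the guid are hypothetical values.
use sync15_traits::{OutgoingChangeset, Payload, ServerTimestamp};

fn build_outgoing(server_ts: ServerTimestamp) -> OutgoingChangeset {
    // For POSTs the timestamp is used as the XIUS (X-If-Unmodified-Since) value.
    let mut outgoing = OutgoingChangeset::new("bookmarks", server_ts);
    outgoing.changes.push(Payload::new_tombstone("aaaabbbbcccc"));
    outgoing
}
```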
@@ -0,0 +1,58 @@
|
|||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
//! This module has to be here because of some hard-to-avoid hacks done for the
|
||||
//! tabs engine... See issue #2590
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Argument to Store::prepare_for_sync. See comment there for more info. Only
|
||||
/// really intended to be used by tabs engine.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ClientData {
|
||||
pub local_client_id: String,
|
||||
pub recent_clients: HashMap<String, RemoteClient>,
|
||||
}
|
||||
|
||||
/// Information about a remote client in the clients collection.
|
||||
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
|
||||
pub struct RemoteClient {
|
||||
pub fxa_device_id: Option<String>,
|
||||
pub device_name: String,
|
||||
pub device_type: Option<DeviceType>,
|
||||
}
|
||||
|
||||
/// The type of a client. Please keep these variants in sync with the device
|
||||
/// types in the FxA client and sync manager.
|
||||
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
|
||||
pub enum DeviceType {
|
||||
Desktop,
|
||||
Mobile,
|
||||
Tablet,
|
||||
VR,
|
||||
TV,
|
||||
}
|
||||
|
||||
impl DeviceType {
|
||||
pub fn try_from_str(d: impl AsRef<str>) -> Option<DeviceType> {
|
||||
match d.as_ref() {
|
||||
"desktop" => Some(DeviceType::Desktop),
|
||||
"mobile" => Some(DeviceType::Mobile),
|
||||
"tablet" => Some(DeviceType::Tablet),
|
||||
"vr" => Some(DeviceType::VR),
|
||||
"tv" => Some(DeviceType::TV),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_str(self) -> &'static str {
|
||||
match self {
|
||||
DeviceType::Desktop => "desktop",
|
||||
DeviceType::Mobile => "mobile",
|
||||
DeviceType::Tablet => "tablet",
|
||||
DeviceType::VR => "vr",
|
||||
DeviceType::TV => "tv",
|
||||
}
|
||||
}
|
||||
}
|
|
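A short sketch exercising the client types above (illustrative only; the client ids and device name are made up):

```rust
// Illustrative sketch only; ids and names below are hypothetical.
use std::collections::HashMap;
use sync15_traits::client::{ClientData, DeviceType, RemoteClient};

fn client_example() {
    // DeviceType round-trips through the lowercase strings defined above.
    assert_eq!(DeviceType::try_from_str("desktop"), Some(DeviceType::Desktop));
    assert_eq!(DeviceType::Desktop.as_str(), "desktop");
    assert_eq!(DeviceType::try_from_str("toaster"), None);

    // This is the shape the tabs engine receives from prepare_for_sync.
    let mut recent_clients = HashMap::new();
    recent_clients.insert(
        "client-xyz".to_string(),
        RemoteClient {
            fxa_device_id: None,
            device_name: "Nightly on desktop".to_string(),
            device_type: Some(DeviceType::Desktop),
        },
    );
    let data = ClientData {
        local_client_id: "client-abc".to_string(),
        recent_clients,
    };
    assert_eq!(data.recent_clients.len(), 1);
}
```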
@@ -0,0 +1,24 @@
|
|||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#![warn(rust_2018_idioms)]
|
||||
mod changeset;
|
||||
pub mod client;
|
||||
mod payload;
|
||||
pub mod request;
|
||||
mod server_timestamp;
|
||||
mod store;
|
||||
pub mod telemetry;
|
||||
|
||||
pub use changeset::{IncomingChangeset, OutgoingChangeset, RecordChangeset};
|
||||
pub use payload::Payload;
|
||||
pub use request::{CollectionRequest, RequestOrder};
|
||||
pub use server_timestamp::ServerTimestamp;
|
||||
pub use store::{CollSyncIds, Store, StoreSyncAssociation};
|
||||
pub use sync_guid::Guid;
|
||||
|
||||
// For skip_serializing_if
|
||||
pub(crate) fn skip_if_default<T: PartialEq + Default>(v: &T) -> bool {
|
||||
*v == T::default()
|
||||
}
|
|
@@ -0,0 +1,91 @@
|
|||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
use super::Guid;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{Map, Value as JsonValue};
|
||||
|
||||
/// Represents the decrypted payload in a Bso. Provides a minimal layer of type
|
||||
/// safety to avoid double-encrypting.
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct Payload {
|
||||
pub id: Guid,
|
||||
|
||||
#[serde(default)]
|
||||
#[serde(skip_serializing_if = "crate::skip_if_default")]
|
||||
pub deleted: bool,
|
||||
|
||||
#[serde(flatten)]
|
||||
pub data: Map<String, JsonValue>,
|
||||
}
|
||||
|
||||
impl Payload {
|
||||
pub fn new_tombstone(id: impl Into<Guid>) -> Payload {
|
||||
Payload {
|
||||
id: id.into(),
|
||||
deleted: true,
|
||||
data: Map::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_tombstone_with_ttl(id: impl Into<Guid>, ttl: u32) -> Payload {
|
||||
let mut result = Payload::new_tombstone(id);
|
||||
result.data.insert("ttl".into(), ttl.into());
|
||||
result
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn with_sortindex(mut self, index: i32) -> Payload {
|
||||
self.data.insert("sortindex".into(), index.into());
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn id(&self) -> &str {
|
||||
&self.id[..]
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn is_tombstone(&self) -> bool {
|
||||
self.deleted
|
||||
}
|
||||
|
||||
pub fn from_json(value: JsonValue) -> Result<Payload, serde_json::Error> {
|
||||
serde_json::from_value(value)
|
||||
}
|
||||
|
||||
pub fn into_record<T>(self) -> Result<T, serde_json::Error>
|
||||
where
|
||||
for<'a> T: Deserialize<'a>,
|
||||
{
|
||||
serde_json::from_value(JsonValue::from(self))
|
||||
}
|
||||
|
||||
pub fn from_record<T: Serialize>(v: T) -> Result<Payload, serde_json::Error> {
|
||||
// TODO(issue #2588): This is kind of dumb, we do to_value and then
|
||||
// from_value. In general a more strongly typed API would help us avoid
|
||||
// this sort of thing... But also concretely this could probably be
|
||||
// avoided? At least in some cases.
|
||||
Ok(Payload::from_json(serde_json::to_value(v)?)?)
|
||||
}
|
||||
|
||||
pub fn into_json_string(self) -> String {
|
||||
serde_json::to_string(&JsonValue::from(self))
|
||||
.expect("JSON.stringify failed, which shouldn't be possible")
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Payload> for JsonValue {
|
||||
fn from(cleartext: Payload) -> Self {
|
||||
let Payload {
|
||||
mut data,
|
||||
id,
|
||||
deleted,
|
||||
} = cleartext;
|
||||
data.insert("id".to_string(), JsonValue::String(id.into_string()));
|
||||
if deleted {
|
||||
data.insert("deleted".to_string(), JsonValue::Bool(true));
|
||||
}
|
||||
JsonValue::Object(data)
|
||||
}
|
||||
}
|
|
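A sketch of converting a typed record to and from the generic `Payload` above (illustrative only; `TabRecord` and its fields are hypothetical):

```rust
// Illustrative sketch only; TabRecord is a hypothetical engine record type.
use serde::{Deserialize, Serialize};
use sync15_traits::Payload;

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct TabRecord {
    id: String,
    title: String,
}

fn payload_example() -> Result<(), serde_json::Error> {
    let record = TabRecord {
        id: "aaaabbbbcccc".into(),
        title: "example tab".into(),
    };

    // from_record serializes the record and re-parses it as a Payload; the
    // "id" field becomes Payload::id and everything else lands in `data`.
    let payload = Payload::from_record(record)?;
    assert!(!payload.is_tombstone());
    assert_eq!(payload.id(), "aaaabbbbcccc");

    // into_record goes the other way via the From<Payload> for JsonValue impl.
    let back: TabRecord = payload.into_record()?;
    assert_eq!(back.title, "example tab");
    Ok(())
}
```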
@@ -0,0 +1,175 @@
|
|||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
use crate::{Guid, ServerTimestamp};
|
||||
use std::borrow::Cow;
|
||||
use url::{form_urlencoded as form, Url, UrlQuery};
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct CollectionRequest {
|
||||
pub collection: Cow<'static, str>,
|
||||
pub full: bool,
|
||||
pub ids: Option<Vec<Guid>>,
|
||||
pub limit: usize,
|
||||
pub older: Option<ServerTimestamp>,
|
||||
pub newer: Option<ServerTimestamp>,
|
||||
pub order: Option<RequestOrder>,
|
||||
pub commit: bool,
|
||||
pub batch: Option<String>,
|
||||
}
|
||||
|
||||
impl CollectionRequest {
|
||||
#[inline]
|
||||
pub fn new<S>(collection: S) -> CollectionRequest
|
||||
where
|
||||
S: Into<Cow<'static, str>>,
|
||||
{
|
||||
CollectionRequest {
|
||||
collection: collection.into(),
|
||||
full: false,
|
||||
ids: None,
|
||||
limit: 0,
|
||||
older: None,
|
||||
newer: None,
|
||||
order: None,
|
||||
commit: false,
|
||||
batch: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn ids<V>(mut self, v: V) -> CollectionRequest
|
||||
where
|
||||
V: IntoIterator,
|
||||
V::Item: Into<Guid>,
|
||||
{
|
||||
self.ids = Some(v.into_iter().map(|id| id.into()).collect());
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn full(mut self) -> CollectionRequest {
|
||||
self.full = true;
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn older_than(mut self, ts: ServerTimestamp) -> CollectionRequest {
|
||||
self.older = Some(ts);
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn newer_than(mut self, ts: ServerTimestamp) -> CollectionRequest {
|
||||
self.newer = Some(ts);
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn sort_by(mut self, order: RequestOrder) -> CollectionRequest {
|
||||
self.order = Some(order);
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn limit(mut self, num: usize) -> CollectionRequest {
|
||||
self.limit = num;
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn batch(mut self, batch: Option<String>) -> CollectionRequest {
|
||||
self.batch = batch;
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn commit(mut self, v: bool) -> CollectionRequest {
|
||||
self.commit = v;
|
||||
self
|
||||
}
|
||||
|
||||
fn build_query(&self, pairs: &mut form::Serializer<'_, UrlQuery<'_>>) {
|
||||
if self.full {
|
||||
pairs.append_pair("full", "1");
|
||||
}
|
||||
if self.limit > 0 {
|
||||
pairs.append_pair("limit", &self.limit.to_string());
|
||||
}
|
||||
if let Some(ids) = &self.ids {
|
||||
// Most ids are 12 characters, and we comma separate them, so 13.
|
||||
let mut buf = String::with_capacity(ids.len() * 13);
|
||||
for (i, id) in ids.iter().enumerate() {
|
||||
if i > 0 {
|
||||
buf.push(',');
|
||||
}
|
||||
buf.push_str(id.as_str());
|
||||
}
|
||||
pairs.append_pair("ids", &buf);
|
||||
}
|
||||
if let Some(batch) = &self.batch {
|
||||
pairs.append_pair("batch", &batch);
|
||||
}
|
||||
if self.commit {
|
||||
pairs.append_pair("commit", "true");
|
||||
}
|
||||
if let Some(ts) = self.older {
|
||||
pairs.append_pair("older", &ts.to_string());
|
||||
}
|
||||
if let Some(ts) = self.newer {
|
||||
pairs.append_pair("newer", &ts.to_string());
|
||||
}
|
||||
if let Some(o) = self.order {
|
||||
pairs.append_pair("sort", o.as_str());
|
||||
}
|
||||
pairs.finish();
|
||||
}
|
||||
|
||||
pub fn build_url(&self, mut base_url: Url) -> Result<Url, UnacceptableBaseUrl> {
|
||||
base_url
|
||||
.path_segments_mut()
|
||||
.map_err(|_| UnacceptableBaseUrl(()))?
|
||||
.extend(&["storage", &self.collection]);
|
||||
self.build_query(&mut base_url.query_pairs_mut());
|
||||
// This is strange but just accessing query_pairs_mut makes you have
|
||||
// a trailing question mark on your url. I don't think anything bad
|
||||
// would happen here, but I don't know, and also, it looks dumb so
|
||||
// I'd rather not have it.
|
||||
if base_url.query() == Some("") {
|
||||
base_url.set_query(None);
|
||||
}
|
||||
Ok(base_url)
|
||||
}
|
||||
}
|
||||
#[derive(Debug)]
|
||||
pub struct UnacceptableBaseUrl(());
|
||||
|
||||
impl std::fmt::Display for UnacceptableBaseUrl {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str("Storage server URL is not a base")
|
||||
}
|
||||
}
|
||||
impl std::error::Error for UnacceptableBaseUrl {}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
|
||||
pub enum RequestOrder {
|
||||
Oldest,
|
||||
Newest,
|
||||
Index,
|
||||
}
|
||||
|
||||
impl RequestOrder {
|
||||
#[inline]
|
||||
pub fn as_str(self) -> &'static str {
|
||||
match self {
|
||||
RequestOrder::Oldest => "oldest",
|
||||
RequestOrder::Newest => "newest",
|
||||
RequestOrder::Index => "index",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for RequestOrder {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str(self.as_str())
|
||||
}
|
||||
}
|
|
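A sketch of the request builder above in use (illustrative only; the storage server URL and collection name are made up):

```rust
// Illustrative sketch only; the storage server URL below is hypothetical.
use sync15_traits::{CollectionRequest, RequestOrder, ServerTimestamp};
use url::Url;

fn request_example() -> Result<(), Box<dyn std::error::Error>> {
    let base = Url::parse("https://sync.example.com/1.5/12345")?;
    let request = CollectionRequest::new("bookmarks")
        .full()
        .limit(100)
        .newer_than(ServerTimestamp(1_234_567))
        .sort_by(RequestOrder::Newest);

    // build_url appends /storage/<collection> and the query pairs built above.
    let url = request.build_url(base)?;
    assert!(url.as_str().contains("/storage/bookmarks?"));
    assert!(url.query().unwrap_or("").contains("sort=newest"));
    Ok(())
}
```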
@@ -0,0 +1,125 @@
|
|||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
use std::marker::PhantomData;
|
||||
use std::time::Duration;
|
||||
|
||||
/// Typesafe way to manage server timestamps without accidentally mixing them up with
|
||||
/// local ones.
|
||||
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Default)]
|
||||
pub struct ServerTimestamp(pub i64);
|
||||
|
||||
impl ServerTimestamp {
|
||||
pub fn from_float_seconds(ts: f64) -> Self {
|
||||
let rf = (ts * 1000.0).round();
|
||||
if !rf.is_finite() || rf < 0.0 || rf >= i64::max_value() as f64 {
|
||||
log::error!("Illegal timestamp: {}", ts);
|
||||
ServerTimestamp(0)
|
||||
} else {
|
||||
ServerTimestamp(rf as i64)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_millis(ts: i64) -> Self {
|
||||
// Catch it in tests, but just complain and replace with 0 otherwise.
|
||||
debug_assert!(ts >= 0, "Bad timestamp: {}", ts);
|
||||
if ts >= 0 {
|
||||
Self(ts)
|
||||
} else {
|
||||
log::error!("Illegal timestamp, substituting 0: {}", ts);
|
||||
Self(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This lets us use these in hyper header! blocks.
|
||||
impl std::str::FromStr for ServerTimestamp {
|
||||
type Err = std::num::ParseFloatError;
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
let val = f64::from_str(s)?;
|
||||
Ok(Self::from_float_seconds(val))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for ServerTimestamp {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.0 as f64 / 1000.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl ServerTimestamp {
|
||||
pub const EPOCH: ServerTimestamp = ServerTimestamp(0);
|
||||
|
||||
/// Returns None if `other` is later than `self` (Duration may not represent
|
||||
/// negative timespans in rust).
|
||||
#[inline]
|
||||
pub fn duration_since(self, other: ServerTimestamp) -> Option<Duration> {
|
||||
let delta = self.0 - other.0;
|
||||
if delta < 0 {
|
||||
None
|
||||
} else {
|
||||
Some(Duration::from_millis(delta as u64))
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the milliseconds for the timestamp.
|
||||
#[inline]
|
||||
pub fn as_millis(self) -> i64 {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl serde::ser::Serialize for ServerTimestamp {
|
||||
fn serialize<S: serde::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
|
||||
serializer.serialize_f64(self.0 as f64 / 1000.0)
|
||||
}
|
||||
}
|
||||
|
||||
struct TimestampVisitor(PhantomData<ServerTimestamp>);
|
||||
|
||||
impl<'de> serde::de::Visitor<'de> for TimestampVisitor {
|
||||
type Value = ServerTimestamp;
|
||||
|
||||
fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
formatter.write_str("a floating point number")
|
||||
}
|
||||
|
||||
fn visit_f64<E: serde::de::Error>(self, value: f64) -> Result<Self::Value, E> {
|
||||
Ok(ServerTimestamp::from_float_seconds(value))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> serde::de::Deserialize<'de> for ServerTimestamp {
|
||||
fn deserialize<D: serde::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
|
||||
deserializer.deserialize_f64(TimestampVisitor(PhantomData))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_server_timestamp() {
|
||||
let t0 = ServerTimestamp(10_300_150);
|
||||
let t1 = ServerTimestamp(10_100_050);
|
||||
assert!(t1.duration_since(t0).is_none());
|
||||
assert!(t0.duration_since(t1).is_some());
|
||||
let dur = t0.duration_since(t1).unwrap();
|
||||
assert_eq!(dur.as_secs(), 200);
|
||||
assert_eq!(dur.subsec_nanos(), 100_000_000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serde() {
|
||||
let ts = ServerTimestamp(123_456);
|
||||
|
||||
// test serialize
|
||||
let ser = serde_json::to_string(&ts).unwrap();
|
||||
assert_eq!("123.456".to_string(), ser);
|
||||
|
||||
// test deserialize
|
||||
let ts: ServerTimestamp = serde_json::from_str(&ser).unwrap();
|
||||
assert_eq!(ServerTimestamp(123_456), ts);
|
||||
}
|
||||
}
|
|
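A sketch of how `ServerTimestamp` behaves (milliseconds internally, fractional seconds on the wire); illustrative only:

```rust
// Illustrative sketch only.
use sync15_traits::ServerTimestamp;

fn timestamp_example() {
    // The wire format is fractional seconds; internally we keep milliseconds.
    let ts = ServerTimestamp::from_float_seconds(1234.567);
    assert_eq!(ts.as_millis(), 1_234_567);

    // duration_since is None when `other` is later, since Duration can't be negative.
    let earlier = ServerTimestamp(1_000_000);
    assert_eq!(ts.duration_since(earlier).unwrap().as_millis(), 234_567);
    assert!(earlier.duration_since(ts).is_none());
}
```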
@@ -0,0 +1,97 @@
|
|||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
use crate::{
|
||||
client::ClientData, telemetry, CollectionRequest, Guid, IncomingChangeset, OutgoingChangeset,
|
||||
ServerTimestamp,
|
||||
};
|
||||
use failure::Error;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct CollSyncIds {
|
||||
pub global: Guid,
|
||||
pub coll: Guid,
|
||||
}
|
||||
|
||||
/// Defines how a store is associated with Sync.
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum StoreSyncAssociation {
|
||||
/// This store is disconnected (although it may be connected in the future).
|
||||
Disconnected,
|
||||
/// Sync is connected, and has the following sync IDs.
|
||||
Connected(CollSyncIds),
|
||||
}
|
||||
|
||||
/// Low-level store functionality. Stores that need custom reconciliation logic
|
||||
/// should use this.
|
||||
///
|
||||
/// Different stores will produce errors of different types. To accommodate
|
||||
/// this, we force them all to return failure::Error.
|
||||
pub trait Store {
|
||||
fn collection_name(&self) -> std::borrow::Cow<'static, str>;
|
||||
|
||||
/// Prepares the store for syncing. The tabs store currently uses this to
|
||||
/// store the current list of clients, which it uses to look up device names
|
||||
/// and types.
|
||||
///
|
||||
/// Note that this method is only called by `sync_multiple`, and only if a
|
||||
/// command processor is registered. In particular, `prepare_for_sync` will
|
||||
/// not be called if the store is synced using `sync::synchronize` or
|
||||
/// `sync_multiple::sync_multiple`. It _will_ be called if the store is
|
||||
/// synced via the Sync Manager.
|
||||
///
|
||||
/// TODO(issue #2590): This is pretty cludgey and will be hard to extend for
|
||||
/// any case other than the tabs case. We should find another way to support
|
||||
/// tabs...
|
||||
fn prepare_for_sync(&self, _get_client_data: &dyn Fn() -> ClientData) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// `inbound` is a vector to support the case where
|
||||
/// `get_collection_requests` returned multiple requests. The changesets are
|
||||
/// in the same order as the requests were -- e.g. if `vec![req_a, req_b]`
|
||||
/// was returned from `get_collection_requests`, `inbound` will have the
|
||||
/// results from `req_a` as its first index, and those from `req_b` as its
|
||||
/// second.
|
||||
fn apply_incoming(
|
||||
&self,
|
||||
inbound: Vec<IncomingChangeset>,
|
||||
telem: &mut telemetry::Engine,
|
||||
) -> Result<OutgoingChangeset, Error>;
|
||||
|
||||
fn sync_finished(
|
||||
&self,
|
||||
new_timestamp: ServerTimestamp,
|
||||
records_synced: Vec<Guid>,
|
||||
) -> Result<(), Error>;
|
||||
|
||||
/// The store is responsible for building the collection request. Engines
|
||||
/// typically will store a lastModified timestamp and use that to build a
|
||||
/// request saying "give me full records since that date" - however, other
|
||||
/// engines might do something fancier. This could even later be extended to
|
||||
/// handle "backfills" etc
|
||||
///
|
||||
/// To support more advanced use cases (e.g. remerge), multiple requests can
|
||||
/// be returned here. The vast majority of engines will just want to return
|
||||
/// zero or one item in their vector (zero is a valid optimization when the
|
||||
/// server timestamp is the same as the engine last saw, one when it is not)
|
||||
///
|
||||
/// Important: In the case when more than one collection is requested, it's
|
||||
/// assumed the last one is the "canonical" one. (That is, it must be for
|
||||
/// "this" collection, its timestamp is used to represent the sync, etc).
|
||||
fn get_collection_requests(
|
||||
&self,
|
||||
server_timestamp: ServerTimestamp,
|
||||
) -> Result<Vec<CollectionRequest>, Error>;
|
||||
|
||||
/// Get persisted sync IDs. If they don't match the global state we'll be
|
||||
/// `reset()` with the new IDs.
|
||||
fn get_sync_assoc(&self) -> Result<StoreSyncAssociation, Error>;
|
||||
|
||||
/// Reset the store without wiping local data, ready for a "first sync".
|
||||
/// `assoc` defines how this store is to be associated with sync.
|
||||
fn reset(&self, assoc: &StoreSyncAssociation) -> Result<(), Error>;
|
||||
|
||||
fn wipe(&self) -> Result<(), Error>;
|
||||
}
|
|
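The `Store` trait above is the main integration point for engines. A minimal, do-nothing skeleton is sketched below (illustrative only; `NoopStore` and the "noop" collection are hypothetical, and a real engine would persist state and produce real changesets):

```rust
// Illustrative sketch only: a hypothetical engine that syncs nothing.
use failure::Error;
use sync15_traits::{
    telemetry, CollectionRequest, Guid, IncomingChangeset, OutgoingChangeset, ServerTimestamp,
    Store, StoreSyncAssociation,
};

struct NoopStore;

impl Store for NoopStore {
    fn collection_name(&self) -> std::borrow::Cow<'static, str> {
        "noop".into()
    }

    fn apply_incoming(
        &self,
        inbound: Vec<IncomingChangeset>,
        _telem: &mut telemetry::Engine,
    ) -> Result<OutgoingChangeset, Error> {
        // Use the timestamp of the last (canonical) changeset and upload nothing.
        let timestamp = inbound
            .last()
            .map(|c| c.timestamp)
            .unwrap_or(ServerTimestamp::EPOCH);
        Ok(OutgoingChangeset::new("noop", timestamp))
    }

    fn sync_finished(
        &self,
        _new_timestamp: ServerTimestamp,
        _records_synced: Vec<Guid>,
    ) -> Result<(), Error> {
        Ok(())
    }

    fn get_collection_requests(
        &self,
        _server_timestamp: ServerTimestamp,
    ) -> Result<Vec<CollectionRequest>, Error> {
        Ok(vec![CollectionRequest::new("noop").full()])
    }

    fn get_sync_assoc(&self) -> Result<StoreSyncAssociation, Error> {
        Ok(StoreSyncAssociation::Disconnected)
    }

    fn reset(&self, _assoc: &StoreSyncAssociation) -> Result<(), Error> {
        Ok(())
    }

    fn wipe(&self) -> Result<(), Error> {
        Ok(())
    }
}
```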
@@ -0,0 +1,777 @@
|
|||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
//! Manage recording sync telemetry. Assumes some external telemetry
|
||||
//! library/code which manages submitting.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::time;
|
||||
|
||||
use serde::{ser, Serialize, Serializer};
|
||||
|
||||
// A test helper, used by the many test modules below.
|
||||
#[cfg(test)]
|
||||
fn assert_json<T: ?Sized>(v: &T, expected: serde_json::Value)
|
||||
where
|
||||
T: serde::Serialize,
|
||||
{
|
||||
assert_eq!(
|
||||
serde_json::to_value(&v).expect("should get a value"),
|
||||
expected
|
||||
);
|
||||
}
|
||||
|
||||
/// What we record for 'when' and 'took' in a telemetry record.
|
||||
#[derive(Debug, Serialize)]
|
||||
struct WhenTook {
|
||||
when: f64,
|
||||
#[serde(skip_serializing_if = "crate::skip_if_default")]
|
||||
took: u64,
|
||||
}
|
||||
|
||||
/// What we track while recording 'when' and 'took'. It serializes as a WhenTook,
|
||||
/// except when .finished() hasn't been called, in which case it panics.
|
||||
#[derive(Debug)]
|
||||
enum Stopwatch {
|
||||
Started(time::SystemTime, time::Instant),
|
||||
Finished(WhenTook),
|
||||
}
|
||||
|
||||
impl Default for Stopwatch {
|
||||
fn default() -> Self {
|
||||
Stopwatch::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl Stopwatch {
|
||||
fn new() -> Self {
|
||||
Stopwatch::Started(time::SystemTime::now(), time::Instant::now())
|
||||
}
|
||||
|
||||
// For tests we don't want real timestamps because we test against literals.
|
||||
#[cfg(test)]
|
||||
fn finished(&self) -> Self {
|
||||
Stopwatch::Finished(WhenTook { when: 0.0, took: 0 })
|
||||
}
|
||||
|
||||
#[cfg(not(test))]
|
||||
fn finished(&self) -> Self {
|
||||
match self {
|
||||
Stopwatch::Started(st, si) => {
|
||||
let std = st.duration_since(time::UNIX_EPOCH).unwrap_or_default();
|
||||
let when = std.as_secs() as f64; // we don't want sub-sec accuracy. Do we need to write a float?
|
||||
|
||||
let sid = si.elapsed();
|
||||
let took = sid.as_secs() * 1000 + (u64::from(sid.subsec_nanos()) / 1_000_000);
|
||||
Stopwatch::Finished(WhenTook { when, took })
|
||||
}
|
||||
_ => {
|
||||
unreachable!("can't finish twice");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for Stopwatch {
|
||||
fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
match self {
|
||||
Stopwatch::Started(_, _) => Err(ser::Error::custom("StopWatch has not been finished")),
|
||||
Stopwatch::Finished(c) => c.serialize(serializer),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod stopwatch_tests {
|
||||
use super::*;
|
||||
|
||||
// A wrapper struct because we flatten - this struct should serialize with
|
||||
// 'when' and 'took' keys (but with no 'sw'.)
|
||||
#[derive(Debug, Serialize)]
|
||||
struct WT {
|
||||
#[serde(flatten)]
|
||||
sw: Stopwatch,
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_not_finished() {
|
||||
let wt = WT {
|
||||
sw: Stopwatch::new(),
|
||||
};
|
||||
serde_json::to_string(&wt).expect_err("unfinished stopwatch should fail");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test() {
|
||||
assert_json(
|
||||
&WT {
|
||||
sw: Stopwatch::Finished(WhenTook { when: 1.0, took: 1 }),
|
||||
},
|
||||
serde_json::json!({"when": 1.0, "took": 1}),
|
||||
);
|
||||
assert_json(
|
||||
&WT {
|
||||
sw: Stopwatch::Finished(WhenTook { when: 1.0, took: 0 }),
|
||||
},
|
||||
serde_json::json!({"when": 1.0}),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// A generic "Event" - suitable for all kinds of pings (although this module
|
||||
/// only cares about the sync ping)
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct Event {
|
||||
// We use static str references as we expect values to be literals.
|
||||
object: &'static str,
|
||||
|
||||
method: &'static str,
|
||||
|
||||
// Maybe "value" should be a string?
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
value: Option<&'static str>,
|
||||
|
||||
// we expect the keys to be literals but values are real strings.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
extra: Option<HashMap<&'static str, String>>,
|
||||
}
|
||||
|
||||
impl Event {
|
||||
pub fn new(object: &'static str, method: &'static str) -> Self {
|
||||
assert!(object.len() <= 20);
|
||||
assert!(method.len() <= 20);
|
||||
Self {
|
||||
object,
|
||||
method,
|
||||
value: None,
|
||||
extra: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn value(mut self, v: &'static str) -> Self {
|
||||
assert!(v.len() <= 80);
|
||||
self.value = Some(v);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn extra(mut self, key: &'static str, val: String) -> Self {
|
||||
assert!(key.len() <= 15);
|
||||
assert!(val.len() <= 85);
|
||||
match self.extra {
|
||||
None => self.extra = Some(HashMap::new()),
|
||||
Some(ref e) => assert!(e.len() < 10),
|
||||
}
|
||||
self.extra.as_mut().unwrap().insert(key, val);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test_events {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_invalid_length_ctor() {
|
||||
Event::new("A very long object value", "Method");
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_invalid_length_extra_key() {
|
||||
Event::new("O", "M").extra("A very long key value", "v".to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_invalid_length_extra_val() {
|
||||
let l = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ
|
||||
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
|
||||
Event::new("O", "M").extra("k", l.to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_too_many_extras() {
|
||||
let l = "abcdefghijk";
|
||||
let mut e = Event::new("Object", "Method");
|
||||
for i in 0..l.len() {
|
||||
e = e.extra(&l[i..=i], "v".to_string());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_json() {
|
||||
assert_json(
|
||||
&Event::new("Object", "Method").value("Value"),
|
||||
serde_json::json!({"object": "Object", "method": "Method", "value": "Value"}),
|
||||
);
|
||||
|
||||
assert_json(
|
||||
&Event::new("Object", "Method").extra("one", "one".to_string()),
|
||||
serde_json::json!({"object": "Object",
|
||||
"method": "Method",
|
||||
"extra": {"one": "one"}
|
||||
}),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// A Sync failure.
|
||||
#[derive(Debug, Serialize)]
|
||||
#[serde(tag = "name")]
|
||||
pub enum SyncFailure {
|
||||
#[serde(rename = "shutdownerror")]
|
||||
Shutdown,
|
||||
|
||||
#[serde(rename = "othererror")]
|
||||
Other { error: String },
|
||||
|
||||
#[serde(rename = "unexpectederror")]
|
||||
Unexpected { error: String },
|
||||
|
||||
#[serde(rename = "autherror")]
|
||||
Auth { from: &'static str },
|
||||
|
||||
#[serde(rename = "httperror")]
|
||||
Http { code: u16 },
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn reprs() {
|
||||
assert_json(
|
||||
&SyncFailure::Shutdown,
|
||||
serde_json::json!({"name": "shutdownerror"}),
|
||||
);
|
||||
|
||||
assert_json(
|
||||
&SyncFailure::Other {
|
||||
error: "dunno".to_string(),
|
||||
},
|
||||
serde_json::json!({"name": "othererror", "error": "dunno"}),
|
||||
);
|
||||
|
||||
assert_json(
|
||||
&SyncFailure::Unexpected {
|
||||
error: "dunno".to_string(),
|
||||
},
|
||||
serde_json::json!({"name": "unexpectederror", "error": "dunno"}),
|
||||
);
|
||||
|
||||
assert_json(
|
||||
&SyncFailure::Auth { from: "FxA" },
|
||||
serde_json::json!({"name": "autherror", "from": "FxA"}),
|
||||
);
|
||||
|
||||
assert_json(
|
||||
&SyncFailure::Http { code: 500 },
|
||||
serde_json::json!({"name": "httperror", "code": 500}),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Incoming record for an engine's sync
|
||||
#[derive(Debug, Default, Serialize)]
|
||||
pub struct EngineIncoming {
|
||||
#[serde(skip_serializing_if = "crate::skip_if_default")]
|
||||
applied: u32,
|
||||
|
||||
#[serde(skip_serializing_if = "crate::skip_if_default")]
|
||||
failed: u32,
|
||||
|
||||
#[serde(rename = "newFailed")]
|
||||
#[serde(skip_serializing_if = "crate::skip_if_default")]
|
||||
new_failed: u32,
|
||||
|
||||
#[serde(skip_serializing_if = "crate::skip_if_default")]
|
||||
reconciled: u32,
|
||||
}
|
||||
|
||||
impl EngineIncoming {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
// A helper used via skip_serializing_if
|
||||
fn is_empty(inc: &Option<Self>) -> bool {
|
||||
match inc {
|
||||
Some(a) => a.applied == 0 && a.failed == 0 && a.new_failed == 0 && a.reconciled == 0,
|
||||
None => true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Increment the value of `applied` by `n`.
|
||||
#[inline]
|
||||
pub fn applied(&mut self, n: u32) {
|
||||
self.applied += n;
|
||||
}
|
||||
|
||||
/// Increment the value of `failed` by `n`.
|
||||
#[inline]
|
||||
pub fn failed(&mut self, n: u32) {
|
||||
self.failed += n;
|
||||
}
|
||||
|
||||
/// Increment the value of `new_failed` by `n`.
|
||||
#[inline]
|
||||
pub fn new_failed(&mut self, n: u32) {
|
||||
self.new_failed += n;
|
||||
}
|
||||
|
||||
/// Increment the value of `reconciled` by `n`.
|
||||
#[inline]
|
||||
pub fn reconciled(&mut self, n: u32) {
|
||||
self.reconciled += n;
|
||||
}
|
||||
|
||||
/// Get the value of `applied`. Mostly useful for testing.
|
||||
#[inline]
|
||||
pub fn get_applied(&self) -> u32 {
|
||||
self.applied
|
||||
}
|
||||
|
||||
/// Get the value of `failed`. Mostly useful for testing.
|
||||
#[inline]
|
||||
pub fn get_failed(&self) -> u32 {
|
||||
self.failed
|
||||
}
|
||||
|
||||
/// Get the value of `new_failed`. Mostly useful for testing.
|
||||
#[inline]
|
||||
pub fn get_new_failed(&self) -> u32 {
|
||||
self.new_failed
|
||||
}
|
||||
|
||||
/// Get the value of `reconciled`. Mostly useful for testing.
|
||||
#[inline]
|
||||
pub fn get_reconciled(&self) -> u32 {
|
||||
self.reconciled
|
||||
}
|
||||
}
/// Outgoing record for an engine's sync
#[derive(Debug, Default, Serialize)]
pub struct EngineOutgoing {
    #[serde(skip_serializing_if = "crate::skip_if_default")]
    sent: usize,

    #[serde(skip_serializing_if = "crate::skip_if_default")]
    failed: usize,
}

impl EngineOutgoing {
    pub fn new() -> Self {
        EngineOutgoing {
            ..Default::default()
        }
    }

    #[inline]
    pub fn sent(&mut self, n: usize) {
        self.sent += n;
    }

    #[inline]
    pub fn failed(&mut self, n: usize) {
        self.failed += n;
    }
}

/// One engine's sync.
#[derive(Debug, Serialize)]
pub struct Engine {
    name: String,

    #[serde(flatten)]
    when_took: Stopwatch,

    #[serde(skip_serializing_if = "EngineIncoming::is_empty")]
    incoming: Option<EngineIncoming>,

    #[serde(skip_serializing_if = "Vec::is_empty")]
    outgoing: Vec<EngineOutgoing>, // one for each batch posted.

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "failureReason")]
    failure: Option<SyncFailure>,

    #[serde(skip_serializing_if = "Option::is_none")]
    validation: Option<Validation>,
}

impl Engine {
    pub fn new(name: impl Into<String>) -> Self {
        Self {
            name: name.into(),
            when_took: Stopwatch::new(),
            incoming: None,
            outgoing: Vec::new(),
            failure: None,
            validation: None,
        }
    }

    pub fn incoming(&mut self, inc: EngineIncoming) {
        assert!(self.incoming.is_none());
        self.incoming = Some(inc);
    }

    pub fn outgoing(&mut self, out: EngineOutgoing) {
        self.outgoing.push(out);
    }

    pub fn failure(&mut self, err: impl Into<SyncFailure>) {
        // Currently we take the first error, under the assumption that the
        // first is the most important and all others stem from that.
        let failure = err.into();
        if self.failure.is_none() {
            self.failure = Some(failure);
        } else {
            log::warn!(
                "engine already has recorded a failure of {:?} - ignoring {:?}",
                &self.failure,
                &failure
            );
        }
    }

    pub fn validation(&mut self, v: Validation) {
        assert!(self.validation.is_none());
        self.validation = Some(v);
    }

    fn finished(&mut self) {
        self.when_took = self.when_took.finished();
    }
}
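
An illustrative sketch (not part of the vendored source; it reuses the `assert_json` test helper and the types above) of the first-failure-wins behaviour implemented in `Engine::failure`:

    let mut e = Engine::new("example");
    e.failure(SyncFailure::Http { code: 500 });
    // The second failure is only logged via log::warn! - the first one is kept.
    e.failure(SyncFailure::Auth { from: "FxA" });
    e.finished();
    assert_json(
        &e,
        serde_json::json!({"name": "example", "when": 0.0, "failureReason": {"name": "httperror", "code": 500}}),
    );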
#[derive(Debug, Default, Serialize)]
pub struct Validation {
    version: u32,

    #[serde(skip_serializing_if = "Vec::is_empty")]
    problems: Vec<Problem>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "failureReason")]
    failure: Option<SyncFailure>,
}

impl Validation {
    pub fn with_version(version: u32) -> Validation {
        Validation {
            version,
            ..Validation::default()
        }
    }

    pub fn problem(&mut self, name: &'static str, count: usize) -> &mut Self {
        if count > 0 {
            self.problems.push(Problem { name, count });
        }
        self
    }
}

#[derive(Debug, Default, Serialize)]
pub struct Problem {
    name: &'static str,
    #[serde(skip_serializing_if = "crate::skip_if_default")]
    count: usize,
}
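
A brief sketch (again illustrative, not part of the vendored source) of the `Validation` builder above; `problem()` silently drops zero counts, so only non-zero problems end up in the payload:

    let mut v = Validation::with_version(2);
    v.problem("missingParents", 5).problem("orphans", 0);
    // Only the non-zero entry is recorded.
    assert_json(
        &v,
        serde_json::json!({"version": 2, "problems": [{"name": "missingParents", "count": 5}]}),
    );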
#[cfg(test)]
mod engine_tests {
    use super::*;

    #[test]
    fn test_engine() {
        let mut e = Engine::new("test_engine");
        e.finished();
        assert_json(&e, serde_json::json!({"name": "test_engine", "when": 0.0}));
    }

    #[test]
    fn test_engine_not_finished() {
        let e = Engine::new("test_engine");
        serde_json::to_value(&e).expect_err("unfinished stopwatch should fail");
    }

    #[test]
    fn test_incoming() {
        let mut i = EngineIncoming::new();
        i.applied(1);
        i.failed(2);
        let mut e = Engine::new("TestEngine");
        e.incoming(i);
        e.finished();
        assert_json(
            &e,
            serde_json::json!({"name": "TestEngine", "when": 0.0, "incoming": {"applied": 1, "failed": 2}}),
        );
    }

    #[test]
    fn test_outgoing() {
        let mut o = EngineOutgoing::new();
        o.sent(2);
        o.failed(1);
        let mut e = Engine::new("TestEngine");
        e.outgoing(o);
        e.finished();
        assert_json(
            &e,
            serde_json::json!({"name": "TestEngine", "when": 0.0, "outgoing": [{"sent": 2, "failed": 1}]}),
        );
    }

    #[test]
    fn test_failure() {
        let mut e = Engine::new("TestEngine");
        e.failure(SyncFailure::Http { code: 500 });
        e.finished();
        assert_json(
            &e,
            serde_json::json!({"name": "TestEngine",
                               "when": 0.0,
                               "failureReason": {"name": "httperror", "code": 500}
            }),
        );
    }

    #[test]
    fn test_raw() {
        let mut e = Engine::new("TestEngine");
        let mut inc = EngineIncoming::new();
        inc.applied(10);
        e.incoming(inc);
        let mut out = EngineOutgoing::new();
        out.sent(1);
        e.outgoing(out);
        e.failure(SyncFailure::Http { code: 500 });
        e.finished();

        assert_eq!(e.outgoing.len(), 1);
        assert_eq!(e.incoming.as_ref().unwrap().applied, 10);
        assert_eq!(e.outgoing[0].sent, 1);
        assert!(e.failure.is_some());
        serde_json::to_string(&e).expect("should get json");
    }
}
/// A single sync. May have many engines, may have its own failure.
#[derive(Debug, Serialize, Default)]
pub struct SyncTelemetry {
    #[serde(flatten)]
    when_took: Stopwatch,

    #[serde(skip_serializing_if = "Vec::is_empty")]
    engines: Vec<Engine>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "failureReason")]
    failure: Option<SyncFailure>,
}

impl SyncTelemetry {
    pub fn new() -> Self {
        Default::default()
    }

    pub fn engine(&mut self, mut e: Engine) {
        e.finished();
        self.engines.push(e);
    }

    pub fn failure(&mut self, failure: SyncFailure) {
        assert!(self.failure.is_none());
        self.failure = Some(failure);
    }

    // Note that unlike other 'finished' methods, this isn't private - someone
    // needs to explicitly call this before handing the json payload to
    // whatever ends up submitting it.
    pub fn finished(&mut self) {
        self.when_took = self.when_took.finished();
    }
}
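
As the comment on `finished()` notes, whoever submits the payload must call it explicitly; a minimal sketch (not part of the vendored source) assuming the types above:

    let mut s = SyncTelemetry::new();
    s.engine(Engine::new("example"));
    // Without this the flattened Stopwatch stays unfinished
    // (cf. test_engine_not_finished above) and serialization is expected to fail.
    s.finished();
    let payload = serde_json::to_string(&s).expect("finished telemetry should serialize");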
#[cfg(test)]
mod sync_tests {
    use super::*;

    #[test]
    fn test_accum() {
        let mut s = SyncTelemetry::new();
        let mut inc = EngineIncoming::new();
        inc.applied(10);
        let mut e = Engine::new("test_engine");
        e.incoming(inc);
        e.failure(SyncFailure::Http { code: 500 });
        e.finished();
        s.engine(e);
        s.finished();

        assert_json(
            &s,
            serde_json::json!({
                "when": 0.0,
                "engines": [{
                    "name":"test_engine",
                    "when":0.0,
                    "incoming": {
                        "applied": 10
                    },
                    "failureReason": {
                        "name": "httperror",
                        "code": 500
                    }
                }]
            }),
        );
    }

    #[test]
    fn test_multi_engine() {
        let mut inc_e1 = EngineIncoming::new();
        inc_e1.applied(1);
        let mut e1 = Engine::new("test_engine");
        e1.incoming(inc_e1);

        let mut inc_e2 = EngineIncoming::new();
        inc_e2.failed(1);
        let mut e2 = Engine::new("test_engine_2");
        e2.incoming(inc_e2);
        let mut out_e2 = EngineOutgoing::new();
        out_e2.sent(1);
        e2.outgoing(out_e2);

        let mut s = SyncTelemetry::new();
        s.engine(e1);
        s.engine(e2);
        s.failure(SyncFailure::Http { code: 500 });
        s.finished();
        assert_json(
            &s,
            serde_json::json!({
                "when": 0.0,
                "engines": [{
                    "name": "test_engine",
                    "when": 0.0,
                    "incoming": {
                        "applied": 1
                    }
                },{
                    "name": "test_engine_2",
                    "when": 0.0,
                    "incoming": {
                        "failed": 1
                    },
                    "outgoing": [{
                        "sent": 1
                    }]
                }],
                "failureReason": {
                    "name": "httperror",
                    "code": 500
                }
            }),
        );
    }
}
/// The Sync ping payload, as documented at
/// https://firefox-source-docs.mozilla.org/toolkit/components/telemetry/telemetry/data/sync-ping.html.
/// May have many syncs, may have many events. However, due to the architecture
/// of apps which use these components, this payload is almost certainly not
/// suitable for submitting directly. For example, we will always return a
/// payload with exactly 1 sync, and it will not know certain other fields
/// in the payload, such as the *hashed* FxA device ID (see
/// https://searchfox.org/mozilla-central/rev/c3ebaf6de2d481c262c04bb9657eaf76bf47e2ac/services/sync/modules/browserid_identity.js#185
/// for an example of how the device ID is constructed). The intention is that
/// consumers of this will use this to create a "real" payload - eg, accumulating
/// until some threshold number of syncs is reached, and contributing
/// additional data which only the consumer knows.
#[derive(Debug, Serialize, Default)]
pub struct SyncTelemetryPing {
    version: u32,

    uid: Option<String>,

    #[serde(skip_serializing_if = "Vec::is_empty")]
    events: Vec<Event>,

    #[serde(skip_serializing_if = "Vec::is_empty")]
    syncs: Vec<SyncTelemetry>,
}

impl SyncTelemetryPing {
    pub fn new() -> Self {
        Self {
            version: 1,
            ..Default::default()
        }
    }

    pub fn uid(&mut self, uid: String) {
        if let Some(ref existing) = self.uid {
            if *existing != uid {
                log::warn!("existing uid {} being replaced by {}", existing, uid);
            }
        }
        self.uid = Some(uid);
    }

    pub fn sync(&mut self, mut s: SyncTelemetry) {
        s.finished();
        self.syncs.push(s);
    }

    pub fn event(&mut self, e: Event) {
        self.events.push(e);
    }
}

ffi_support::implement_into_ffi_by_json!(SyncTelemetryPing);
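
The doc comment above says consumers should accumulate this payload rather than submit it directly; a hedged, illustrative sketch of what that might look like on the consuming side (the `PingAccumulator` type and the threshold of 10 are invented here, not part of this crate):

    struct PingAccumulator {
        ping: SyncTelemetryPing,
        syncs_seen: usize,
    }

    impl PingAccumulator {
        /// Record one sync; return a JSON payload once enough syncs have accumulated.
        fn record(&mut self, s: SyncTelemetry) -> Option<String> {
            self.ping.sync(s);
            self.syncs_seen += 1;
            if self.syncs_seen < 10 {
                return None;
            }
            // Swap in a fresh ping and serialize the completed one; a real consumer
            // would also add the fields only it knows (eg, the hashed FxA device ID).
            let done = std::mem::replace(&mut self.ping, SyncTelemetryPing::new());
            self.syncs_seen = 0;
            Some(serde_json::to_string(&done).expect("ping should serialize"))
        }
    }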
#[cfg(test)]
mod ping_tests {
    use super::*;
    #[test]
    fn test_ping() {
        let engine = Engine::new("test");
        let mut s = SyncTelemetry::new();
        s.engine(engine);
        let mut p = SyncTelemetryPing::new();
        p.uid("user-id".into());
        p.sync(s);
        let event = Event::new("foo", "bar");
        p.event(event);
        assert_json(
            &p,
            serde_json::json!({
                "events": [{
                    "method": "bar", "object": "foo"
                }],
                "syncs": [{
                    "engines": [{
                        "name": "test", "when": 0.0
                    }],
                    "when": 0.0
                }],
                "uid": "user-id",
                "version": 1
            }),
        );
    }
}
@ -57,6 +57,8 @@ fluent-langneg-ffi = { path = "../../../../intl/locale/rust/fluent-langneg-ffi"
fluent = { version = "0.11" , features = ["fluent-pseudo"] }
fluent-ffi = { path = "../../../../intl/l10n/rust/fluent-ffi" }

sync15-traits = { git = "https://github.com/mozilla/application-services", rev = "120e51dd5f2aab4194cf0f7e93b2a8923f4504bb" }

[build-dependencies]
rustc_version = "0.2"


@ -70,6 +70,8 @@ extern crate fluent_langneg_ffi;
extern crate fluent;
extern crate fluent_ffi;

extern crate sync15_traits;

#[cfg(feature = "remote")]
extern crate remote;