Bug 1803726 (part 1) - update to a new application-services. r=bdk,supply-chain-reviewers

Differential Revision: https://phabricator.services.mozilla.com/D163672
Mark Hammond 2022-12-04 10:30:29 +00:00
Parent 836ebe8236
Commit e925353b45
69 changed files with 2107 additions and 1522 deletions


@@ -50,7 +50,7 @@ rev = "fb7a2b12ced3b43e6a268621989c6191d1ed7e39"
[source."https://github.com/mozilla/application-services"]
git = "https://github.com/mozilla/application-services"
replace-with = "vendored-sources"
rev = "b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb"
rev = "4d858e4266c6d06d86d361279e6e04dbe5583cb9"
[source."https://github.com/mozilla-spidermonkey/jsparagus"]
git = "https://github.com/mozilla-spidermonkey/jsparagus"

24
Cargo.lock generated

@@ -1549,9 +1549,14 @@ dependencies = [
[[package]]
name = "error-support"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb#b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb"
source = "git+https://github.com/mozilla/application-services?rev=4d858e4266c6d06d86d361279e6e04dbe5583cb9#4d858e4266c6d06d86d361279e6e04dbe5583cb9"
dependencies = [
"lazy_static",
"log",
"parking_lot 0.12.999",
"uniffi",
"uniffi_build",
"uniffi_macros",
]
[[package]]
@@ -2683,7 +2688,7 @@ dependencies = [
[[package]]
name = "interrupt-support"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb#b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb"
source = "git+https://github.com/mozilla/application-services?rev=4d858e4266c6d06d86d361279e6e04dbe5583cb9#4d858e4266c6d06d86d361279e6e04dbe5583cb9"
dependencies = [
"lazy_static",
"parking_lot 0.12.999",
@@ -3767,7 +3772,7 @@ dependencies = [
[[package]]
name = "nss_build_common"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb#b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb"
source = "git+https://github.com/mozilla/application-services?rev=4d858e4266c6d06d86d361279e6e04dbe5583cb9#4d858e4266c6d06d86d361279e6e04dbe5583cb9"
[[package]]
name = "nsstring"
@@ -4945,7 +4950,7 @@ dependencies = [
[[package]]
name = "sql-support"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb#b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb"
source = "git+https://github.com/mozilla/application-services?rev=4d858e4266c6d06d86d361279e6e04dbe5583cb9#4d858e4266c6d06d86d361279e6e04dbe5583cb9"
dependencies = [
"ffi-support",
"interrupt-support",
@@ -5127,7 +5132,7 @@ dependencies = [
[[package]]
name = "sync-guid"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb#b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb"
source = "git+https://github.com/mozilla/application-services?rev=4d858e4266c6d06d86d361279e6e04dbe5583cb9#4d858e4266c6d06d86d361279e6e04dbe5583cb9"
dependencies = [
"base64",
"rand 0.8.5",
@@ -5138,7 +5143,7 @@ dependencies = [
[[package]]
name = "sync15"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb#b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb"
source = "git+https://github.com/mozilla/application-services?rev=4d858e4266c6d06d86d361279e6e04dbe5583cb9#4d858e4266c6d06d86d361279e6e04dbe5583cb9"
dependencies = [
"anyhow",
"error-support",
@@ -5168,7 +5173,7 @@ dependencies = [
[[package]]
name = "tabs"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb#b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb"
source = "git+https://github.com/mozilla/application-services?rev=4d858e4266c6d06d86d361279e6e04dbe5583cb9#4d858e4266c6d06d86d361279e6e04dbe5583cb9"
dependencies = [
"anyhow",
"error-support",
@@ -5912,11 +5917,12 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "viaduct"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb#b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb"
source = "git+https://github.com/mozilla/application-services?rev=4d858e4266c6d06d86d361279e6e04dbe5583cb9#4d858e4266c6d06d86d361279e6e04dbe5583cb9"
dependencies = [
"ffi-support",
"log",
"once_cell",
"parking_lot 0.12.999",
"prost",
"prost-derive",
"serde",
@@ -6068,7 +6074,7 @@ dependencies = [
[[package]]
name = "webext-storage"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb#b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb"
source = "git+https://github.com/mozilla/application-services?rev=4d858e4266c6d06d86d361279e6e04dbe5583cb9#4d858e4266c6d06d86d361279e6e04dbe5583cb9"
dependencies = [
"error-support",
"ffi-support",


@@ -161,12 +161,12 @@ midir = { git = "https://github.com/mozilla/midir.git", rev = "e1b4dcb767f9e69af
minidump_writer_linux = { git = "https://github.com/rust-minidump/minidump-writer.git", rev = "75ada456c92a429704691a85e1cb42fef8cafc0d" }
# application-services overrides to make updating them all simpler.
interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb" }
sql-support = { git = "https://github.com/mozilla/application-services", rev = "b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb" }
sync15 = { git = "https://github.com/mozilla/application-services", rev = "b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb" }
tabs = { git = "https://github.com/mozilla/application-services", rev = "b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb" }
viaduct = { git = "https://github.com/mozilla/application-services", rev = "b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb" }
webext-storage = { git = "https://github.com/mozilla/application-services", rev = "b09ffe23ee60a066176e5d7f9f2c6cd95c528ceb" }
interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "4d858e4266c6d06d86d361279e6e04dbe5583cb9" }
sql-support = { git = "https://github.com/mozilla/application-services", rev = "4d858e4266c6d06d86d361279e6e04dbe5583cb9" }
sync15 = { git = "https://github.com/mozilla/application-services", rev = "4d858e4266c6d06d86d361279e6e04dbe5583cb9" }
tabs = { git = "https://github.com/mozilla/application-services", rev = "4d858e4266c6d06d86d361279e6e04dbe5583cb9" }
viaduct = { git = "https://github.com/mozilla/application-services", rev = "4d858e4266c6d06d86d361279e6e04dbe5583cb9" }
webext-storage = { git = "https://github.com/mozilla/application-services", rev = "4d858e4266c6d06d86d361279e6e04dbe5583cb9" }
# Patch mio 0.6 to use winapi 0.3 and miow 0.3, getting rid of winapi 0.2.
# There is not going to be a new version of mio 0.6, mio now being >= 0.7.11.


@@ -1754,4 +1754,3 @@ who = "Henri Sivonen <hsivonen@hsivonen.fi>"
criteria = "safe-to-deploy"
version = "0.2.0"
notes = "I, Henri Sivonen, wrote this crate myself for Gecko even though it's published on crates.io."


@@ -1438,4 +1438,3 @@ criteria = "safe-to-deploy"
[[exemptions.zip]]
version = "0.6.2"
criteria = "safe-to-run"


@@ -294,4 +294,3 @@ version = "0.6.0"
who = "Johan Andersson <opensource@embark-studios.com>"
criteria = "safe-to-run"
version = "0.6.0"


@@ -1 +1 @@
{"files":{"Cargo.toml":"bfb27ec0065630fe2fb81346586eae8aef85ad093713679b8c3c87f10c2359b7","android/build.gradle":"200fe9fcf26477ae4e941dd1e702c43deae9fb0a7252569bd7352eac1771efbe","android/src/main/AndroidManifest.xml":"4f8b16fa6a03120ac810c6438a3a60294075414d92e06caa7e85388e389e5d17","build.rs":"3c128073c7dece175e6e7117fb363e8047fb997b2cfa8ab29f7c2cc484cb7916","src/errorsupport.udl":"be379c47340f504ae9885ca20cf9849d273c7dadc2782c5a53c1b41d5f06f32b","src/handling.rs":"eaf83a921116e3443d932582bb68871b8ffa336238f16f5d026b1fe75cea1d01","src/lib.rs":"5d996f16d289bce2a44fe8d7c5c538597770c9f67f425bab06e2efa982381ca5","src/macros.rs":"30a56a9ddaabb8b0f794b2ee76623277bc6dc9da41040bca54fc2e276fc0322e","src/reporting.rs":"65ab92cff0980f594da2c8556cc050066f137615818dbbd52152438b15a87816","uniffi.toml":"644fe81c12fe3c01ee81e017ca3c00d0e611f014b7eade51aadaf208179a3450"},"package":null}
{"files":{"Cargo.toml":"83ebcdeefa2cf29b7f1d3bbfad39ef9da77ca3fabc21db6125b0550f46d9e71e","README.md":"7719c19eacb99e3208c611374a5e08da331d1317d5e0d5df7c0db25936d5d767","android/build.gradle":"200fe9fcf26477ae4e941dd1e702c43deae9fb0a7252569bd7352eac1771efbe","android/src/main/AndroidManifest.xml":"4f8b16fa6a03120ac810c6438a3a60294075414d92e06caa7e85388e389e5d17","build.rs":"587f12902443392d5bbbf46f20937018acb4b3f180d1240934bb646583c430eb","src/errorsupport.udl":"e793034d01a2608298528051757f38405e006ee1abc4cf65dc6f18c53590ace8","src/handling.rs":"545c969d71907d81cb5af93f435ba443508adda2ec57ac2a975fed7d9828ccea","src/lib.rs":"623ad5e238bf41a071e833a8ac0d50394ab8fd63bf7aa4eed5675d1d95e32195","src/macros.rs":"de029394fec9d0367d1b7f5156c60ebe547b79370b04c78f4c1de9ffe2f8ea86","src/redact.rs":"c9a4df1a87be68b15d583587bda941d4c60a1d0449e2d43ff99f3611a290a863","src/reporting.rs":"38efd24d86ba8facfb181cb27e8b698d2831db0afab85691ffda034a4dc68dfa","uniffi.toml":"644fe81c12fe3c01ee81e017ca3c00d0e611f014b7eade51aadaf208179a3450"},"package":null}

14
third_party/rust/error-support/Cargo.toml vendored

@@ -7,18 +7,14 @@ license = "MPL-2.0"
[dependencies]
log = "0.4"
lazy_static = { version = "1.4", optional = true }
parking_lot = { version = ">=0.11,<=0.12", optional = true }
uniffi = { version = "^0.21", optional = true }
uniffi_macros = { version = "^0.21", optional = true }
lazy_static = { version = "1.4" }
parking_lot = { version = ">=0.11,<=0.12" }
uniffi = { version = "^0.21" }
uniffi_macros = { version = "^0.21" }
[dependencies.backtrace]
optional = true
version = "0.3"
[features]
default = []
reporting = ["lazy_static", "parking_lot", "uniffi", "uniffi_macros", "uniffi_build"]
[build-dependencies]
uniffi_build = { version = "^0.21", features=["builtin-bindgen"], optional = true }
uniffi_build = { version = "^0.21", features=["builtin-bindgen"] }

89
third_party/rust/error-support/README.md vendored Normal file

@@ -0,0 +1,89 @@
# Application error handling support
This crate provides support for other crates to effectively report errors.
Because app-services components get embedded in various apps, written in
multiple languages, we face several challenges:
- Rust stacktraces are generally not available so we often need to provide
extra context to help debug where an error is occurring.
- Without stack traces, Sentry and other error reporting systems don't do a
great job at auto-grouping errors together, so we need to manually group them.
- We can't hook directly into the error reporting system or even depend on a
particular error reporting system to be in use. This means the system
needs to be simple and flexible enough to plug in to multiple systems.
## Breadcrumbs as context
We use a breadcrumb system as the basis for adding context to errors.
Breadcrumbs are individual messages that form a log-style stream, and the most
recent breadcrumbs get attached to each error reported. There are a lot of
other ways to provide context to an error, but we just use breadcrumbs for
everything because it's a relatively simple system that's easy to hook up to
error reporting platforms.
## Basic error reporting tools
Basic error reporting is handled using several macros:
- `report_error!()` creates an error report. It takes a `type_name` as the
first parameter, and `format!`-style arguments afterwards. `type_name` is
used to group errors together and shows up as the error title/header. Use the
format-style args to create a long-form description of the issue. Most of the
time you won't need to call this directly, since it happens automatically when
converting internal results to public ones. However, it can be useful when you
see an error that you want to report, but want to try to recover from rather
than returning an error.
- `breadcrumb!()` creates a new breadcrumb that will show up on future errors.
- `trace_error!()` takes a `Result<>` and creates a breadcrumb if it's an
`Err`. This is useful when you're trying to track down where an error
originated, since you can wrap each possible source of the error with
`trace_error!()`. `trace_error!()` returns the result passed in to it,
which makes wrapping function calls easy (see the combined sketch below).
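A minimal sketch of how these macros fit together; `fetch_records`, `Record`, and `MyError` are hypothetical names, not part of this crate:
```rust
use error_support::{breadcrumb, report_error, trace_error};

fn sync_records() -> Result<Vec<Record>, MyError> {
    breadcrumb!("starting record sync");
    // Wrap a possible error source; `trace_error!` returns the Result
    // unchanged, adding a breadcrumb only if it's an `Err`.
    let records = trace_error!(fetch_records())?;
    if records.is_empty() {
        // Recoverable, but worth reporting - grouped under "sync-empty-server".
        report_error!("sync-empty-server", "server returned no records");
    }
    Ok(records)
}
```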
## Public/Internal errors and converting between them
Our components generally create two error enums: one for internal use and one
for the public API. They are typically named `Error` and
`[ComponentName]ApiError`. The internal error typically carries a lot of
low-level detail and many variants, which is useful to us app-services
developers. The public error typically has fewer variants, with the variants
often just storing a reason string rather than low-level error codes. There
are also two `Result<>` types that correspond to these two errors, typically
named `Result` and `ApiResult`.
This means we need to convert from internal errors to public errors, which has
the nice side benefit of giving us a centralized spot to make error reports for
selected public errors. This is done with the `ErrorHandling` type and
`GetErrorHandling` trait in `src/handling.rs`. The basic system is that you
convert from one error to another and choose whether you want to report the error
and/or log a warning. When reporting an error you can choose a type name to
group the error with. This system is extremely flexible, since you can inspect
the internal error and use error codes or other data to determine if it should
be reported or not, which type name to report it with, etc. Eventually we also
hope to allow expected errors to be counted in telemetry (think things like
network errors, shutdown errors, etc.).
To assist this conversion, the `handle_error!` macro can be used to
automatically convert between `Result` and `ApiResult` using
`GetErrorHandling`. Note that this depends on having the `Result` type
imported in your module with a `use` statement.
See the `logins::errors` and `logins::store` modules for an example of how this
all fits together.
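A sketch of what such a conversion can look like. The `Error` and `TabsApiError` variants here are hypothetical, but the `ErrorHandling` builder methods follow `src/handling.rs`:
```rust
impl GetErrorHandling for Error {
    type ExternalError = TabsApiError;

    fn get_error_handling(&self) -> ErrorHandling<Self::ExternalError> {
        match self {
            // An expected error: convert it and log a warning, but don't report it.
            Error::Network(e) => {
                ErrorHandling::convert(TabsApiError::Network { reason: e.to_string() })
                    .log_warning()
            }
            // Everything else: convert, and also create an error report grouped
            // under a single type name.
            _ => ErrorHandling::convert(TabsApiError::Unexpected {
                reason: self.to_string(),
            })
            .report_error("tabs-unexpected-error"),
        }
    }
}
```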
## ⚠️ Personally Identifiable Information ⚠️
When converting internal errors to public errors, we should ensure that there
is no personally identifying information (PII) in any error reports. We should
also ensure that no PII is contained in the public error enum, since consumers
may end up using those for their own error reports.
We operate on a best-effort basis to ensure this. Our error details often come
from an error in one of our dependencies, which makes it very difficult to be
completely sure. For example, `rusqlite::Error` could include data from
a user's database in their errors, which would then appear in our error
variants. However, we've never seen that in practice so we are comfortable
including the `rusqlite` error message in our error reports, without attempting
to sanitize them.
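The `redact_url` and `redact_compact_jwe` helpers in `src/redact.rs` exist to support this; a sketch of the intended pattern (the type name is a hypothetical example):
```rust
// Scrub the URL before it can reach an error report or a log line.
report_error!("storage-bad-url", "failed to fetch {}", redact_url(&url));
```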

1
third_party/rust/error-support/build.rs vendored

@@ -4,6 +4,5 @@
*/
fn main() {
#[cfg(feature = "reporting")]
uniffi_build::generate_scaffolding("./src/errorsupport.udl").unwrap();
}


@@ -3,7 +3,11 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
namespace errorsupport {
// Set the global error reporter. This is typically done early in startup.
void set_application_error_reporter(ApplicationErrorReporter error_reporter);
// Unset the global error reporter. This is typically done at shutdown for
// platforms that want to clean up references, like Desktop.
void unset_application_error_reporter();
};
callback interface ApplicationErrorReporter {
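A sketch of installing and removing a reporter from Rust; foreign-language consumers do the same through the generated UniFFI bindings. `MyReporter` and the exact `report_breadcrumb` signature are illustrative assumptions:
```rust
struct MyReporter;

impl ApplicationErrorReporter for MyReporter {
    fn report_error(&self, type_name: String, message: String) {
        // Forward to Sentry or another platform error pipeline.
        println!("error [{}]: {}", type_name, message);
    }
    fn report_breadcrumb(&self, message: String, module: String, line: u32, column: u32) {
        println!("breadcrumb: {} ({}:{}:{})", message, module, line, column);
    }
}

// Early in startup:
set_application_error_reporter(Box::new(MyReporter));
// At shutdown, platforms that want to clean up references can:
unset_application_error_reporter();
```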


@@ -106,10 +106,7 @@ where
// XXX - should we arrange for the `report_class` to have the
// original crate calling this as a prefix, or will we still be
// able to identify that?
#[cfg(feature = "reporting")]
crate::report_error(report_class, e.to_string());
#[cfg(not(feature = "reporting"))]
let _ = report_class; // avoid clippy warning when feature's not enabled.
crate::report_error_to_app(report_class, e.to_string());
}
handling.err
}

9
third_party/rust/error-support/src/lib.rs vendored

@@ -24,11 +24,13 @@ pub mod backtrace {
}
}
#[cfg(feature = "reporting")]
mod redact;
pub use redact::*;
mod reporting;
#[cfg(feature = "reporting")]
pub use reporting::{
report_breadcrumb, report_error, set_application_error_reporter, ApplicationErrorReporter,
report_breadcrumb, report_error_to_app, set_application_error_reporter,
unset_application_error_reporter, ApplicationErrorReporter,
};
mod handling;
@@ -155,5 +157,4 @@ macro_rules! define_error {
};
}
#[cfg(feature = "reporting")]
uniffi_macros::include_scaffolding!("errorsupport");


@@ -21,7 +21,7 @@ macro_rules! report_error {
($type_name:expr, $($arg:tt)*) => {
let message = std::format!($($arg)*);
::log::warn!("report {}: {}", $type_name, message);
$crate::report_error($type_name.to_string(), message.to_string());
$crate::report_error_to_app($type_name.to_string(), message.to_string());
};
}

75
third_party/rust/error-support/src/redact.rs vendored Normal file

@@ -0,0 +1,75 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Functions to redact strings to remove PII before logging them
/// Redact a URL.
///
/// It's tricky to redact a URL without revealing PII. We check for various known bad URL forms
/// and report them, otherwise we just log "<URL>".
pub fn redact_url(url: &str) -> String {
if url.is_empty() {
return "<URL (empty)>".to_string();
}
match url.find(':') {
None => "<URL (no scheme)>".to_string(),
Some(n) => {
let mut chars = url[0..n].chars();
match chars.next() {
// No characters in the scheme
None => return "<URL (empty scheme)>".to_string(),
Some(c) => {
// First character must be alphabetic
if !c.is_ascii_alphabetic() {
return "<URL (invalid scheme)>".to_string();
}
}
}
for c in chars {
// Subsequent characters must be in the set ( alpha | digit | "+" | "-" | "." )
if !(c.is_ascii_alphanumeric() || c == '+' || c == '-' || c == '.') {
return "<URL (invalid scheme)>".to_string();
}
}
"<URL>".to_string()
}
}
}
/// Redact a compact JWE string (five base64 segments, separated by `.` chars)
pub fn redact_compact_jwe(url: &str) -> String {
url.replace(|ch| ch != '.', "x")
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_redact_url() {
assert_eq!(redact_url("http://some.website.com/index.html"), "<URL>");
assert_eq!(redact_url("about:config"), "<URL>");
assert_eq!(redact_url(""), "<URL (empty)>");
assert_eq!(redact_url("://some.website.com/"), "<URL (empty scheme)>");
assert_eq!(redact_url("some.website.com/"), "<URL (no scheme)>");
assert_eq!(redact_url("some.website.com/"), "<URL (no scheme)>");
assert_eq!(
redact_url("abc%@=://some.website.com/"),
"<URL (invalid scheme)>"
);
assert_eq!(
redact_url("0https://some.website.com/"),
"<URL (invalid scheme)>"
);
assert_eq!(
redact_url("a+weird-but.lega1-SCHEME://some.website.com/"),
"<URL>"
);
}
#[test]
fn test_redact_compact_jwe() {
assert_eq!(redact_compact_jwe("abc.1234.x3243"), "xxx.xxxx.xxxxx")
}
}


@@ -53,7 +53,11 @@ pub fn set_application_error_reporter(reporter: Box<dyn ApplicationErrorReporter
*APPLICATION_ERROR_REPORTER.write() = reporter;
}
pub fn report_error(type_name: String, message: String) {
pub fn unset_application_error_reporter() {
*APPLICATION_ERROR_REPORTER.write() = Box::new(DefaultApplicationErrorReporter)
}
pub fn report_error_to_app(type_name: String, message: String) {
APPLICATION_ERROR_REPORTER
.read()
.report_error(type_name, message);


@@ -1 +1 @@
{"files":{"Cargo.toml":"7664cc89612aaba5a3142eac4453e7fcb56a107d86da99aa85fdbbd02f631eb4","build.rs":"d89f20669c2e243652630ce025199d187d2a128be6a626a02e0679bd74a2ad5a","src/conn_ext.rs":"46afc4446a93d619bf6b4b2c0cc3110c02d220e5436b3eccbce450d0dd7c5dd9","src/each_chunk.rs":"8aaba842e43b002fbc0fee95d14ce08faa7187b1979c765b2e270cd4802607a5","src/lib.rs":"5ef805ff741181745eb13cff2b93f044021b8df7ed5cde1551f520bce3c82823","src/maybe_cached.rs":"0b18425595055883a98807fbd62ff27a79c18af34e7cb3439f8c3438463ef2dd","src/open_database.rs":"bae2a576185eb784f1f9dafd9b38c85a90bc76b431e7fd4140507337cb0cdfcd","src/repeat.rs":"1885f4dd36cc21fabad1ba28ad2ff213ed17707c57564e1c0d7b0349112118bb"},"package":null}
{"files":{"Cargo.toml":"7664cc89612aaba5a3142eac4453e7fcb56a107d86da99aa85fdbbd02f631eb4","build.rs":"d89f20669c2e243652630ce025199d187d2a128be6a626a02e0679bd74a2ad5a","src/conn_ext.rs":"5be15d5eaf2fa733bc8b2dc7fb4bcd3845d4e10120c8c6f248a06592a37ee80f","src/each_chunk.rs":"8aaba842e43b002fbc0fee95d14ce08faa7187b1979c765b2e270cd4802607a5","src/lib.rs":"5ef805ff741181745eb13cff2b93f044021b8df7ed5cde1551f520bce3c82823","src/maybe_cached.rs":"0b18425595055883a98807fbd62ff27a79c18af34e7cb3439f8c3438463ef2dd","src/open_database.rs":"dcc7140095f9ba20442b5e6294d8af924d54588837cf2ce019dff013fa198783","src/repeat.rs":"1885f4dd36cc21fabad1ba28ad2ff213ed17707c57564e1c0d7b0349112118bb"},"package":null}


@@ -58,6 +58,11 @@ pub trait ConnExt {
Ok(())
}
/// Execute a single statement.
fn execute_one(&self, stmt: &str) -> SqlResult<()> {
self.execute_all(&[stmt])
}
/// Equivalent to `Connection::execute` but caches the statement so that subsequent
/// calls to `execute_cached` will have improved performance.
fn execute_cached<P: Params>(&self, sql: &str, params: P) -> SqlResult<usize> {


@@ -63,7 +63,7 @@ pub trait ConnectionInitializer {
// Runs immediately after creation for all types of connections. If writable,
// will *not* be in the transaction created for the "only writable" functions above.
fn prepare(&self, _conn: &Connection) -> Result<()> {
fn prepare(&self, _conn: &Connection, _db_empty: bool) -> Result<()> {
Ok(())
}
@@ -109,14 +109,14 @@ fn do_open_database_with_flags<CI: ConnectionInitializer, P: AsRef<Path>>(
log::debug!("{}: opening database", CI::NAME);
let mut conn = Connection::open_with_flags(path, open_flags)?;
log::debug!("{}: checking if initialization is necessary", CI::NAME);
let run_init = should_init(&conn)?;
let db_empty = is_db_empty(&conn)?;
log::debug!("{}: preparing", CI::NAME);
connection_initializer.prepare(&conn)?;
connection_initializer.prepare(&conn, db_empty)?;
if open_flags.contains(OpenFlags::SQLITE_OPEN_READ_WRITE) {
let tx = conn.transaction_with_behavior(TransactionBehavior::Immediate)?;
if run_init {
if db_empty {
log::debug!("{}: initializing new database", CI::NAME);
connection_initializer.init(&tx)?;
} else {
@@ -141,7 +141,7 @@ fn do_open_database_with_flags<CI: ConnectionInitializer, P: AsRef<Path>>(
} else {
// There's an implied requirement that the first connection to a DB is
// writable, so read-only connections do much less, but panic if stuff is wrong
assert!(!run_init, "existing writer must have initialized");
assert!(!db_empty, "existing writer must have initialized");
assert!(
get_schema_version(&conn)? == CI::END_VERSION,
"existing writer must have migrated"
@@ -212,7 +212,7 @@ fn try_handle_db_failure<CI: ConnectionInitializer, P: AsRef<Path>>(
}
}
fn should_init(conn: &Connection) -> Result<bool> {
fn is_db_empty(conn: &Connection) -> Result<bool> {
Ok(conn.query_one::<u32>("SELECT COUNT(*) FROM sqlite_master")? == 0)
}
@@ -340,7 +340,7 @@ mod test {
const NAME: &'static str = "test db";
const END_VERSION: u32 = 4;
fn prepare(&self, conn: &Connection) -> Result<()> {
fn prepare(&self, conn: &Connection, _: bool) -> Result<()> {
self.push_call("prep");
conn.execute_batch(
"


@@ -1 +1 @@
{"files":{"Cargo.toml":"1f11acaa90a112979205b4c7af9ba0c015afab5f3141dd082d58c862c84490e3","README.md":"6d4ff5b079ac5340d18fa127f583e7ad793c5a2328b8ecd12c3fc723939804f2","src/bso_record.rs":"1983a4ed506e8ea3e749aca93aad672464cd6f370ff18f6108bda51f4a357260","src/client/coll_state.rs":"b0c47e44168ea2c7017cd8531f76bb230f9be66b119bb7416537b8693a1d0a0a","src/client/coll_update.rs":"021144c8606f8a7114b194ed830f4f756c75105146620f36b7ff9c37237d49f4","src/client/collection_keys.rs":"847296c161773931d3b9dcd6e1ec5ac740e69acc032faa15bb1eed6a300c6336","src/client/mod.rs":"9500b1d22a5064bbbd6a3d6bcc63fc4191e8ea4605ded359bc6c2dc2887626a3","src/client/request.rs":"b8996ebd27127c71c1ecfd329e925859df71caa5529f906b0ce2b565cf4362b6","src/client/state.rs":"590b8fc7458b7973d81878075e6cf65c5c529f9d9c9794e30e4158c8ded26727","src/client/status.rs":"f445a8765dac9789444e23b5145148413407bb1d18a15ef56682243997f591bf","src/client/storage_client.rs":"d2b52946f13a724a13f9f97b122ba84190cc334b30bb53c7c5791d35d115bf50","src/client/sync.rs":"ed7225c314df27793ed5de6da93cc4b75a98da1c14ac82e37a723a99821d4dc7","src/client/sync_multiple.rs":"a2f6372496cc37025b07b260f6990699323ceb460d8e44d320502ad8e858fa06","src/client/token.rs":"b268759d31e0fe17e0e2a428694cd9a317fcfbdd52f023d5d8c7cc6f00f1a102","src/client/util.rs":"71cc70ee41f821f53078675e636e9fad9c6046fa1a989e37f5487e340a2277d6","src/client_types.rs":"c53e6fa8e9d5c7b56a87c6803ec3fc808d471b1d8c20c0fbb4ec0c02571b21ba","src/clients_engine/engine.rs":"856a099586af0e0d897437e6e2cea1244169b7406e0809e0d3f17b8970e0ad69","src/clients_engine/mod.rs":"461729e6f89b66b2cbd89b041a03d4d6a8ba582284ed4f3015cb13e1a0c6da97","src/clients_engine/record.rs":"59826b7f21b45d3dbee7b332abde774cb9cfa82eaa5e11a96ec95cb7d8f5a45f","src/clients_engine/ser.rs":"9796e44ed7daf04f22afbb51238ac25fd0de1438b72181351b4ca29fd70fd429","src/engine/bridged_engine.rs":"f7bb70dbc2eec46fe5ba8952c867e20b794fc01a514dc360bb5a1f15508958f9","src/engine/changeset.rs":"442aa92b5130ec0f8f2b0054acb399c547380e0060015cbf4ca7a72027440d54","src/engine/mod.rs":"67d0d7b05ab7acff03180ce0337340297111697b96eb876046e24314f14226c5","src/engine/request.rs":"f40bac0b3f5286446a4056de885fd81e4fa77e4dc7d5bbb6aa644b93201046de","src/engine/sync_engine.rs":"5314d0163ccc93d78f5879d52cf2b60b9622e80722d84d3482cfa7c26df6bfdd","src/error.rs":"a45cfe02e6301f473c34678b694943c1a04308b8c292c6e0448bf495194c3b5e","src/key_bundle.rs":"7991905758c730e7e100064559b7661c36bb8be15476467cf94f65a417f1a28a","src/lib.rs":"a6df9f32ecd622c0286582cf859072b51bc233caf9c8f7bda861a03d8fddea84","src/payload.rs":"98710dda512d5f7eccecf84c7c1cd3af37a8b360166de20ae0aca37e7461454c","src/record_types.rs":"02bb3d352fb808131d298f9b90d9c95b7e9e0138b97c5401f3b9fdacc5562f44","src/server_timestamp.rs":"ff45c59ff0be51a6de6d0ea43d6d6aa6806ada9847446c3bb178e8f0a43a4f89","src/telemetry.rs":"3471aaaaca275496ec6880723e076ce39b44fb351ca88e53fe63750a43255c33"},"package":null}
{"files":{"Cargo.toml":"647789c03441801611f5b4b3570e1fdeaee7fda2c236a0d2797749b22684d43f","README.md":"6d4ff5b079ac5340d18fa127f583e7ad793c5a2328b8ecd12c3fc723939804f2","src/bso/content.rs":"4e65e3126557a70a8add22af9944f7d0a1bd53deb4173372b12f4ac2f9267e5c","src/bso/crypto.rs":"27602dcccb37d3a55620ee4e16b705da455d49af575de115c7c79c0178eb1d6d","src/bso/mod.rs":"b5490d624a878ec1a5b0375d27683e806accc0eeb496b8b491dc3326e5f2452f","src/bso/test_utils.rs":"268960da1d8dd4724b3704d62f4e5849f699c0a2ac5aa96cbbe7da7116e4eb7a","src/client/coll_state.rs":"b0c47e44168ea2c7017cd8531f76bb230f9be66b119bb7416537b8693a1d0a0a","src/client/coll_update.rs":"cc12dfde0817eae68aa8e176497ed16e9e3307f72a33faa3fe329d7a3bfd1598","src/client/collection_keys.rs":"c27b2277a3a52033b58ab01490fc2ea7007494195dd5e6dc2c6931a4ca96795a","src/client/mod.rs":"9500b1d22a5064bbbd6a3d6bcc63fc4191e8ea4605ded359bc6c2dc2887626a3","src/client/request.rs":"8841524e37d8195867bdf6ba98c75f610cf47a4644adeebd6372cc6713f2260a","src/client/state.rs":"4e31193ef2471c1dfabf1c6a391bcb95e14ddb45855786a4194ff187d5c9347c","src/client/status.rs":"f445a8765dac9789444e23b5145148413407bb1d18a15ef56682243997f591bf","src/client/storage_client.rs":"3637b4522048353b06ad24031c150c66c13d9c27cef293e400db88807421633c","src/client/sync.rs":"ed7225c314df27793ed5de6da93cc4b75a98da1c14ac82e37a723a99821d4dc7","src/client/sync_multiple.rs":"3729d4afd90ab1bd9982a3506252c99d8f37619cc1792ef4feba352ad01a7192","src/client/token.rs":"b268759d31e0fe17e0e2a428694cd9a317fcfbdd52f023d5d8c7cc6f00f1a102","src/client/util.rs":"71cc70ee41f821f53078675e636e9fad9c6046fa1a989e37f5487e340a2277d6","src/client_types.rs":"c53e6fa8e9d5c7b56a87c6803ec3fc808d471b1d8c20c0fbb4ec0c02571b21ba","src/clients_engine/engine.rs":"ba9f8efc068392c3ecfc7241d6ddd96912036da3e497ea6920c6085ba9e537bb","src/clients_engine/mod.rs":"461729e6f89b66b2cbd89b041a03d4d6a8ba582284ed4f3015cb13e1a0c6da97","src/clients_engine/record.rs":"50bfa33610581dce97f6e6973e18dbcdbf7520f3d48f4d71b6ba04eb0a4ffa1e","src/clients_engine/ser.rs":"9796e44ed7daf04f22afbb51238ac25fd0de1438b72181351b4ca29fd70fd429","src/enc_payload.rs":"aa3eea7df49b24cd59831680a47c417b73a3e36e6b0f3f4baf14ca66bd68be6b","src/engine/bridged_engine.rs":"9c0d602b3553932e77a87caba9262d3a0fc146500c6d46f1770273be6636d064","src/engine/changeset.rs":"5e323aa07f0b18d22495a695b829326d18287ff75155b4818adf66b86e16ba00","src/engine/mod.rs":"f84a254642c1876fe56506703fb010a7866eb5d40af3fc238bf92b62a61cb6cc","src/engine/request.rs":"f40bac0b3f5286446a4056de885fd81e4fa77e4dc7d5bbb6aa644b93201046de","src/engine/sync_engine.rs":"5314d0163ccc93d78f5879d52cf2b60b9622e80722d84d3482cfa7c26df6bfdd","src/error.rs":"a45cfe02e6301f473c34678b694943c1a04308b8c292c6e0448bf495194c3b5e","src/key_bundle.rs":"56b67ef12d7cb2afca540cf3c29f1748418bbbb023f9b663344cf28fdc2e8766","src/lib.rs":"41c2171b0e1a96adfd56682ca90bd4ac59fe9390a6872f85128948bdb53a0d42","src/record_types.rs":"02bb3d352fb808131d298f9b90d9c95b7e9e0138b97c5401f3b9fdacc5562f44","src/server_timestamp.rs":"0020f31971ccbfc485894cabc3087459d42252b86d7de07f2136997864b0373b","src/telemetry.rs":"3471aaaaca275496ec6880723e076ce39b44fb351ca88e53fe63750a43255c33"},"package":null}

3
third_party/rust/sync15/Cargo.toml vendored

@@ -44,6 +44,9 @@ sync-client = ["sync-engine", "crypto", "viaduct", "url"]
# upgraded to make the sync-client part truly optional.
standalone-sync = ["sync-client"]
# A feature designed to be enabled in dev_dependencies.
test-utils = []
[dependencies]
anyhow = "1.0"
base16 = { version = "0.2", optional = true }

384
third_party/rust/sync15/src/bso/content.rs vendored Normal file

@@ -0,0 +1,384 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
//! This module enhances the IncomingBso and OutgoingBso records to deal with
//! arbitrary <T> types, which we call "content".
//! It can:
//! * Parse JSON into some <T> while handling tombstones and invalid json.
//! * Turn arbitrary <T> objects with an `id` field into an OutgoingBso.
use super::{IncomingBso, IncomingContent, IncomingKind, OutgoingBso, OutgoingEnvelope};
use crate::Guid;
use error_support::report_error;
use serde::Serialize;
// The only errors we return here are serde errors.
type Result<T> = std::result::Result<T, serde_json::Error>;
impl<T> IncomingContent<T> {
/// Returns Some(content) if [self.kind] is [IncomingKind::Content], None otherwise.
pub fn content(self) -> Option<T> {
match self.kind {
IncomingKind::Content(t) => Some(t),
_ => None,
}
}
}
// We don't want to force our T to be Debug, but we can be Debug if T is.
impl<T: std::fmt::Debug> std::fmt::Debug for IncomingKind<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
IncomingKind::Content(r) => {
write!(f, "IncomingKind::Content<{:?}>", r)
}
IncomingKind::Tombstone => write!(f, "IncomingKind::Tombstone"),
IncomingKind::Malformed => write!(f, "IncomingKind::Malformed"),
}
}
}
impl IncomingBso {
/// Convert an [IncomingBso] to an [IncomingContent] possibly holding a T.
pub fn into_content<T: for<'de> serde::Deserialize<'de>>(self) -> IncomingContent<T> {
match serde_json::from_str(&self.payload) {
Ok(json) => {
// We got a good serde_json::Value, see if it's a <T>.
let kind = json_to_kind(json, &self.envelope.id);
IncomingContent {
envelope: self.envelope,
kind,
}
}
Err(e) => {
// payload isn't valid json.
log::warn!("Invalid incoming cleartext {}: {}", self.envelope.id, e);
IncomingContent {
envelope: self.envelope,
kind: IncomingKind::Malformed,
}
}
}
}
}
impl OutgoingBso {
/// Creates a new tombstone record.
/// Not all collections expect tombstones.
pub fn new_tombstone(envelope: OutgoingEnvelope) -> Self {
Self {
envelope,
payload: serde_json::json!({"deleted": true}).to_string(),
}
}
/// Creates an outgoing record from some <T>, which can be made into a JSON object
/// with a valid `id`. This is the most convenient way to create an outgoing
/// item from a <T> when the default envelope is suitable.
/// Will panic if there's no good `id` in the json.
pub fn from_content_with_id<T>(record: T) -> Result<Self>
where
T: Serialize,
{
let (json, id) = content_with_id_to_json(record)?;
Ok(Self {
envelope: id.into(),
payload: serde_json::to_string(&json)?,
})
}
/// Create an Outgoing record with an explicit envelope. Will panic if the
/// payload has an ID but it doesn't match the envelope.
pub fn from_content<T>(envelope: OutgoingEnvelope, record: T) -> Result<Self>
where
T: Serialize,
{
let json = content_to_json(record, &envelope.id)?;
Ok(Self {
envelope,
payload: serde_json::to_string(&json)?,
})
}
}
// Helpers for packing and unpacking serde objects to and from a <T>. In particular:
// * Helping deal with complications around the raw json payload not having 'id' (the envelope is
// canonical) but needing it to exist when dealing with serde locally.
// For example, a record on the server after being decrypted looks like:
// `{"id": "a-guid", payload: {"field": "value"}}`
// But the `T` for this typically looks like `struct T { id: Guid, field: String}`
// So before we try and deserialize this record into a T, we copy the `id` field
// from the envelope into the payload, and when serializing from a T we do the
// reverse (ie, ensure the `id` in the payload is removed and placed in the envelope)
// * Tombstones.
// Deserializing json into a T
fn json_to_kind<T>(mut json: serde_json::Value, id: &Guid) -> IncomingKind<T>
where
T: for<'de> serde::Deserialize<'de>,
{
// In general, the payload does not carry 'id', but <T> does - so grab it from the
// envelope and put it into the json before deserializing the record.
if let serde_json::Value::Object(ref mut map) = json {
if map.contains_key("deleted") {
return IncomingKind::Tombstone;
}
match map.get("id") {
Some(serde_json::Value::String(content_id)) => {
// It exists in the payload! Note that this *should not* happen in practice
// (the `id` should *never* be in the payload), but if that does happen
// we should do the "right" thing, which is treat a mismatch as malformed.
if content_id != id {
log::trace!(
"malformed incoming record: envelope id: {} payload id: {}",
id,
content_id
);
report_error!(
"incoming-invalid-mismatched-ids",
"Envelope and payload don't agree on the ID"
);
return IncomingKind::Malformed;
}
if !id.is_valid_for_sync_server() {
log::trace!("malformed incoming record: id is not valid: {}", id);
report_error!(
"incoming-invalid-bad-payload-id",
"ID in the payload is invalid"
);
return IncomingKind::Malformed;
}
// We accidentally included the ID in the record in the past, but no one should do so any more.
log::info!("incoming record has 'id' in the payload - it does match, but is still unexpected");
}
Some(v) => {
// It exists in the payload but is not a string - they can't possibly be
// the same as the envelope uses a String, so must be malformed.
log::trace!("malformed incoming record: id is not a string: {}", v);
report_error!("incoming-invalid-wrong_type", "ID is not a string");
return IncomingKind::Malformed;
}
None => {
if !id.is_valid_for_sync_server() {
log::trace!("malformed incoming record: id is not valid: {}", id);
report_error!(
"incoming-invalid-bad-envelope-id",
"ID in envelope is not valid"
);
return IncomingKind::Malformed;
}
map.insert("id".to_string(), id.to_string().into());
}
}
};
match serde_json::from_value(json) {
Ok(v) => IncomingKind::Content(v),
Err(e) => {
report_error!("invalid-incoming-content", "Invalid incoming T: {}", e);
IncomingKind::Malformed
}
}
}
// Serializing <T> into json with special handling of `id`
fn content_with_id_to_json<T>(record: T) -> Result<(serde_json::Value, Guid)>
where
T: Serialize,
{
let mut json = serde_json::to_value(record)?;
let id = match json.as_object_mut() {
Some(ref mut map) => {
match map.remove("id").as_ref().and_then(|v| v.as_str()) {
Some(id) => {
let id: Guid = id.into();
assert!(id.is_valid_for_sync_server(), "record's ID is invalid");
id
}
// In practice, this is a "static" error and not influenced by runtime behavior
None => panic!("record does not have an ID in the payload"),
}
}
None => panic!("record is not a json object"),
};
Ok((json, id))
}
fn content_to_json<T>(record: T, id: &Guid) -> Result<serde_json::Value>
where
T: Serialize,
{
let mut payload = serde_json::to_value(record)?;
if let Some(ref mut map) = payload.as_object_mut() {
if let Some(content_id) = map.remove("id").as_ref().and_then(|v| v.as_str()) {
assert_eq!(content_id, id);
assert!(id.is_valid_for_sync_server(), "record's ID is invalid");
}
};
Ok(payload)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::bso::IncomingBso;
use serde::{Deserialize, Serialize};
use serde_json::json;
#[derive(Default, Debug, PartialEq, Serialize, Deserialize)]
struct TestStruct {
id: Guid,
data: u32,
}
#[test]
fn test_content_deser() {
env_logger::try_init().ok();
let json = json!({
"id": "test",
"payload": json!({"data": 1}).to_string(),
});
let incoming: IncomingBso = serde_json::from_value(json).unwrap();
assert_eq!(incoming.envelope.id, "test");
let record = incoming.into_content::<TestStruct>().content().unwrap();
let expected = TestStruct {
id: Guid::new("test"),
data: 1,
};
assert_eq!(record, expected);
}
#[test]
fn test_content_deser_empty_id() {
env_logger::try_init().ok();
let json = json!({
"id": "",
"payload": json!({"data": 1}).to_string(),
});
let incoming: IncomingBso = serde_json::from_value(json).unwrap();
// The envelope has an invalid ID, but it's not handled until we try and deserialize
// it into a T
assert_eq!(incoming.envelope.id, "");
let content = incoming.into_content::<TestStruct>();
assert!(matches!(content.kind, IncomingKind::Malformed));
}
#[test]
fn test_content_deser_invalid() {
env_logger::try_init().ok();
// And a non-empty but still invalid guid.
let json = json!({
"id": "X".repeat(65),
"payload": json!({"data": 1}).to_string(),
});
let incoming: IncomingBso = serde_json::from_value(json).unwrap();
let content = incoming.into_content::<TestStruct>();
assert!(matches!(content.kind, IncomingKind::Malformed));
}
#[test]
fn test_content_deser_not_string() {
env_logger::try_init().ok();
// A non-string id.
let json = json!({
"id": "0",
"payload": json!({"id": 0, "data": 1}).to_string(),
});
let incoming: IncomingBso = serde_json::from_value(json).unwrap();
let content = incoming.into_content::<serde_json::Value>();
assert!(matches!(content.kind, IncomingKind::Malformed));
}
#[test]
fn test_content_ser_with_id() {
env_logger::try_init().ok();
// When serializing, expect the ID to be in the top-level payload (ie,
// in the envelope) but should not appear in the cleartext `payload` part of
// the payload.
let val = TestStruct {
id: Guid::new("test"),
data: 1,
};
let outgoing = OutgoingBso::from_content_with_id(val).unwrap();
// The envelope should have our ID.
assert_eq!(outgoing.envelope.id, Guid::new("test"));
// and make sure `cleartext` part of the payload only has data.
let ct_value = serde_json::from_str::<serde_json::Value>(&outgoing.payload).unwrap();
assert_eq!(ct_value, json!({"data": 1}));
}
#[test]
fn test_content_ser_with_envelope() {
env_logger::try_init().ok();
// When serializing, expect the ID to be in the top-level payload (ie,
// in the envelope) but should not appear in the cleartext `payload`
let val = TestStruct {
id: Guid::new("test"),
data: 1,
};
let envelope: OutgoingEnvelope = Guid::new("test").into();
let outgoing = OutgoingBso::from_content(envelope, val).unwrap();
// The envelope should have our ID.
assert_eq!(outgoing.envelope.id, Guid::new("test"));
// and make sure `cleartext` part of the payload only has data.
let ct_value = serde_json::from_str::<serde_json::Value>(&outgoing.payload).unwrap();
assert_eq!(ct_value, json!({"data": 1}));
}
#[test]
#[should_panic]
fn test_content_ser_no_ids() {
env_logger::try_init().ok();
#[derive(Serialize)]
struct StructWithNoId {
data: u32,
}
let val = StructWithNoId { data: 1 };
let _ = OutgoingBso::from_content_with_id(val);
}
#[test]
#[should_panic]
fn test_content_ser_not_object() {
env_logger::try_init().ok();
let _ = OutgoingBso::from_content_with_id(json!("string"));
}
#[test]
#[should_panic]
fn test_content_ser_mismatched_ids() {
env_logger::try_init().ok();
let val = TestStruct {
id: Guid::new("test"),
data: 1,
};
let envelope: OutgoingEnvelope = Guid::new("different").into();
let _ = OutgoingBso::from_content(envelope, val);
}
#[test]
#[should_panic]
fn test_content_empty_id() {
env_logger::try_init().ok();
let val = TestStruct {
id: Guid::new(""),
data: 1,
};
let _ = OutgoingBso::from_content_with_id(val);
}
#[test]
#[should_panic]
fn test_content_invalid_id() {
env_logger::try_init().ok();
let val = TestStruct {
id: Guid::new(&"X".repeat(65)),
data: 1,
};
let _ = OutgoingBso::from_content_with_id(val);
}
}

197
third_party/rust/sync15/src/bso/crypto.rs vendored Normal file

@@ -0,0 +1,197 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Support for "encrypted bso"s, as received by the storage servers.
//! This module decrypts them into IncomingBso's suitable for use by the
//! engines.
use super::{IncomingBso, IncomingEnvelope, OutgoingBso, OutgoingEnvelope};
use crate::error;
use crate::key_bundle::KeyBundle;
use crate::EncryptedPayload;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
// The BSO implementation we use for encrypted payloads.
// Note that this is almost identical to the IncomingBso implementations, except
// instead of a String payload we use an EncryptedPayload. Obviously we *could*
// just use a String payload and transform it into an EncryptedPayload - and maybe we
// should - but this is marginally more efficient in terms of deserialization.
#[derive(Deserialize, Debug)]
pub struct IncomingEncryptedBso {
#[serde(flatten)]
pub envelope: IncomingEnvelope,
#[serde(
with = "as_json",
bound(deserialize = "EncryptedPayload: DeserializeOwned")
)]
pub(crate) payload: EncryptedPayload,
}
impl IncomingEncryptedBso {
pub fn new(envelope: IncomingEnvelope, payload: EncryptedPayload) -> Self {
Self { envelope, payload }
}
/// Decrypt a BSO, consuming it into a clear-text version.
pub fn into_decrypted(self, key: &KeyBundle) -> error::Result<IncomingBso> {
Ok(IncomingBso::new(self.envelope, self.payload.decrypt(key)?))
}
}
#[derive(Serialize, Debug)]
pub struct OutgoingEncryptedBso {
#[serde(flatten)]
pub envelope: OutgoingEnvelope,
#[serde(with = "as_json", bound(serialize = "EncryptedPayload: Serialize"))]
payload: EncryptedPayload,
}
impl OutgoingEncryptedBso {
pub fn new(envelope: OutgoingEnvelope, payload: EncryptedPayload) -> Self {
Self { envelope, payload }
}
#[inline]
pub fn serialized_payload_len(&self) -> usize {
self.payload.serialized_len()
}
}
impl OutgoingBso {
pub fn into_encrypted(self, key: &KeyBundle) -> error::Result<OutgoingEncryptedBso> {
Ok(OutgoingEncryptedBso {
envelope: self.envelope,
payload: EncryptedPayload::from_cleartext(key, self.payload)?,
})
}
}
// The BSOs we write to the servers expect a "payload" attribute which is a JSON serialized
// string, rather than the JSON representation of the object.
// ie, the serialized object is expected to look like:
// `{"id": "some-guid", "payload": "{\"IV\": ... }"}` <-- payload is a string.
// However, if we just serialize it directly, we end up with:
// `{"id": "some-guid", "payload": {"IV": ... }}` <-- payload is an object.
// The magic here means we can serialize and deserialize directly into/from the object, correctly
// working with the payload as a string, instead of needing to explicitly stringify/parse the
// payload as an extra step.
//
// This would work for any <T>, but we only use it for EncryptedPayload - the way our cleartext
// BSOs work mean it's not necessary there as they define the payload as a String - ie, they do
// explicitly end up doing 2 JSON operations as an ergonomic design choice.
mod as_json {
use serde::de::{self, Deserialize, DeserializeOwned, Deserializer};
use serde::ser::{self, Serialize, Serializer};
pub fn serialize<T, S>(t: &T, serializer: S) -> Result<S::Ok, S::Error>
where
T: Serialize,
S: Serializer,
{
let j = serde_json::to_string(t).map_err(ser::Error::custom)?;
serializer.serialize_str(&j)
}
pub fn deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where
T: DeserializeOwned,
D: Deserializer<'de>,
{
let j = String::deserialize(deserializer)?;
serde_json::from_str(&j).map_err(de::Error::custom)
}
}
// Lots of stuff for testing the sizes of encrypted records, because the servers have
// certain limits in terms of max-POST sizes, forcing us to chunk uploads, but
// we need to calculate based on encrypted record size rather than the raw <T> size.
//
// This is a little kludgey but I couldn't think of another way to have easy deserialization
// without a bunch of wrapper types, while still only serializing a single time in the
// postqueue.
#[cfg(test)]
impl OutgoingEncryptedBso {
/// Return the length of the serialized payload.
pub fn payload_serialized_len(&self) -> usize {
self.payload.serialized_len()
}
// self.payload is private, but tests want to create funky things.
// XXX - test only, but test in another crate :(
//#[cfg(test)]
pub fn make_test_bso(ciphertext: String) -> Self {
Self {
envelope: OutgoingEnvelope {
id: "".into(),
sortindex: None,
ttl: None,
},
payload: EncryptedPayload {
iv: "".into(),
hmac: "".into(),
ciphertext,
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::bso::OutgoingEnvelope;
#[test]
fn test_deserialize_enc() {
let serialized = r#"{
"id": "1234",
"collection": "passwords",
"modified": 12344321.0,
"payload": "{\"IV\": \"aaaaa\", \"hmac\": \"bbbbb\", \"ciphertext\": \"ccccc\"}"
}"#;
let record: IncomingEncryptedBso = serde_json::from_str(serialized).unwrap();
assert_eq!(&record.envelope.id, "1234");
assert_eq!((record.envelope.modified.0 - 12_344_321_000).abs(), 0);
assert_eq!(record.envelope.sortindex, None);
assert_eq!(&record.payload.iv, "aaaaa");
assert_eq!(&record.payload.hmac, "bbbbb");
assert_eq!(&record.payload.ciphertext, "ccccc");
}
#[test]
fn test_deserialize_autofields() {
let serialized = r#"{
"id": "1234",
"collection": "passwords",
"modified": 12344321.0,
"sortindex": 100,
"ttl": 99,
"payload": "{\"IV\": \"aaaaa\", \"hmac\": \"bbbbb\", \"ciphertext\": \"ccccc\"}"
}"#;
let record: IncomingEncryptedBso = serde_json::from_str(serialized).unwrap();
assert_eq!(record.envelope.sortindex, Some(100));
assert_eq!(record.envelope.ttl, Some(99));
}
#[test]
fn test_serialize_enc() {
let goal = r#"{"id":"1234","payload":"{\"IV\":\"aaaaa\",\"hmac\":\"bbbbb\",\"ciphertext\":\"ccccc\"}"}"#;
let record = OutgoingEncryptedBso {
envelope: OutgoingEnvelope {
id: "1234".into(),
..Default::default()
},
payload: EncryptedPayload {
iv: "aaaaa".into(),
hmac: "bbbbb".into(),
ciphertext: "ccccc".into(),
},
};
let actual = serde_json::to_string(&record).unwrap();
assert_eq!(actual, goal);
let val_str_payload: serde_json::Value = serde_json::from_str(goal).unwrap();
assert_eq!(
val_str_payload["payload"].as_str().unwrap().len(),
record.payload.serialized_len()
)
}
}

202
third_party/rust/sync15/src/bso/mod.rs vendored Normal file

@@ -0,0 +1,202 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// This module defines our core "bso" abstractions.
/// In the terminology of this crate:
/// * "bso" is an acronym for "basic storage object" and used extensively in the sync server docs.
/// the record always has a well-defined "envelope" with metadata (eg, the ID of the record,
/// the server timestamp of the resource, etc) and a field called `payload`.
/// A bso is serialized to and from JSON.
/// * There's a "cleartext" bso:
/// ** The payload is a String, which itself is JSON encoded (ie, this string `payload` is
/// always double JSON encoded in a server record)
/// ** This supplies helper methods for working with the "content" (some arbitrary <T>) in the
/// payload.
/// * There's an "encrypted" bso
/// ** The payload is an [crate::enc_payload::EncryptedPayload]
/// ** Only clients use this; as soon as practical we decrypt and as late as practical we encrypt
/// to and from encrypted bsos.
/// ** The encrypted bsos etc are all in the [crypto] module and require the `crypto` feature.
///
/// Let's look at some real-world examples:
/// # meta/global
/// A "bso" (ie, record with an "envelope" and a "payload" with a JSON string) - but the payload
/// is cleartext.
/// ```json
/// {
/// "id":"global",
/// "modified":1661564513.50,
/// "payload": "{\"syncID\":\"p1z5_oDdOfLF\",\"storageVersion\":5,\"engines\":{\"passwords\":{\"version\":1,\"syncID\":\"6Y6JJkB074cF\"} /* snip */},\"declined\":[]}"
/// }```
///
/// # encrypted bsos:
/// Encrypted BSOs are still a "bso" (ie, a record with a field named `payload` which is a string)
/// but the payload is in the form of an EncryptedPayload.
/// For example, crypto/keys:
/// ```json
/// {
/// "id":"keys",
/// "modified":1661564513.74,
/// "payload":"{\"IV\":\"snip-base-64==\",\"hmac\":\"snip-hex\",\"ciphertext\":\"snip-base64==\"}"
/// }```
/// (Note that as described above, most code working with bsos *do not* use that `payload`
/// directly, but instead a decrypted cleartext bso.)
///
/// Note all collection responses are the same shape as `crypto/keys` - a `payload` field with a
/// JSON serialized EncryptedPayload, it's just that the final <T> content differs for each
/// collection (eg, tabs and bookmarks have quite different <T>s JSON-encoded in the
/// String payload.)
///
/// For completeness, some other "non-BSO" records - no "id", "modified" or "payload" fields in
/// the response, just plain-old clear-text JSON.
/// # Example
/// ## `info/collections`
/// ```json
/// {
/// "bookmarks":1661564648.65,
/// "meta":1661564513.50,
/// "addons":1661564649.09,
/// "clients":1661564643.57,
/// ...
/// }```
/// ## `info/configuration`
/// ```json
/// {
/// "max_post_bytes":2097152,
/// "max_post_records":100,
/// "max_record_payload_bytes":2097152,
/// ...
/// }```
///
/// Given our definitions above, these are not any kind of "bso", so are
/// not relevant to this module
use crate::{Guid, ServerTimestamp};
use serde::{Deserialize, Serialize};
#[cfg(feature = "crypto")]
mod crypto;
#[cfg(feature = "crypto")]
pub use crypto::{IncomingEncryptedBso, OutgoingEncryptedBso};
mod content;
#[cfg(feature = "test-utils")]
pub mod test_utils;
/// An envelope for an incoming item. Envelopes carry all the metadata for
/// a Sync BSO record (`id`, `modified`, `sortindex`), *but not* the payload
/// itself.
#[derive(Debug, Clone, Deserialize)]
pub struct IncomingEnvelope {
/// The ID of the record.
pub id: Guid,
// If we don't give it a default, a small handful of tests fail.
// XXX - we should probably fix the tests and kill this?
#[serde(default = "ServerTimestamp::default")]
pub modified: ServerTimestamp,
pub sortindex: Option<i32>,
pub ttl: Option<u32>,
}
/// An envelope for an outgoing item. This is conceptually identical to
/// [IncomingEnvelope], but omits fields that are only set by the server,
/// like `modified`.
#[derive(Debug, Default, Clone, Serialize)]
pub struct OutgoingEnvelope {
/// The ID of the record.
pub id: Guid,
#[serde(skip_serializing_if = "Option::is_none")]
pub sortindex: Option<i32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ttl: Option<u32>,
}
/// Allow an outgoing envelope to be constructed with just a guid when default
/// values for the other fields are OK.
impl From<Guid> for OutgoingEnvelope {
fn from(id: Guid) -> Self {
OutgoingEnvelope {
id,
..Default::default()
}
}
}
/// IncomingBso's can come from:
/// * Directly from the server (ie, some records aren't encrypted, such as meta/global)
/// * From environments where the encryption is done externally (eg, Rust syncing in Desktop
/// Firefox has the encryption/decryption done by Firefox and the cleartext BSOs are passed in.)
/// * Read from the server as an EncryptedBso; see EncryptedBso description above.
#[derive(Deserialize, Debug)]
pub struct IncomingBso {
#[serde(flatten)]
pub envelope: IncomingEnvelope,
// payload is public for some edge-cases in some components, but in general,
// you should use into_content<> to get a record out of it.
pub payload: String,
}
impl IncomingBso {
pub fn new(envelope: IncomingEnvelope, payload: String) -> Self {
Self { envelope, payload }
}
}
#[derive(Serialize, Debug)]
pub struct OutgoingBso {
#[serde(flatten)]
pub envelope: OutgoingEnvelope,
// payload is public for some edge-cases in some components, but in general,
// you should use into_content<> to get a record out of it.
pub payload: String,
}
impl OutgoingBso {
/// Most consumers will use `self.from_content` and `self.from_content_with_id`
/// but this exists for the few consumers for whom that doesn't make sense.
pub fn new<T: Serialize>(
envelope: OutgoingEnvelope,
val: &T,
) -> Result<Self, serde_json::Error> {
Ok(Self {
envelope,
payload: serde_json::to_string(&val)?,
})
}
}
/// We also have the concept of "content", which helps work with a `T` which
/// is represented inside the payload. Real-world examples of a `T` include
/// Bookmarks or Tabs.
/// See the content module for the implementations.
///
/// So this all flows together in the following way:
/// * Incoming encrypted data:
/// IncomingEncryptedBso -> IncomingBso -> [specific engine] -> IncomingContent<T>
/// * Incoming cleartext data:
/// IncomingBso -> IncomingContent<T>
/// (Note that incoming cleartext only happens for a few collections managed by
/// the sync client and never by specific engines - engine BSOs are always encrypted)
/// * Outgoing encrypted data:
/// OutgoingBso (created in the engine) -> [this crate] -> OutgoingEncryptedBso
/// * Outgoing cleartext data: just an OutgoingBso with no conversions needed.
/// [IncomingContent] is the result of converting an [IncomingBso] into
/// some <T> - it consumes the Bso, so you get the envelope, and the [IncomingKind]
/// which reflects the state of parsing the json.
#[derive(Debug)]
pub struct IncomingContent<T> {
pub envelope: IncomingEnvelope,
pub kind: IncomingKind<T>,
}
/// The "kind" of incoming content after deserializing it.
pub enum IncomingKind<T> {
/// A good, live T.
Content(T),
/// A record that used to be a T but has been replaced with a tombstone.
Tombstone,
/// Either not JSON, or can't be made into a T.
Malformed,
}
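A sketch of the incoming flow described above, assuming the `crypto` feature and a hypothetical `TabsRecord` engine type:
```rust
let encrypted: IncomingEncryptedBso = serde_json::from_str(server_response)?;
let incoming: IncomingBso = encrypted.into_decrypted(&key_bundle)?;
match incoming.into_content::<TabsRecord>().kind {
    IncomingKind::Content(record) => { /* a good, live TabsRecord */ }
    IncomingKind::Tombstone => { /* the record was deleted on the server */ }
    IncomingKind::Malformed => { /* not valid JSON, or not a TabsRecord */ }
}
```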

64
third_party/rust/sync15/src/bso/test_utils.rs vendored Normal file

@@ -0,0 +1,64 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Utilities for tests to make IncomingBsos and Content from test data.
//! Note that this is only available if you enable the `test-utils` feature,
//! which external crates are able to do just for their tests.
use super::{IncomingBso, IncomingEnvelope, OutgoingBso};
use crate::{Guid, ServerTimestamp};
/// Tests often want an IncomingBso to test, and the easiest way is often to
/// create an OutgoingBso and convert it back to an incoming one.
impl OutgoingBso {
// These functions would ideally consume `self` and avoid the clones, but
// this is more convenient for some tests and the extra overhead doesn't
// really matter for tests.
/// When a test has an [OutgoingBso] and wants it as an [IncomingBso]
pub fn to_test_incoming(&self) -> IncomingBso {
self.to_test_incoming_ts(ServerTimestamp::default())
}
/// When a test has an [OutgoingBso] and wants it as an [IncomingBso] with a specific timestamp.
pub fn to_test_incoming_ts(&self, ts: ServerTimestamp) -> IncomingBso {
IncomingBso {
envelope: IncomingEnvelope {
id: self.envelope.id.clone(),
modified: ts,
sortindex: self.envelope.sortindex,
ttl: self.envelope.ttl,
},
payload: self.payload.clone(),
}
}
    /// When a test has an [OutgoingBso] and wants its content back as a specific T.
pub fn to_test_incoming_t<T: for<'de> serde::Deserialize<'de>>(&self) -> T {
self.to_test_incoming().into_content().content().unwrap()
}
}
/// Helpers to create an IncomingBso from some T
impl IncomingBso {
    /// When a test has a T and wants it as an [IncomingBso]
pub fn from_test_content<T: serde::Serialize>(json: T) -> Self {
// Go via an OutgoingBso
OutgoingBso::from_content_with_id(json)
.unwrap()
.to_test_incoming()
}
    /// When a test has a T and wants it as an [IncomingBso] with a specific timestamp.
pub fn from_test_content_ts<T: serde::Serialize>(json: T, ts: ServerTimestamp) -> Self {
// Go via an OutgoingBso
OutgoingBso::from_content_with_id(json)
.unwrap()
.to_test_incoming_ts(ts)
}
/// When a test wants a new incoming tombstone.
pub fn new_test_tombstone(guid: Guid) -> Self {
OutgoingBso::new_tombstone(guid.into()).to_test_incoming()
}
}
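// A short usage sketch (not part of this patch), assuming the `test-utils`
// feature: round-trip a hypothetical record through the helpers above.
#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq)]
struct ExampleRecord {
    id: Guid,
    title: String,
}

#[test]
fn example_roundtrip() {
    let record = ExampleRecord {
        id: Guid::new("record-AAAA"),
        title: "example".to_string(),
    };
    let incoming = IncomingBso::from_test_content(&record);
    let roundtripped: ExampleRecord = incoming.into_content().content().unwrap();
    assert_eq!(roundtripped, record);
}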

449
third_party/rust/sync15/src/bso_record.rs vendored

@ -1,449 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::error;
use crate::key_bundle::KeyBundle;
use crate::Payload;
use crate::ServerTimestamp;
use lazy_static::lazy_static;
use serde::de::{Deserialize, DeserializeOwned};
use serde::ser::Serialize;
use serde_derive::*;
use std::ops::{Deref, DerefMut};
use sync_guid::Guid;
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct BsoRecord<T> {
pub id: Guid,
// It's not clear to me if this actually can be empty in practice.
// firefox-ios seems to think it can...
#[serde(default = "String::new")]
pub collection: String,
#[serde(skip_serializing)]
// If we don't give it a default, we fail to deserialize
// items we wrote out during tests and such.
#[serde(default = "ServerTimestamp::default")]
pub modified: ServerTimestamp,
#[serde(skip_serializing_if = "Option::is_none")]
pub sortindex: Option<i32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ttl: Option<u32>,
    // We do some serde magic here to parse the payload from JSON as we deserialize.
    // This avoids having a separate intermediate type that only exists so that we can deserialize
    // its payload field as JSON (especially since this one is going to exist more-or-less just so
    // that we can decrypt the data...)
#[serde(
with = "as_json",
bound(serialize = "T: Serialize", deserialize = "T: DeserializeOwned")
)]
pub payload: T,
}
impl<T> BsoRecord<T> {
#[inline]
pub fn map_payload<P, F>(self, mapper: F) -> BsoRecord<P>
where
F: FnOnce(T) -> P,
{
BsoRecord {
id: self.id,
collection: self.collection,
modified: self.modified,
sortindex: self.sortindex,
ttl: self.ttl,
payload: mapper(self.payload),
}
}
#[inline]
pub fn with_payload<P>(self, payload: P) -> BsoRecord<P> {
self.map_payload(|_| payload)
}
#[inline]
pub fn new_record(id: String, coll: String, payload: T) -> BsoRecord<T> {
BsoRecord {
id: id.into(),
collection: coll,
ttl: None,
sortindex: None,
modified: ServerTimestamp::default(),
payload,
}
}
pub fn try_map_payload<P, E>(
self,
mapper: impl FnOnce(T) -> Result<P, E>,
) -> Result<BsoRecord<P>, E> {
self.map_payload(mapper).transpose()
}
pub fn map_payload_or<P>(self, mapper: impl FnOnce(T) -> Option<P>) -> Option<BsoRecord<P>> {
self.map_payload(mapper).transpose()
}
#[inline]
pub fn into_timestamped_payload(self) -> (T, ServerTimestamp) {
(self.payload, self.modified)
}
}
impl<T> BsoRecord<Option<T>> {
/// Helper to improve ergonomics for handling records that might be tombstones.
#[inline]
pub fn transpose(self) -> Option<BsoRecord<T>> {
let BsoRecord {
id,
collection,
modified,
sortindex,
ttl,
payload,
} = self;
payload.map(|p| BsoRecord {
id,
collection,
modified,
sortindex,
ttl,
payload: p,
})
}
}
impl<T, E> BsoRecord<Result<T, E>> {
#[inline]
pub fn transpose(self) -> Result<BsoRecord<T>, E> {
let BsoRecord {
id,
collection,
modified,
sortindex,
ttl,
payload,
} = self;
match payload {
Ok(p) => Ok(BsoRecord {
id,
collection,
modified,
sortindex,
ttl,
payload: p,
}),
Err(e) => Err(e),
}
}
}
impl<T> Deref for BsoRecord<T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
&self.payload
}
}
impl<T> DerefMut for BsoRecord<T> {
#[inline]
fn deref_mut(&mut self) -> &mut T {
&mut self.payload
}
}
impl CleartextBso {
pub fn from_payload(mut payload: Payload, collection: impl Into<String>) -> Self {
let id = payload.id.clone();
let sortindex: Option<i32> = payload.take_auto_field("sortindex");
let ttl: Option<u32> = payload.take_auto_field("ttl");
BsoRecord {
id,
collection: collection.into(),
modified: ServerTimestamp::default(), // Doesn't matter.
sortindex,
ttl,
payload,
}
}
}
pub type EncryptedBso = BsoRecord<EncryptedPayload>;
pub type CleartextBso = BsoRecord<Payload>;
// Contains the methods to automatically deserialize the payload to/from json.
mod as_json {
use serde::de::{self, Deserialize, DeserializeOwned, Deserializer};
use serde::ser::{self, Serialize, Serializer};
pub fn serialize<T, S>(t: &T, serializer: S) -> Result<S::Ok, S::Error>
where
T: Serialize,
S: Serializer,
{
let j = serde_json::to_string(t).map_err(ser::Error::custom)?;
serializer.serialize_str(&j)
}
pub fn deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where
T: DeserializeOwned,
D: Deserializer<'de>,
{
let j = String::deserialize(deserializer)?;
serde_json::from_str(&j).map_err(de::Error::custom)
}
}
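// A self-contained sketch (not part of this patch) of the double-JSON trick
// `as_json` implements: the inner value travels as a JSON *string* field of
// the outer object, mirroring how BSO payloads appear on the wire. The types
// here are hypothetical.
#[derive(Serialize, Deserialize)]
struct OuterExample {
    #[serde(with = "as_json")]
    payload: InnerExample,
}
#[derive(Serialize, Deserialize)]
struct InnerExample {
    name: String,
}
// serde_json::to_string(&outer) then yields, e.g.:
//   {"payload":"{\"name\":\"example\"}"}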
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct EncryptedPayload {
#[serde(rename = "IV")]
pub iv: String,
pub hmac: String,
pub ciphertext: String,
}
// This is a little kludgey, but I couldn't think of another way to have easy deserialization
// without a bunch of wrapper types, while still only serializing a single time in the
// postqueue.
lazy_static! {
    // The number of bytes taken up by padding in an EncryptedPayload.
static ref EMPTY_ENCRYPTED_PAYLOAD_SIZE: usize = serde_json::to_string(
&EncryptedPayload { iv: "".into(), hmac: "".into(), ciphertext: "".into() }
).unwrap().len();
}
impl EncryptedPayload {
#[inline]
pub fn serialized_len(&self) -> usize {
(*EMPTY_ENCRYPTED_PAYLOAD_SIZE) + self.ciphertext.len() + self.hmac.len() + self.iv.len()
}
pub fn decrypt_and_parse_payload<T>(&self, key: &KeyBundle) -> error::Result<T>
where
for<'a> T: Deserialize<'a>,
{
let cleartext = key.decrypt(&self.ciphertext, &self.iv, &self.hmac)?;
Ok(serde_json::from_str(&cleartext)?)
}
pub fn from_cleartext_payload<T: Serialize>(
key: &KeyBundle,
cleartext_payload: &T,
) -> error::Result<Self> {
let cleartext = serde_json::to_string(cleartext_payload)?;
let (enc_base64, iv_base64, hmac_base16) =
key.encrypt_bytes_rand_iv(cleartext.as_bytes())?;
Ok(EncryptedPayload {
iv: iv_base64,
hmac: hmac_base16,
ciphertext: enc_base64,
})
}
}
impl EncryptedBso {
pub fn decrypt(self, key: &KeyBundle) -> error::Result<CleartextBso> {
let new_payload = self
.payload
.decrypt_and_parse_payload::<Payload>(key)?
.with_auto_field("sortindex", self.sortindex)
.with_auto_field("ttl", self.ttl);
let result = self.with_payload(new_payload);
Ok(result)
}
pub fn decrypt_as<T>(self, key: &KeyBundle) -> error::Result<BsoRecord<T>>
where
for<'a> T: Deserialize<'a>,
{
self.decrypt(key)?.into_record::<T>()
}
}
impl CleartextBso {
pub fn encrypt(self, key: &KeyBundle) -> error::Result<EncryptedBso> {
let encrypted_payload = EncryptedPayload::from_cleartext_payload(key, &self.payload)?;
Ok(self.with_payload(encrypted_payload))
}
pub fn into_record<T>(self) -> error::Result<BsoRecord<T>>
where
for<'a> T: Deserialize<'a>,
{
Ok(self.try_map_payload(Payload::into_record)?)
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json::json;
use serde_json::Value as JsonValue;
#[test]
fn test_deserialize_enc() {
let serialized = r#"{
"id": "1234",
"collection": "passwords",
"modified": 12344321.0,
"payload": "{\"IV\": \"aaaaa\", \"hmac\": \"bbbbb\", \"ciphertext\": \"ccccc\"}"
}"#;
let record: BsoRecord<EncryptedPayload> = serde_json::from_str(serialized).unwrap();
assert_eq!(&record.id, "1234");
assert_eq!(&record.collection, "passwords");
assert_eq!((record.modified.0 - 12_344_321_000).abs(), 0);
assert_eq!(record.sortindex, None);
assert_eq!(&record.payload.iv, "aaaaa");
assert_eq!(&record.payload.hmac, "bbbbb");
assert_eq!(&record.payload.ciphertext, "ccccc");
}
#[test]
fn test_deserialize_autofields() {
let serialized = r#"{
"id": "1234",
"collection": "passwords",
"modified": 12344321.0,
"sortindex": 100,
"ttl": 99,
"payload": "{\"IV\": \"aaaaa\", \"hmac\": \"bbbbb\", \"ciphertext\": \"ccccc\"}"
}"#;
let record: BsoRecord<EncryptedPayload> = serde_json::from_str(serialized).unwrap();
assert_eq!(record.sortindex, Some(100));
assert_eq!(record.ttl, Some(99));
}
#[test]
fn test_serialize_enc() {
let goal = r#"{"id":"1234","collection":"passwords","payload":"{\"IV\":\"aaaaa\",\"hmac\":\"bbbbb\",\"ciphertext\":\"ccccc\"}"}"#;
let record = BsoRecord {
id: "1234".into(),
            modified: ServerTimestamp(999), // shouldn't be serialized by the client no matter what its value is
collection: "passwords".into(),
sortindex: None,
ttl: None,
payload: EncryptedPayload {
iv: "aaaaa".into(),
hmac: "bbbbb".into(),
ciphertext: "ccccc".into(),
},
};
let actual = serde_json::to_string(&record).unwrap();
assert_eq!(actual, goal);
let val_str_payload: serde_json::Value = serde_json::from_str(goal).unwrap();
assert_eq!(
val_str_payload["payload"].as_str().unwrap().len(),
record.payload.serialized_len()
)
}
#[test]
fn test_roundtrip_crypt_tombstone() {
let orig_record = CleartextBso::from_payload(
Payload::from_json(json!({ "id": "aaaaaaaaaaaa", "deleted": true, })).unwrap(),
"dummy",
);
assert!(orig_record.is_tombstone());
let keybundle = KeyBundle::new_random().unwrap();
let encrypted = orig_record.clone().encrypt(&keybundle).unwrap();
// While we're here, check on EncryptedPayload::serialized_len
let val_rec =
serde_json::from_str::<JsonValue>(&serde_json::to_string(&encrypted).unwrap()).unwrap();
assert_eq!(
encrypted.payload.serialized_len(),
val_rec["payload"].as_str().unwrap().len()
);
let decrypted: CleartextBso = encrypted.decrypt(&keybundle).unwrap();
assert!(decrypted.is_tombstone());
assert_eq!(decrypted, orig_record);
}
#[test]
fn test_roundtrip_crypt_record() {
let payload = json!({ "id": "aaaaaaaaaaaa", "age": 105, "meta": "data" });
let orig_record =
CleartextBso::from_payload(Payload::from_json(payload.clone()).unwrap(), "dummy");
assert!(!orig_record.is_tombstone());
let keybundle = KeyBundle::new_random().unwrap();
let encrypted = orig_record.clone().encrypt(&keybundle).unwrap();
// While we're here, check on EncryptedPayload::serialized_len
let val_rec =
serde_json::from_str::<JsonValue>(&serde_json::to_string(&encrypted).unwrap()).unwrap();
assert_eq!(
encrypted.payload.serialized_len(),
val_rec["payload"].as_str().unwrap().len()
);
let decrypted = encrypted.decrypt(&keybundle).unwrap();
assert!(!decrypted.is_tombstone());
assert_eq!(decrypted, orig_record);
assert_eq!(serde_json::to_value(decrypted.payload).unwrap(), payload);
}
#[test]
fn test_record_auto_fields() {
let payload = json!({ "id": "aaaaaaaaaaaa", "age": 105, "meta": "data", "sortindex": 100, "ttl": 99 });
let bso = CleartextBso::from_payload(Payload::from_json(payload).unwrap(), "dummy");
// We don't want the keys ending up in the actual record data on the server.
assert!(!bso.payload.data.contains_key("sortindex"));
assert!(!bso.payload.data.contains_key("ttl"));
// But we do want them in the BsoRecord.
assert_eq!(bso.sortindex, Some(100));
assert_eq!(bso.ttl, Some(99));
let keybundle = KeyBundle::new_random().unwrap();
let encrypted = bso.encrypt(&keybundle).unwrap();
let decrypted = encrypted.decrypt(&keybundle).unwrap();
// We add auto fields during decryption.
assert_eq!(decrypted.payload.data["sortindex"], 100);
assert_eq!(decrypted.payload.data["ttl"], 99);
assert_eq!(decrypted.sortindex, Some(100));
assert_eq!(decrypted.ttl, Some(99));
}
#[test]
fn test_record_bad_hmac() {
let payload = json!({ "id": "aaaaaaaaaaaa", "age": 105, "meta": "data", "sortindex": 100, "ttl": 99 });
let bso = CleartextBso::from_payload(Payload::from_json(payload).unwrap(), "dummy");
let keybundle = KeyBundle::new_random().unwrap();
let encrypted = bso.encrypt(&keybundle).unwrap();
let keybundle2 = KeyBundle::new_random().unwrap();
let e = encrypted
.decrypt(&keybundle2)
.expect_err("Should fail because wrong keybundle");
// Note: Error isn't PartialEq, so.
match e {
error::Error::CryptoError(_) => {
// yay.
}
other => {
panic!("Expected Crypto Error, got {:?}", other);
}
}
}
}


@ -6,16 +6,19 @@ use super::{
request::{NormalResponseHandler, UploadInfo},
CollState, Sync15ClientResponse, Sync15StorageClient,
};
use crate::bso::OutgoingEncryptedBso;
use crate::engine::{CollectionRequest, IncomingChangeset, OutgoingChangeset};
use crate::error::{self, Error, ErrorResponse, Result};
use crate::{CleartextBso, EncryptedBso, KeyBundle, ServerTimestamp};
use crate::{KeyBundle, ServerTimestamp};
use std::borrow::Cow;
pub fn encrypt_outgoing(o: OutgoingChangeset, key: &KeyBundle) -> Result<Vec<EncryptedBso>> {
let collection = o.collection;
pub fn encrypt_outgoing(
o: OutgoingChangeset,
key: &KeyBundle,
) -> Result<Vec<OutgoingEncryptedBso>> {
o.changes
.into_iter()
.map(|change| CleartextBso::from_payload(change, collection.clone()).encrypt(key))
.map(|change| change.into_encrypted(key))
.collect()
}
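// A usage sketch (not part of this patch): stage one record as an OutgoingBso,
// wrap it in a changeset and encrypt it for upload. The collection name and
// record content here are hypothetical.
fn example_encrypt_outgoing(key: &KeyBundle) -> Result<Vec<OutgoingEncryptedBso>> {
    let bso = crate::bso::OutgoingBso::from_content_with_id(serde_json::json!({
        "id": "record-AAAA",
        "title": "example",
    }))?;
    let outgoing = OutgoingChangeset::new_with_changes(
        "example-collection",
        ServerTimestamp::default(), // the XIUS timestamp for the POST
        vec![bso],
    );
    encrypt_outgoing(outgoing, key)
}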
@ -43,19 +46,17 @@ pub fn fetch_incoming(
// That should cause us to re-read crypto/keys and things should
// work (although if for some reason crypto/keys was updated but
// not all storage was wiped we are probably screwed.)
let decrypted = record.decrypt(&state.key)?;
result.changes.push(decrypted.into_timestamped_payload());
result.changes.push(record.into_decrypted(&state.key)?);
}
Ok(result)
}
#[derive(Debug, Clone)]
pub struct CollectionUpdate<'a> {
client: &'a Sync15StorageClient,
state: &'a CollState,
collection: Cow<'static, str>,
xius: ServerTimestamp,
to_update: Vec<EncryptedBso>,
to_update: Vec<OutgoingEncryptedBso>,
fully_atomic: bool,
}
@ -65,7 +66,7 @@ impl<'a> CollectionUpdate<'a> {
state: &'a CollState,
collection: Cow<'static, str>,
xius: ServerTimestamp,
records: Vec<EncryptedBso>,
records: Vec<OutgoingEncryptedBso>,
fully_atomic: bool,
) -> CollectionUpdate<'a> {
CollectionUpdate {


@ -4,8 +4,7 @@
use crate::error::Result;
use crate::record_types::CryptoKeysRecord;
use crate::ServerTimestamp;
use crate::{EncryptedPayload, KeyBundle};
use crate::{EncryptedPayload, KeyBundle, ServerTimestamp};
use std::collections::HashMap;
#[derive(Clone, Debug, PartialEq, Eq)]
@ -30,7 +29,7 @@ impl CollectionKeys {
timestamp: ServerTimestamp,
root_key: &KeyBundle,
) -> Result<CollectionKeys> {
let keys: CryptoKeysRecord = record.decrypt_and_parse_payload(root_key)?;
let keys: CryptoKeysRecord = record.decrypt_into(root_key)?;
Ok(CollectionKeys {
timestamp,
default: KeyBundle::from_base64(&keys.default[0], &keys.default[1])?,

45
third_party/rust/sync15/src/client/request.rs vendored

@ -3,8 +3,8 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::storage_client::Sync15ClientResponse;
use crate::bso::OutgoingEncryptedBso;
use crate::error::{self, Error as ErrorKind, Result};
use crate::EncryptedBso;
use crate::ServerTimestamp;
use serde_derive::*;
use std::collections::HashMap;
@ -255,8 +255,8 @@ where
!matches!(&self.batch, BatchState::Unsupported | BatchState::NoBatch)
}
pub fn enqueue(&mut self, record: &EncryptedBso) -> Result<bool> {
let payload_length = record.payload.serialized_len();
pub fn enqueue(&mut self, record: &OutgoingEncryptedBso) -> Result<bool> {
let payload_length = record.serialized_payload_len();
if self.post_limits.can_never_add(payload_length)
|| self.batch_limits.can_never_add(payload_length)
@ -488,7 +488,8 @@ impl<Poster> PostQueue<Poster, NormalResponseHandler> {
#[cfg(test)]
mod test {
use super::*;
use crate::{BsoRecord, EncryptedPayload};
use crate::bso::{IncomingEncryptedBso, OutgoingEncryptedBso, OutgoingEnvelope};
use crate::EncryptedPayload;
use lazy_static::lazy_static;
use std::cell::RefCell;
use std::collections::VecDeque;
@ -509,7 +510,8 @@ mod test {
let values =
serde_json::from_str::<serde_json::Value>(&self.body).expect("Posted invalid json");
// Check that they actually deserialize as what we want
let records_or_err = serde_json::from_value::<Vec<EncryptedBso>>(values.clone());
let records_or_err =
serde_json::from_value::<Vec<IncomingEncryptedBso>>(values.clone());
records_or_err.expect("Failed to deserialize data");
serde_json::from_value(values).unwrap()
}
@ -703,18 +705,17 @@ mod test {
};
// ~80b
static ref TOTAL_RECORD_OVERHEAD: usize = {
let val = serde_json::to_value(BsoRecord {
id: "".into(),
collection: "".into(),
modified: ServerTimestamp(0),
sortindex: None,
ttl: None,
payload: EncryptedPayload {
let val = serde_json::to_value(OutgoingEncryptedBso::new(OutgoingEnvelope {
id: "".into(),
sortindex: None,
ttl: None,
},
EncryptedPayload {
iv: "".into(),
hmac: "".into(),
ciphertext: "".into()
},
}).unwrap();
)).unwrap();
serde_json::to_string(&val).unwrap().len()
};
    // There's some subtlety in how we calculate this having to do with the fact that
@ -727,21 +728,21 @@ mod test {
}
// Actual record size (for max_request_len) will be larger by some amount
fn make_record(payload_size: usize) -> EncryptedBso {
fn make_record(payload_size: usize) -> OutgoingEncryptedBso {
assert!(payload_size > *PAYLOAD_OVERHEAD);
let ciphertext_len = payload_size - *PAYLOAD_OVERHEAD;
BsoRecord {
id: "".into(),
collection: "".into(),
modified: ServerTimestamp(0),
sortindex: None,
ttl: None,
payload: EncryptedPayload {
OutgoingEncryptedBso::new(
OutgoingEnvelope {
id: "".into(),
sortindex: None,
ttl: None,
},
EncryptedPayload {
iv: "".into(),
hmac: "".into(),
ciphertext: "x".repeat(ciphertext_len),
},
}
)
}
fn request_bytes_for_payloads(payloads: &[usize]) -> usize {

48
third_party/rust/sync15/src/client/state.rs vendored

@ -7,13 +7,13 @@ use std::collections::{HashMap, HashSet};
use super::request::{InfoCollections, InfoConfiguration};
use super::storage_client::{SetupStorageClient, Sync15ClientResponse};
use super::CollectionKeys;
use crate::bso::OutgoingEncryptedBso;
use crate::error::{self, Error as ErrorKind, ErrorResponse};
use crate::record_types::{MetaGlobalEngine, MetaGlobalRecord};
use crate::ServerTimestamp;
use crate::{EncryptedBso, EncryptedPayload, KeyBundle};
use crate::EncryptedPayload;
use crate::{Guid, KeyBundle, ServerTimestamp};
use interrupt_support::Interruptee;
use serde_derive::*;
use sync_guid::Guid;
use self::SetupState::*;
@ -435,7 +435,7 @@ impl<'a> SetupStateMachine<'a> {
// Note that collection/keys is itself a bso, so the
// json body also carries the timestamp. If they aren't
// identical something has screwed up and we should die.
assert_eq!(last_modified, record.modified);
assert_eq!(last_modified, record.envelope.modified);
let state = GlobalState {
config,
collections,
@ -510,7 +510,7 @@ impl<'a> SetupStateMachine<'a> {
// ...And a fresh `crypto/keys`.
let new_keys = CollectionKeys::new_random()?.to_encrypted_payload(self.root_key)?;
let bso = EncryptedBso::new_record("keys".into(), "crypto".into(), new_keys);
let bso = OutgoingEncryptedBso::new(Guid::new("keys").into(), new_keys);
self.client
.put_crypto_keys(ServerTimestamp::default(), &bso)?;
@ -616,14 +616,14 @@ fn is_same_timestamp(local: ServerTimestamp, collections: &InfoCollections, key:
mod tests {
use super::*;
use crate::EncryptedBso;
use crate::bso::{IncomingEncryptedBso, IncomingEnvelope};
use interrupt_support::NeverInterrupts;
struct InMemoryClient {
info_configuration: error::Result<Sync15ClientResponse<InfoConfiguration>>,
info_collections: error::Result<Sync15ClientResponse<InfoCollections>>,
meta_global: error::Result<Sync15ClientResponse<MetaGlobalRecord>>,
crypto_keys: error::Result<Sync15ClientResponse<EncryptedBso>>,
crypto_keys: error::Result<Sync15ClientResponse<IncomingEncryptedBso>>,
}
impl SetupStorageClient for InMemoryClient {
@ -677,11 +677,24 @@ mod tests {
Ok(ServerTimestamp(xius.0 + 1))
}
fn fetch_crypto_keys(&self) -> error::Result<Sync15ClientResponse<EncryptedBso>> {
fn fetch_crypto_keys(&self) -> error::Result<Sync15ClientResponse<IncomingEncryptedBso>> {
match &self.crypto_keys {
Ok(keys) => Ok(keys.clone()),
Ok(Sync15ClientResponse::Success {
status,
record,
last_modified,
route,
}) => Ok(Sync15ClientResponse::Success {
status: *status,
record: IncomingEncryptedBso::new(
record.envelope.clone(),
record.payload.clone(),
),
last_modified: *last_modified,
route: route.clone(),
}),
// TODO(lina): Same as above, for 404s.
Err(_) => Ok(Sync15ClientResponse::Error(ErrorResponse::ServerError {
_ => Ok(Sync15ClientResponse::Error(ErrorResponse::ServerError {
status: 500,
route: "test/path".into(),
})),
@ -691,7 +704,7 @@ mod tests {
fn put_crypto_keys(
&self,
xius: ServerTimestamp,
_keys: &EncryptedBso,
_keys: &OutgoingEncryptedBso,
) -> error::Result<()> {
assert_eq!(xius, ServerTimestamp(888_800));
Err(ErrorKind::StorageHttpError(ErrorResponse::ServerError {
@ -722,11 +735,18 @@ mod tests {
fn mocked_success_keys(
keys: CollectionKeys,
root_key: &KeyBundle,
) -> error::Result<Sync15ClientResponse<EncryptedBso>> {
) -> error::Result<Sync15ClientResponse<IncomingEncryptedBso>> {
let timestamp = keys.timestamp;
let payload = keys.to_encrypted_payload(root_key).unwrap();
let mut bso = EncryptedBso::new_record("keys".into(), "crypto".into(), payload);
bso.modified = timestamp;
let bso = IncomingEncryptedBso::new(
IncomingEnvelope {
id: Guid::new("keys"),
modified: timestamp,
sortindex: None,
ttl: None,
},
payload,
);
Ok(Sync15ClientResponse::Success {
status: 200,
record: bso,


@ -6,11 +6,11 @@ use super::request::{
BatchPoster, InfoCollections, InfoConfiguration, PostQueue, PostResponse, PostResponseHandler,
};
use super::token;
use crate::bso::{IncomingBso, IncomingEncryptedBso, OutgoingBso, OutgoingEncryptedBso};
use crate::engine::CollectionRequest;
use crate::error::{self, Error, ErrorResponse};
use crate::record_types::MetaGlobalRecord;
use crate::ServerTimestamp;
use crate::{BsoRecord, EncryptedBso};
use crate::{Guid, ServerTimestamp};
use serde_json::Value;
use std::str::FromStr;
use std::sync::atomic::{AtomicU32, Ordering};
@ -133,14 +133,18 @@ pub trait SetupStorageClient {
fn fetch_info_configuration(&self) -> error::Result<Sync15ClientResponse<InfoConfiguration>>;
fn fetch_info_collections(&self) -> error::Result<Sync15ClientResponse<InfoCollections>>;
fn fetch_meta_global(&self) -> error::Result<Sync15ClientResponse<MetaGlobalRecord>>;
fn fetch_crypto_keys(&self) -> error::Result<Sync15ClientResponse<EncryptedBso>>;
fn fetch_crypto_keys(&self) -> error::Result<Sync15ClientResponse<IncomingEncryptedBso>>;
fn put_meta_global(
&self,
xius: ServerTimestamp,
global: &MetaGlobalRecord,
) -> error::Result<ServerTimestamp>;
fn put_crypto_keys(&self, xius: ServerTimestamp, keys: &EncryptedBso) -> error::Result<()>;
fn put_crypto_keys(
&self,
xius: ServerTimestamp,
keys: &OutgoingEncryptedBso,
) -> error::Result<()>;
fn wipe_all_remote(&self) -> error::Result<()>;
}
@ -190,6 +194,12 @@ impl BackoffState {
}
}
// meta/global is a clear-text Bso (ie, there's a String `payload` which has a MetaGlobalRecord)
// We don't use the 'content' helpers here because we want JSON errors to be fatal
// (ie, we don't need tombstones and can't just skip a malformed record)
type IncMetaGlobalBso = IncomingBso;
type OutMetaGlobalBso = OutgoingBso;
#[derive(Debug)]
pub struct Sync15StorageClient {
tsc: token::TokenProvider,
@ -206,8 +216,7 @@ impl SetupStorageClient for Sync15StorageClient {
}
fn fetch_meta_global(&self) -> error::Result<Sync15ClientResponse<MetaGlobalRecord>> {
// meta/global is a Bso, so there's an extra dance to do.
let got: Sync15ClientResponse<BsoRecord<MetaGlobalRecord>> =
let got: Sync15ClientResponse<IncMetaGlobalBso> =
self.relative_storage_request(Method::Get, "storage/meta/global")?;
Ok(match got {
Sync15ClientResponse::Success {
@ -218,11 +227,11 @@ impl SetupStorageClient for Sync15StorageClient {
} => {
log::debug!(
"Got meta global with modified = {}; last-modified = {}",
record.modified,
record.envelope.modified,
last_modified
);
Sync15ClientResponse::Success {
record: record.payload,
record: serde_json::from_str(&record.payload)?,
last_modified,
route,
status,
@ -232,7 +241,7 @@ impl SetupStorageClient for Sync15StorageClient {
})
}
fn fetch_crypto_keys(&self) -> error::Result<Sync15ClientResponse<EncryptedBso>> {
fn fetch_crypto_keys(&self) -> error::Result<Sync15ClientResponse<IncomingEncryptedBso>> {
self.relative_storage_request(Method::Get, "storage/crypto/keys")
}
@ -241,11 +250,15 @@ impl SetupStorageClient for Sync15StorageClient {
xius: ServerTimestamp,
global: &MetaGlobalRecord,
) -> error::Result<ServerTimestamp> {
let bso = BsoRecord::new_record("global".into(), "meta".into(), global);
let bso = OutMetaGlobalBso::new(Guid::new("global").into(), global)?;
self.put("storage/meta/global", xius, &bso)
}
fn put_crypto_keys(&self, xius: ServerTimestamp, keys: &EncryptedBso) -> error::Result<()> {
fn put_crypto_keys(
&self,
xius: ServerTimestamp,
keys: &OutgoingEncryptedBso,
) -> error::Result<()> {
self.put("storage/crypto/keys", xius, keys)?;
Ok(())
}
@ -281,7 +294,7 @@ impl Sync15StorageClient {
pub fn get_encrypted_records(
&self,
collection_request: &CollectionRequest,
) -> error::Result<Sync15ClientResponse<Vec<EncryptedBso>>> {
) -> error::Result<Sync15ClientResponse<Vec<IncomingEncryptedBso>>> {
self.collection_request(Method::Get, collection_request)
}


@ -471,7 +471,8 @@ impl<'info, 'res, 'pgs, 'mcs> SyncMultipleDriver<'info, 'res, 'pgs, 'mcs> {
_ => {
// Don't log the error since it might contain sensitive
// info (although currently it only contains the declined engines list)
log::error!(
error_support::report_error!(
"sync15-prepare-persisted-state",
"Failed to parse PersistedGlobalState from JSON! Falling back to default"
);
*self.mem_cached_state = MemoryCachedState::default();


@ -4,14 +4,14 @@
use std::collections::{HashMap, HashSet};
use crate::bso::{IncomingKind, OutgoingBso, OutgoingEnvelope};
use crate::client::{
CollState, CollectionKeys, CollectionUpdate, GlobalState, InfoConfiguration,
Sync15StorageClient,
};
use crate::client_types::{ClientData, RemoteClient};
use crate::engine::{CollectionRequest, IncomingChangeset, OutgoingChangeset};
use crate::error::Result;
use crate::{KeyBundle, Payload};
use crate::{error::Result, Guid, KeyBundle};
use interrupt_support::Interruptee;
use super::{
@ -62,22 +62,21 @@ impl<'a> Driver<'a> {
let mut has_own_client_record = false;
for (payload, _) in inbound.changes {
for bso in inbound.changes {
self.interruptee.err_if_interrupted()?;
// Check for a payload's tombstone
if payload.is_tombstone() {
log::debug!("Record has been deleted; skipping...");
continue;
}
let content = bso.into_content();
// Unpack the client record
let client: ClientRecord = match payload.into_record() {
Ok(client) => client,
Err(e) => {
log::debug!("Error in unpacking record: {:?}", e);
let client: ClientRecord = match content.kind {
IncomingKind::Malformed => {
log::debug!("Error unpacking record");
continue;
}
IncomingKind::Tombstone => {
log::debug!("Record has been deleted; skipping...");
continue;
}
IncomingKind::Content(client) => client,
};
if client.id == self.command_processor.settings().fxa_device_id {
@ -122,17 +121,16 @@ impl<'a> Driver<'a> {
// We periodically upload our own client record, even if it
// doesn't change, to keep it fresh.
// (but this part sucks - if the ttl on the server happens to be
// different (as some other client did something strange) we
// still want the records to compare equal - but the ttl hack
// doesn't allow that.)
let mut client_compare = client.clone();
client_compare.ttl = current_client_record.ttl;
if should_refresh_client || client_compare != current_client_record {
if should_refresh_client || client != current_client_record {
log::debug!("Will update our client record on the server");
let envelope = OutgoingEnvelope {
id: content.envelope.id,
ttl: Some(CLIENTS_TTL),
..Default::default()
};
outgoing
.changes
.push(Payload::from_record(current_client_record)?);
.push(OutgoingBso::from_content(envelope, current_client_record)?);
}
} else {
// Add the other client to our map of recently synced clients.
@ -173,10 +171,14 @@ impl<'a> Driver<'a> {
self.memcache_max_record_payload_size(),
)?;
// We want to ensure the TTL for all records we write, which
// may not be true for incoming ones - so make sure it is.
new_client.ttl = CLIENTS_TTL;
outgoing.changes.push(Payload::from_record(new_client)?);
let envelope = OutgoingEnvelope {
id: content.envelope.id,
ttl: Some(CLIENTS_TTL),
..Default::default()
};
outgoing
.changes
.push(OutgoingBso::from_content(envelope, new_client)?);
}
}
@ -184,9 +186,14 @@ impl<'a> Driver<'a> {
if !has_own_client_record {
let current_client_record = self.current_client_record();
self.note_recent_client(&current_client_record);
let envelope = OutgoingEnvelope {
id: Guid::new(&current_client_record.id),
ttl: Some(CLIENTS_TTL),
..Default::default()
};
outgoing
.changes
.push(Payload::from_record(current_client_record)?);
.push(OutgoingBso::from_content(envelope, current_client_record)?);
}
Ok(outgoing)
@ -208,7 +215,6 @@ impl<'a> Driver<'a> {
app_package: None,
application: None,
device: None,
ttl: CLIENTS_TTL,
}
}
@ -228,6 +234,8 @@ impl<'a> Driver<'a> {
/// use 512k to be safe (at the recommendation of the server team). Note
/// that if the server reports a lower limit (via info/configuration), we
/// respect that limit instead. See also bug 1403052.
/// XXX - the above comment is stale and refers to the world before the
/// move to spanner and the rust sync server.
fn memcache_max_record_payload_size(&self) -> usize {
self.max_record_payload_size().min(512 * 1024)
}
@ -354,12 +362,13 @@ impl<'a> Engine<'a> {
#[cfg(test)]
mod tests {
use super::super::{CommandStatus, DeviceType, Settings};
use super::*;
use crate::bso::IncomingBso;
use crate::ServerTimestamp;
use anyhow::Result;
use interrupt_support::NeverInterrupts;
use serde_json::{json, Value};
use super::*;
use std::iter::zip;
struct TestProcessor {
settings: Settings,
@ -392,7 +401,7 @@ mod tests {
if let Value::Array(clients) = clients {
let changes = clients
.into_iter()
.map(|c| (Payload::from_json(c).unwrap(), ServerTimestamp(0)))
.map(IncomingBso::from_test_content)
.collect();
IncomingChangeset {
changes,
@ -466,7 +475,9 @@ mod tests {
// Passing false for `should_refresh_client` - it should be ignored
// because we've changed the commands.
let mut outgoing = driver.sync(inbound, false).expect("Should sync clients");
outgoing.changes.sort_by(|a, b| a.id.cmp(&b.id));
outgoing
.changes
.sort_by(|a, b| a.envelope.id.cmp(&b.envelope.id));
// Make sure the list of recently synced remote clients is correct.
let expected_ids = &["deviceAAAAAA", "deviceBBBBBB", "deviceCCCCCC"];
@ -515,7 +526,6 @@ mod tests {
}],
"fxaDeviceId": "deviceAAAAAA",
"protocols": ["1.5"],
"ttl": CLIENTS_TTL,
}, {
"id": "deviceBBBBBB",
"name": "iPhone",
@ -530,7 +540,6 @@ mod tests {
"fxaDeviceId": "iPhooooooone",
"protocols": ["1.5"],
"device": "iPhone",
"ttl": CLIENTS_TTL,
}, {
"id": "deviceCCCCCC",
"name": "Fenix",
@ -543,11 +552,22 @@ mod tests {
"args": ["history"],
}],
"fxaDeviceId": "deviceCCCCCC",
"ttl": CLIENTS_TTL,
}]);
// turn outgoing into an incoming payload.
let incoming = IncomingChangeset {
changes: outgoing
.changes
.into_iter()
.map(|c| OutgoingBso::to_test_incoming(&c))
.collect(),
timestamp: outgoing.timestamp,
collection: outgoing.collection,
};
if let Value::Array(expected) = expected {
for (i, record) in expected.into_iter().enumerate() {
assert_eq!(outgoing.changes[i], Payload::from_json(record).unwrap());
for (incoming_cleartext, exp_client) in zip(incoming.changes, expected) {
let incoming_client: ClientRecord =
incoming_cleartext.into_content().content().unwrap();
assert_eq!(incoming_client, serde_json::from_value(exp_client).unwrap());
}
} else {
unreachable!("`expected_clients` must be an array of client records")
@ -635,7 +655,7 @@ mod tests {
let mut driver = Driver::new(&processor, &NeverInterrupts, &config);
let inbound = inbound_from_clients(json!([{
let test_clients = json!([{
"id": "deviceBBBBBB",
"name": "iPhone",
"type": "mobile",
@ -646,7 +666,6 @@ mod tests {
"fxaDeviceId": "iPhooooooone",
"protocols": ["1.5"],
"device": "iPhone",
"ttl": CLIENTS_TTL,
}, {
"id": "deviceAAAAAA",
"name": "Laptop",
@ -654,11 +673,10 @@ mod tests {
"commands": [],
"fxaDeviceId": "deviceAAAAAA",
"protocols": ["1.5"],
"ttl": CLIENTS_TTL,
}]));
}]);
let outgoing = driver
.sync(inbound.clone(), false)
.sync(inbound_from_clients(test_clients.clone()), false)
.expect("Should sync clients");
// should be no outgoing changes.
assert_eq!(outgoing.changes.len(), 0);
@ -671,7 +689,9 @@ mod tests {
assert_eq!(actual_ids, expected_ids);
// Do it again - still no changes, but force a refresh.
let outgoing = driver.sync(inbound, true).expect("Should sync clients");
let outgoing = driver
.sync(inbound_from_clients(test_clients), true)
.expect("Should sync clients");
assert_eq!(outgoing.changes.len(), 1);
// Do it again - but this time with our own client record needing
@ -720,7 +740,7 @@ mod tests {
let inbound = if let Value::Array(clients) = clients {
let changes = clients
.into_iter()
.map(|c| (Payload::from_json(c).unwrap(), ServerTimestamp(0)))
.map(IncomingBso::from_test_content)
.collect();
IncomingChangeset {
changes,
@ -734,7 +754,9 @@ mod tests {
// Passing false here for should_refresh_client, but it should be
// ignored as we don't have an existing record yet.
let mut outgoing = driver.sync(inbound, false).expect("Should sync clients");
outgoing.changes.sort_by(|a, b| a.id.cmp(&b.id));
outgoing
.changes
.sort_by(|a, b| a.envelope.id.cmp(&b.envelope.id));
// Make sure the list of recently synced remote clients is correct.
let expected_ids = &["deviceAAAAAA", "deviceBBBBBB"];
@ -770,8 +792,20 @@ mod tests {
"ttl": CLIENTS_TTL,
}]);
if let Value::Array(expected) = expected {
for (i, record) in expected.into_iter().enumerate() {
assert_eq!(outgoing.changes[i], Payload::from_json(record).unwrap());
// turn outgoing into an incoming payload.
let incoming = IncomingChangeset {
changes: outgoing
.changes
.into_iter()
.map(|c| OutgoingBso::to_test_incoming(&c))
.collect(),
timestamp: outgoing.timestamp,
collection: outgoing.collection,
};
for (incoming_cleartext, record) in zip(incoming.changes, expected) {
let incoming_client: ClientRecord =
incoming_cleartext.into_content().content().unwrap();
assert_eq!(incoming_client, serde_json::from_value(record).unwrap());
}
} else {
unreachable!("`expected_clients` must be an array of client records")


@ -55,14 +55,6 @@ pub struct ClientRecord {
/// (`fxa_device_id`).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub device: Option<String>,
// This field is somewhat magic - it's moved to and from the
// BSO record, so is not expected to be on the unencrypted payload
    // when incoming and is not put on the unencrypted payload when outgoing.
    // There are hysterical raisins for this, which we should fix.
// https://github.com/mozilla/application-services/issues/2712
#[serde(default)]
pub ttl: u32,
}
impl From<&ClientRecord> for crate::RemoteClient {
@ -130,33 +122,3 @@ impl From<Command> for CommandRecord {
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Payload;
#[test]
fn test_ttl() {
// The ttl hacks in place mean that magically the ttl field from the
// client record should make it down to a BSO.
let record = ClientRecord {
id: "id".into(),
name: "my device".into(),
typ: Some(crate::DeviceType::VR),
commands: Vec::new(),
fxa_device_id: Some("12345".into()),
version: None,
protocols: vec!["1.5".into()],
form_factor: None,
os: None,
app_package: None,
application: None,
device: None,
ttl: 123,
};
let p = Payload::from_record(record).unwrap();
let bso = crate::CleartextBso::from_payload(p, "clients");
assert_eq!(bso.ttl, Some(123));
}
}

110
third_party/rust/sync15/src/enc_payload.rs vendored Normal file

@ -0,0 +1,110 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::error;
use crate::key_bundle::KeyBundle;
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
/// A representation of an encrypted payload. Used as the payload in EncryptedBso and
/// also anywhere else the sync keys might be used to encrypt/decrypt, such as send-tab payloads.
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct EncryptedPayload {
#[serde(rename = "IV")]
pub iv: String,
pub hmac: String,
pub ciphertext: String,
}
impl EncryptedPayload {
#[inline]
pub fn serialized_len(&self) -> usize {
(*EMPTY_ENCRYPTED_PAYLOAD_SIZE) + self.ciphertext.len() + self.hmac.len() + self.iv.len()
}
pub fn decrypt(&self, key: &KeyBundle) -> error::Result<String> {
key.decrypt(&self.ciphertext, &self.iv, &self.hmac)
}
pub fn decrypt_into<T>(&self, key: &KeyBundle) -> error::Result<T>
where
for<'a> T: Deserialize<'a>,
{
Ok(serde_json::from_str(&self.decrypt(key)?)?)
}
pub fn from_cleartext(key: &KeyBundle, cleartext: String) -> error::Result<Self> {
let (enc_base64, iv_base64, hmac_base16) =
key.encrypt_bytes_rand_iv(cleartext.as_bytes())?;
Ok(EncryptedPayload {
iv: iv_base64,
hmac: hmac_base16,
ciphertext: enc_base64,
})
}
pub fn from_cleartext_payload<T: Serialize>(
key: &KeyBundle,
cleartext_payload: &T,
) -> error::Result<Self> {
Self::from_cleartext(key, serde_json::to_string(cleartext_payload)?)
}
}
// Our "postqueue", which chunks records for upload, needs to know this value.
// It's tricky to determine at compile time, so do it once at runtime.
lazy_static! {
    // The number of bytes taken up by padding in an EncryptedPayload.
static ref EMPTY_ENCRYPTED_PAYLOAD_SIZE: usize = serde_json::to_string(
&EncryptedPayload { iv: "".into(), hmac: "".into(), ciphertext: "".into() }
).unwrap().len();
}
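// A small illustration (not part of this patch) of the arithmetic above:
// `serialized_len` is the constant scaffolding plus the three field lengths,
// and always matches the size of the real serialized JSON - which is what
// lets the postqueue budget batches without serializing twice.
#[test]
fn example_serialized_len_arithmetic() {
    let payload = EncryptedPayload {
        iv: "x".repeat(24),
        hmac: "y".repeat(64),
        ciphertext: "z".repeat(1000),
    };
    assert_eq!(
        payload.serialized_len(),
        serde_json::to_string(&payload).unwrap().len()
    );
}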
#[cfg(test)]
mod tests {
use super::*;
use serde_json::json;
#[derive(Serialize, Deserialize, Debug)]
struct TestStruct {
id: String,
age: u32,
meta: String,
}
#[test]
fn test_roundtrip_crypt_record() {
let key = KeyBundle::new_random().unwrap();
let payload_json = json!({ "id": "aaaaaaaaaaaa", "age": 105, "meta": "data" });
let payload =
EncryptedPayload::from_cleartext(&key, serde_json::to_string(&payload_json).unwrap())
.unwrap();
let record = payload.decrypt_into::<TestStruct>(&key).unwrap();
assert_eq!(record.id, "aaaaaaaaaaaa");
assert_eq!(record.age, 105);
assert_eq!(record.meta, "data");
// While we're here, check on EncryptedPayload::serialized_len
let val_rec = serde_json::to_string(&serde_json::to_value(&payload).unwrap()).unwrap();
assert_eq!(payload.serialized_len(), val_rec.len());
}
#[test]
fn test_record_bad_hmac() {
let key1 = KeyBundle::new_random().unwrap();
let json = json!({ "id": "aaaaaaaaaaaa", "deleted": true, });
let payload =
EncryptedPayload::from_cleartext(&key1, serde_json::to_string(&json).unwrap()).unwrap();
let key2 = KeyBundle::new_random().unwrap();
let e = payload
.decrypt(&key2)
.expect_err("Should fail because wrong keybundle");
// Note: ErrorKind isn't PartialEq, so.
assert!(matches!(e, error::Error::CryptoError(_)));
}
}


@ -2,11 +2,9 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::{error::Error, fmt};
use crate::bso::{IncomingBso, OutgoingBso};
use serde::{Deserialize, Serialize};
use crate::{Guid, Payload, ServerTimestamp};
use crate::Guid;
/// A BridgedEngine acts as a bridge between application-services, rust
/// implemented sync engines and sync engines as defined by Desktop Firefox.
@ -64,7 +62,7 @@ pub trait BridgedEngine {
/// times per sync, once for each batch. Implementations can use the
/// signal to check if the operation was aborted, and cancel any
/// pending work.
fn store_incoming(&self, incoming_payloads: &[IncomingEnvelope]) -> Result<(), Self::Error>;
fn store_incoming(&self, incoming_records: Vec<IncomingBso>) -> Result<(), Self::Error>;
/// Applies all staged records, reconciling changes on both sides and
/// resolving conflicts. Returns a list of records to upload.
@ -91,10 +89,12 @@ pub trait BridgedEngine {
fn wipe(&self) -> Result<(), Self::Error>;
}
#[derive(Clone, Debug, Default)]
// TODO: We should replace this with OutgoingChangeset to reduce the number
// of types engines need to deal with.
#[derive(Debug, Default)]
pub struct ApplyResults {
    /// List of records to upload.
pub envelopes: Vec<OutgoingEnvelope>,
pub records: Vec<OutgoingBso>,
/// The number of incoming records whose contents were merged because they
/// changed on both sides. None indicates we aren't reporting this
/// information.
@ -102,125 +102,20 @@ pub struct ApplyResults {
}
impl ApplyResults {
pub fn new(envelopes: Vec<OutgoingEnvelope>, num_reconciled: impl Into<Option<usize>>) -> Self {
pub fn new(records: Vec<OutgoingBso>, num_reconciled: impl Into<Option<usize>>) -> Self {
Self {
envelopes,
records,
num_reconciled: num_reconciled.into(),
}
}
}
// Shorthand for engines that don't care.
impl From<Vec<OutgoingEnvelope>> for ApplyResults {
fn from(envelopes: Vec<OutgoingEnvelope>) -> Self {
impl From<Vec<OutgoingBso>> for ApplyResults {
fn from(records: Vec<OutgoingBso>) -> Self {
Self {
envelopes,
records,
num_reconciled: None,
}
}
}
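// A minimal sketch (not part of this patch) of an engine's `apply()` tail:
// engines that don't track reconciliation counts can lean on the From impl
// above; `staged` is a hypothetical set of already-staged outgoing records.
fn example_apply_results(staged: Vec<OutgoingBso>) -> ApplyResults {
    staged.into()
}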
/// An envelope for an incoming item, passed to `BridgedEngine::store_incoming`.
/// Envelopes are a halfway point between BSOs, the format used for all items on
/// the Sync server, and records, which are specific to each engine.
///
/// Specifically, the "envelope" has all the metadata plus the JSON payload
/// as clear-text - the analogy is that it's got all the info needed to get the
/// data from the server to the engine without knowing what the contents hold,
/// and without the engine needing to know how to decrypt.
///
/// A BSO is a JSON object with metadata fields (`id`, `modified`, `sortindex`),
/// and a BSO payload that is itself a JSON string. For encrypted records, the
/// BSO payload has a ciphertext, which must be decrypted to yield a cleartext.
/// The payload is a cleartext JSON string (that's three levels of JSON wrapping, if
/// you're keeping score: the BSO itself, BSO payload, and our sub-payload) with the
/// actual content payload.
///
/// An envelope combines the metadata fields from the BSO, and the cleartext
/// BSO payload.
#[derive(Clone, Debug, Deserialize)]
pub struct IncomingEnvelope {
pub id: Guid,
pub modified: ServerTimestamp,
#[serde(default)]
pub sortindex: Option<i32>,
#[serde(default)]
pub ttl: Option<u32>,
// Don't provide access to the cleartext payload directly. We want all
// callers to use `payload()` to convert/validate the string.
payload: String,
}
impl IncomingEnvelope {
/// Parses and returns the record payload from this envelope. Returns an
/// error if the envelope's payload isn't valid.
pub fn payload(&self) -> Result<Payload, PayloadError> {
let payload: Payload = serde_json::from_str(&self.payload)?;
if payload.id != self.id {
return Err(PayloadError::MismatchedId {
envelope: self.id.clone(),
payload: payload.id,
});
}
// Remove auto field data from payload and replace with real data
Ok(payload
.with_auto_field("ttl", self.ttl)
.with_auto_field("sortindex", self.sortindex))
}
}
/// An envelope for an outgoing item, returned from `BridgedEngine::apply`. This
/// is conceptually identical to [IncomingEnvelope], but omits fields that are
/// only set by the server, like `modified`.
#[derive(Clone, Debug, Serialize)]
pub struct OutgoingEnvelope {
id: Guid,
payload: String,
sortindex: Option<i32>,
ttl: Option<u32>,
}
impl From<Payload> for OutgoingEnvelope {
fn from(mut payload: Payload) -> Self {
let id = payload.id.clone();
// Remove auto field data from OutgoingEnvelope payload
let ttl = payload.take_auto_field("ttl");
let sortindex = payload.take_auto_field("sortindex");
OutgoingEnvelope {
id,
payload: payload.into_json_string(),
sortindex,
ttl,
}
}
}
/// An error that indicates a payload is invalid.
#[derive(Debug)]
pub enum PayloadError {
/// The payload contains invalid JSON.
Invalid(serde_json::Error),
/// The ID of the BSO in the envelope doesn't match the ID in the payload.
MismatchedId { envelope: Guid, payload: Guid },
}
impl Error for PayloadError {}
impl From<serde_json::Error> for PayloadError {
fn from(err: serde_json::Error) -> PayloadError {
PayloadError::Invalid(err)
}
}
impl fmt::Display for PayloadError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PayloadError::Invalid(err) => err.fmt(f),
PayloadError::MismatchedId { envelope, payload } => write!(
f,
"ID `{}` in envelope doesn't match `{}` in payload",
envelope, payload
),
}
}
}


@ -2,11 +2,12 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{Payload, ServerTimestamp};
use crate::bso::{IncomingBso, OutgoingBso};
use crate::ServerTimestamp;
#[derive(Debug, Clone)]
pub struct RecordChangeset<P> {
pub changes: Vec<P>,
pub struct RecordChangeset<T> {
pub changes: Vec<T>,
/// For GETs, the last sync timestamp that should be persisted after
/// applying the records.
/// For POSTs, this is the XIUS timestamp.
@ -14,18 +15,26 @@ pub struct RecordChangeset<P> {
pub collection: std::borrow::Cow<'static, str>,
}
pub type IncomingChangeset = RecordChangeset<(Payload, ServerTimestamp)>;
pub type OutgoingChangeset = RecordChangeset<Payload>;
pub type IncomingChangeset = RecordChangeset<IncomingBso>;
pub type OutgoingChangeset = RecordChangeset<OutgoingBso>;
// TODO: use a trait to unify this with the non-json versions
impl<T> RecordChangeset<T> {
#[inline]
pub fn new(
collection: impl Into<std::borrow::Cow<'static, str>>,
timestamp: ServerTimestamp,
) -> RecordChangeset<T> {
Self::new_with_changes(collection, timestamp, Vec::new())
}
#[inline]
pub fn new_with_changes(
collection: impl Into<std::borrow::Cow<'static, str>>,
timestamp: ServerTimestamp,
changes: Vec<T>,
) -> RecordChangeset<T> {
RecordChangeset {
changes: vec![],
changes,
timestamp,
collection: collection.into(),
}
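// A usage sketch (not part of this patch): an engine packaging staged records
// into an outgoing changeset; the collection name is hypothetical.
fn example_outgoing(
    timestamp: ServerTimestamp,
    staged: Vec<OutgoingBso>,
) -> OutgoingChangeset {
    OutgoingChangeset::new_with_changes("example-collection", timestamp, staged)
}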

4
third_party/rust/sync15/src/engine/mod.rs vendored

@ -30,9 +30,7 @@ mod changeset;
mod request;
mod sync_engine;
pub use bridged_engine::{
ApplyResults, BridgedEngine, IncomingEnvelope, OutgoingEnvelope, PayloadError,
};
pub use bridged_engine::{ApplyResults, BridgedEngine};
pub use changeset::{IncomingChangeset, OutgoingChangeset};
pub use request::{CollectionRequest, RequestOrder};
pub use sync_engine::{CollSyncIds, EngineSyncAssociation, SyncEngine, SyncEngineId};

18
third_party/rust/sync15/src/key_bundle.rs vendored

@ -25,11 +25,19 @@ impl KeyBundle {
/// Panics (asserts) if they aren't both 32 bytes.
pub fn new(enc: Vec<u8>, mac: Vec<u8>) -> Result<KeyBundle> {
if enc.len() != 32 {
log::error!("Bad key length (enc_key): {} != 32", enc.len());
error_support::report_error!(
"sync15-key-bundle",
"Bad key length (enc_key): {} != 32",
enc.len()
);
return Err(Error::BadKeyLength("enc_key", enc.len(), 32));
}
if mac.len() != 32 {
log::error!("Bad key length (mac_key): {} != 32", mac.len());
error_support::report_error!(
"sync15-key-bundle",
"Bad key length (mac_key): {} != 32",
mac.len()
);
return Err(Error::BadKeyLength("mac_key", mac.len(), 32));
}
Ok(KeyBundle {
@ -46,7 +54,11 @@ impl KeyBundle {
pub fn from_ksync_bytes(ksync: &[u8]) -> Result<KeyBundle> {
if ksync.len() != 64 {
log::error!("Bad key length (kSync): {} != 64", ksync.len());
error_support::report_error!(
"sync15-key-bundle",
"Bad key length (kSync): {} != 64",
ksync.len()
);
return Err(Error::BadKeyLength("kSync", ksync.len(), 64));
}
Ok(KeyBundle {

9
third_party/rust/sync15/src/lib.rs vendored

@ -5,8 +5,7 @@
#![allow(unknown_lints, clippy::implicit_hasher)]
#![warn(rust_2018_idioms)]
#[cfg(feature = "crypto")]
mod bso_record;
pub mod bso;
#[cfg(feature = "sync-client")]
pub mod client;
// Types to describe client records
@ -15,12 +14,13 @@ mod client_types;
// things too nested at this stage...
#[cfg(feature = "sync-client")]
pub mod clients_engine;
#[cfg(feature = "crypto")]
mod enc_payload;
#[cfg(feature = "sync-engine")]
pub mod engine;
mod error;
#[cfg(feature = "crypto")]
mod key_bundle;
mod payload;
mod record_types;
mod server_timestamp;
pub mod telemetry;
@ -28,10 +28,9 @@ pub mod telemetry;
pub use crate::client_types::{ClientData, DeviceType, RemoteClient};
pub use crate::error::{Error, Result};
#[cfg(feature = "crypto")]
pub use bso_record::{BsoRecord, CleartextBso, EncryptedBso, EncryptedPayload};
pub use enc_payload::EncryptedPayload;
#[cfg(feature = "crypto")]
pub use key_bundle::KeyBundle;
pub use payload::Payload;
pub use server_timestamp::ServerTimestamp;
pub use sync_guid::Guid;

154
third_party/rust/sync15/src/payload.rs vendored

@ -1,154 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::Guid;
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value as JsonValue};
/// Represents the decrypted payload in a Bso. Provides a minimal layer of type
/// safety to avoid double-encrypting.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Payload {
pub id: Guid,
#[serde(default)]
#[serde(skip_serializing_if = "crate::skip_if_default")]
pub deleted: bool,
#[serde(flatten)]
pub data: Map<String, JsonValue>,
}
impl Payload {
pub fn new_tombstone(id: impl Into<Guid>) -> Payload {
Payload {
id: id.into(),
deleted: true,
data: Map::new(),
}
}
pub fn new_tombstone_with_ttl(id: impl Into<Guid>, ttl: u32) -> Payload {
let mut result = Payload::new_tombstone(id);
result.data.insert("ttl".into(), ttl.into());
result
}
#[inline]
pub fn with_sortindex(mut self, index: i32) -> Payload {
self.data.insert("sortindex".into(), index.into());
self
}
/// "Auto" fields are fields like 'sortindex' and 'ttl', which are:
///
/// - Added to the payload automatically when deserializing if present on
/// the incoming BSO or envelope.
/// - Removed from the payload automatically and attached to the BSO or
/// envelope if present on the outgoing payload.
pub fn with_auto_field<T: Into<JsonValue>>(mut self, name: &str, v: Option<T>) -> Payload {
let old_value: Option<JsonValue> = if let Some(value) = v {
self.data.insert(name.into(), value.into())
} else {
self.data.remove(name)
};
        // This is a little dubious, but it seems like if we have e.g. a `sortindex` field on the payload
        // it's going to be a bug if we use it instead of the "real" sort index.
if let Some(old_value) = old_value {
log::warn!(
"Payload for record {} already contains 'automatic' field \"{}\"? \
Overwriting auto value: {} with 'real' value",
self.id,
name,
old_value,
);
}
self
}
pub fn take_auto_field<V>(&mut self, name: &str) -> Option<V>
where
for<'a> V: Deserialize<'a>,
{
let v = self.data.remove(name)?;
match serde_json::from_value(v) {
Ok(v) => Some(v),
Err(e) => {
log::error!(
"Automatic field {} exists on payload, but cannot be deserialized: {}",
name,
e
);
None
}
}
}
#[inline]
pub fn id(&self) -> &str {
&self.id[..]
}
#[inline]
pub fn is_tombstone(&self) -> bool {
self.deleted
}
pub fn from_json(value: JsonValue) -> Result<Payload, serde_json::Error> {
serde_json::from_value(value)
}
/// Deserializes the BSO payload into a specific record type `T`.
///
/// BSO payloads are unstructured JSON objects, with string keys and
/// dynamically-typed values. `into_record` makes it more convenient to
/// work with payloads by converting them into data type-specific structs.
/// Your record type only needs to derive or implement `serde::Deserialize`;
/// Serde will take care of the rest.
///
/// # Errors
///
/// `into_record` returns errors for type mismatches. As an example, trying
/// to deserialize a string value from the payload into an integer field in
/// `T` will fail.
///
/// If there's a chance that a field contains invalid or mistyped data,
/// you'll want to extract it from `payload.data` manually, instead of using
/// `into_record`. This has been seen in the wild: for example, `dateAdded`
/// for bookmarks can be either an integer or a string.
pub fn into_record<T>(self) -> Result<T, serde_json::Error>
where
for<'a> T: Deserialize<'a>,
{
serde_json::from_value(JsonValue::from(self))
}
pub fn from_record<T: Serialize>(v: T) -> Result<Payload, serde_json::Error> {
// TODO(issue #2588): This is kind of dumb, we do to_value and then
// from_value. In general a more strongly typed API would help us avoid
// this sort of thing... But also concretely this could probably be
// avoided? At least in some cases.
Payload::from_json(serde_json::to_value(v)?)
}
pub fn into_json_string(self) -> String {
serde_json::to_string(&JsonValue::from(self))
.expect("JSON.stringify failed, which shouldn't be possible")
}
}
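// A short illustration (not part of this patch) of the auto-field dance
// described above, with hypothetical values: the field is attached to the
// payload on the way in and stripped back off on the way out.
fn example_auto_fields() {
    let mut payload = Payload::from_json(serde_json::json!({ "id": "A" }))
        .unwrap()
        .with_auto_field("sortindex", Some(100));
    let sortindex: Option<i32> = payload.take_auto_field("sortindex");
    assert_eq!(sortindex, Some(100));
}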
impl From<Payload> for JsonValue {
fn from(cleartext: Payload) -> Self {
let Payload {
mut data,
id,
deleted,
} = cleartext;
data.insert("id".to_string(), JsonValue::String(id.into_string()));
if deleted {
data.insert("deleted".to_string(), JsonValue::Bool(true));
}
JsonValue::Object(data)
}
}


@ -12,7 +12,7 @@ impl ServerTimestamp {
pub fn from_float_seconds(ts: f64) -> Self {
let rf = (ts * 1000.0).round();
if !rf.is_finite() || rf < 0.0 || rf >= i64::max_value() as f64 {
log::error!("Illegal timestamp: {}", ts);
error_support::report_error!("sync15-illegal-timestamp", "Illegal timestamp: {}", ts);
ServerTimestamp(0)
} else {
ServerTimestamp(rf as i64)
@ -25,7 +25,11 @@ impl ServerTimestamp {
if ts >= 0 {
Self(ts)
} else {
log::error!("Illegal timestamp, substituting 0: {}", ts);
error_support::report_error!(
"sync15-illegal-timestamp",
"Illegal timestamp, substituting 0: {}",
ts
);
Self(0)
}
}
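// Worked example (not part of this patch): 12344321.0 seconds rounds to
// 12_344_321_000 milliseconds, matching the deserialization tests elsewhere
// in this crate.
#[test]
fn example_from_float_seconds() {
    assert_eq!(
        ServerTimestamp::from_float_seconds(12344321.0).0,
        12_344_321_000
    );
}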

2
third_party/rust/tabs/.cargo-checksum.json vendored

@ -1 +1 @@
{"files":{"Cargo.toml":"1fcfae54aa01f8344623621747bc61a338adc333106871f8f9fd44d0c53ab2a1","README.md":"c48b8f391ef822c4f3971b5f453a1e7b43bea232752d520460d2f04803aead1a","build.rs":"024918c1d468c8dae03e4edaad14d827b7ebe7995809a8fe99efb1d9faa1206a","src/error.rs":"83a9a80b4b0405a3f62876ef9046bcbf769ce61889f9d1d3f43c2b697c1b0ec7","src/lib.rs":"fcd82e1c98ad6de8a1aa4a26a55d5dd8f65027b39db5eaf1c037b6c9b5b179a2","src/schema.rs":"0f1c847b44733bfe44b5aec5ff807771e605e3e7302bd9b31a103f530edc4355","src/storage.rs":"778224dd3bcf3ed93b2a8eaa58306e3b3cd0e7f9f40b238fcc20b381af0e6e21","src/store.rs":"ab0b6214b30b0f0fa7c6a89098ff3db1a8f76264f6711c4481c0be460afe522b","src/sync/bridge.rs":"18e890529cadd67b1cf62968e224efa986a996393fd6e3bfcc5bd335846ab5fa","src/sync/engine.rs":"64e01b9f187603bfa727bb54f547d0b7b4ce0f3e50a0e6f638788529345c9207","src/sync/full_sync.rs":"e7837722d7c250e1653fef453338dae322aaf25f96a399d001e2b1bfdea894c8","src/sync/mod.rs":"2ebf9281101988efdcbec98d712b515101412161cb30176624772fcb4a9fba02","src/sync/record.rs":"a3f7dd114a1e3f2e3483bbccc3f91737ae18e5c118a5437203523dd2793ef370","src/tabs.udl":"a40c17ef513cb3c86c3148e0f1bdafebe618025046bb97ca1ad511d45cc76d34","uniffi.toml":"5156701368f0b5856e658143714a43058385c8ac53bee72d7a5a332b576dfb82"},"package":null}
{"files":{"Cargo.toml":"a18056939895f26f34ddd5296873d014253c0441fc5d60f16f4b938602515e35","README.md":"c48b8f391ef822c4f3971b5f453a1e7b43bea232752d520460d2f04803aead1a","build.rs":"024918c1d468c8dae03e4edaad14d827b7ebe7995809a8fe99efb1d9faa1206a","src/error.rs":"ac3d450f0ba6a855c37fa2dd829004b22dce5ad4416ebec66a3d7d6212bdbcd7","src/lib.rs":"d7eff9e1c28f88a48bfffa5acc0e8da12336c0c6ad55f5df211da4720927cce7","src/schema.rs":"19941d1291500c20eff467caa57fa845d4902b349d80508edc5d453725d1d870","src/storage.rs":"5299a5424a65f513300d28d62e242fb8f662c3c610b221d21497926551585226","src/store.rs":"ab0b6214b30b0f0fa7c6a89098ff3db1a8f76264f6711c4481c0be460afe522b","src/sync/bridge.rs":"138b157d4065b448e8776bdf69a2a85d9e6895b2b29c9471b80e0caf062466b8","src/sync/engine.rs":"7d23cc163669ba2aa7242f42a0254fc67008803495a2aefc39de859d34596e10","src/sync/full_sync.rs":"412d24231f7a0c9a796c2afe482bc520d435f980bcba2841628c1147f6cbf479","src/sync/mod.rs":"8f0544ea54ad3f6daaf2650242d009229e09643ac9396e55ba111f2635cad232","src/sync/record.rs":"353b8e62f4f1a85edd8d693102d288ba59978e702e2c18a9c7415d731342b4d9","src/tabs.udl":"a555fe11b5fa7ea9aefa7d7be31906a63b31cbc16b9b7f5ad952fd0e08ba5c61","uniffi.toml":"5156701368f0b5856e658143714a43058385c8ac53bee72d7a5a332b576dfb82"},"package":null}

third_party/rust/tabs/Cargo.toml (vendored)

@ -38,8 +38,9 @@ uniffi_macros = "^0.21"
url = "2.1" # mozilla-central can't yet take 2.2 (see bug 1734538)
[dev-dependencies]
tempfile = "3.1"
env_logger = { version = "0.8.0", default-features = false, features = ["termcolor", "atty", "humantime"] }
sync15 = { path = "../sync15", features = ["test-utils"] }
tempfile = "3.1"
[build-dependencies]
uniffi_build = { version = "^0.21", features = [ "builtin-bindgen" ]}

third_party/rust/tabs/src/error.rs (vendored)

@ -2,8 +2,29 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use error_support::{ErrorHandling, GetErrorHandling};
/// Result enum for the public interface
pub type ApiResult<T> = std::result::Result<T, TabsApiError>;
/// Result enum for internal functions
pub type Result<T> = std::result::Result<T, Error>;
// Errors we return via the public interface.
#[derive(Debug, thiserror::Error)]
pub enum TabsError {
pub enum TabsApiError {
#[error("SyncError: {reason}")]
SyncError { reason: String },
#[error("SqlError: {reason}")]
SqlError { reason: String },
#[error("Unexpected tabs error: {reason}")]
UnexpectedTabsError { reason: String },
}
// Error we use internally
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[cfg(feature = "full-sync")]
#[error("Error synchronizing: {0}")]
SyncAdapterError(#[from] sync15::Error),
@ -15,9 +36,6 @@ pub enum TabsError {
#[error("Sync feature is disabled: {0}")]
SyncAdapterError(String),
#[error("Sync reset error: {0}")]
SyncResetError(#[from] anyhow::Error),
#[error("Error parsing JSON data: {0}")]
JsonError(#[from] serde_json::Error),
@ -34,4 +52,39 @@ pub enum TabsError {
OpenDatabaseError(#[from] sql_support::open_database::Error),
}
pub type Result<T> = std::result::Result<T, TabsError>;
// Define how our internal errors are handled and converted to external errors
// See `support/error/README.md` for how this works, especially the warning about PII.
impl GetErrorHandling for Error {
type ExternalError = TabsApiError;
fn get_error_handling(&self) -> ErrorHandling<Self::ExternalError> {
match self {
Self::SyncAdapterError(e) => ErrorHandling::convert(TabsApiError::SyncError {
reason: e.to_string(),
})
.report_error("tabs-sync-error"),
Self::JsonError(e) => ErrorHandling::convert(TabsApiError::UnexpectedTabsError {
reason: e.to_string(),
})
.report_error("tabs-json-error"),
Self::MissingLocalIdError => {
ErrorHandling::convert(TabsApiError::UnexpectedTabsError {
reason: "MissingLocalId".to_string(),
})
.report_error("tabs-missing-local-id-error")
}
Self::UrlParseError(e) => ErrorHandling::convert(TabsApiError::UnexpectedTabsError {
reason: e.to_string(),
})
.report_error("tabs-url-parse-error"),
Self::SqlError(e) => ErrorHandling::convert(TabsApiError::SqlError {
reason: e.to_string(),
})
.report_error("tabs-sql-error"),
Self::OpenDatabaseError(e) => ErrorHandling::convert(TabsApiError::SqlError {
reason: e.to_string(),
})
.report_error("tabs-open-database-error"),
}
}
}

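The reworked error.rs splits errors in two: an internal Error that can carry rich causes, and a public TabsApiError that crosses the FFI, converted (and optionally reported under a tag) at the boundary via error_support::GetErrorHandling. A self-contained toy of that shape, with local stand-ins for the real types and trait:

// Toy internal error (stand-in for the crate's `Error`).
#[derive(Debug)]
enum Error {
    Json(String),
    Sql(String),
}

// Toy public error (stand-in for `TabsApiError`).
#[derive(Debug)]
enum ApiError {
    SqlError { reason: String },
    UnexpectedTabsError { reason: String },
}

impl Error {
    // Stand-in for error_support::GetErrorHandling: map the internal error
    // to its public shape and pick the tag under which to report it.
    fn into_api_error(self) -> (ApiError, &'static str) {
        match self {
            Error::Json(reason) => (
                ApiError::UnexpectedTabsError { reason },
                "tabs-json-error",
            ),
            Error::Sql(reason) => (ApiError::SqlError { reason }, "tabs-sql-error"),
        }
    }
}

fn main() {
    let (api, tag) = Error::Json("bad payload".into()).into_api_error();
    println!("would report under {:?}: {:?}", tag, api);
}
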
third_party/rust/tabs/src/lib.rs (vendored)

@ -30,7 +30,7 @@ impl UniffiCustomTypeConverter for TabsGuid {
pub use crate::storage::{ClientRemoteTabs, RemoteTabRecord, TabsDeviceType};
pub use crate::store::TabsStore;
use error::TabsError;
pub use error::{ApiResult, Error, Result, TabsApiError};
use sync15::DeviceType;
pub use crate::sync::engine::get_registered_sync_engine;

third_party/rust/tabs/src/schema.rs (vendored)

@ -29,7 +29,7 @@ impl MigrationLogic for TabsMigrationLogin {
const NAME: &'static str = "tabs storage db";
const END_VERSION: u32 = 1;
fn prepare(&self, conn: &Connection) -> MigrationResult<()> {
fn prepare(&self, conn: &Connection, _db_empty: bool) -> MigrationResult<()> {
let initial_pragmas = "
-- We don't care about temp tables being persisted to disk.
PRAGMA temp_store = 2;

third_party/rust/tabs/src/storage.rs (vendored)

@ -36,6 +36,7 @@ pub struct ClientRemoteTabs {
skip_serializing_if = "devicetype_is_unknown"
)]
pub device_type: DeviceType,
pub last_modified: i64,
pub remote_tabs: Vec<RemoteTab>,
}
@ -162,7 +163,11 @@ impl TabsStorage {
pub fn get_remote_tabs(&mut self) -> Option<Vec<ClientRemoteTabs>> {
match self.open_if_exists() {
Err(e) => {
log::error!("Failed to read remote tabs: {}", e);
error_support::report_error!(
"tabs-read-remote",
"Failed to read remote tabs: {}",
e
);
None
}
Ok(None) => None,
@ -174,7 +179,11 @@ impl TabsStorage {
) {
Ok(crts) => Some(crts),
Err(e) => {
log::error!("Failed to read database: {}", e);
error_support::report_error!(
"tabs-read-remote",
"Failed to read database: {}",
e
);
None
}
}

third_party/rust/tabs/src/sync/bridge.rs (vendored)

@ -4,14 +4,13 @@
use std::sync::{Arc, Mutex};
use crate::error::{Result, TabsError};
use crate::error::{ApiResult, Result, TabsApiError};
use crate::sync::engine::TabsSyncImpl;
use crate::sync::record::TabsRecord;
use crate::TabsStore;
use sync15::engine::{
ApplyResults, BridgedEngine, CollSyncIds, EngineSyncAssociation, IncomingEnvelope,
};
use sync15::{ClientData, Payload, ServerTimestamp};
use error_support::handle_error;
use sync15::bso::{IncomingBso, OutgoingBso};
use sync15::engine::{ApplyResults, BridgedEngine, CollSyncIds, EngineSyncAssociation};
use sync15::{ClientData, ServerTimestamp};
use sync_guid::Guid as SyncGuid;
impl TabsStore {
@ -32,7 +31,7 @@ impl TabsStore {
/// See also #2841 and #5139
pub struct BridgedEngineImpl {
sync_impl: Mutex<TabsSyncImpl>,
incoming_payload: Mutex<Vec<IncomingEnvelope>>,
incoming: Mutex<Vec<IncomingBso>>,
}
impl BridgedEngineImpl {
@ -40,145 +39,146 @@ impl BridgedEngineImpl {
pub fn new(store: &Arc<TabsStore>) -> Self {
Self {
sync_impl: Mutex::new(TabsSyncImpl::new(store.clone())),
incoming_payload: Mutex::default(),
incoming: Mutex::default(),
}
}
}
impl BridgedEngine for BridgedEngineImpl {
type Error = TabsError;
type Error = TabsApiError;
fn last_sync(&self) -> Result<i64> {
Ok(self
.sync_impl
.lock()
.unwrap()
.last_sync
.unwrap_or_default()
.as_millis())
fn last_sync(&self) -> ApiResult<i64> {
handle_error! {
Ok(self
.sync_impl
.lock()
.unwrap()
.last_sync
.unwrap_or_default()
.as_millis())
}
}
fn set_last_sync(&self, last_sync_millis: i64) -> Result<()> {
self.sync_impl.lock().unwrap().last_sync =
Some(ServerTimestamp::from_millis(last_sync_millis));
Ok(())
fn set_last_sync(&self, last_sync_millis: i64) -> ApiResult<()> {
handle_error! {
self.sync_impl.lock().unwrap().last_sync =
Some(ServerTimestamp::from_millis(last_sync_millis));
Ok(())
}
}
fn sync_id(&self) -> Result<Option<String>> {
Ok(match self.sync_impl.lock().unwrap().get_sync_assoc() {
EngineSyncAssociation::Connected(id) => Some(id.coll.to_string()),
EngineSyncAssociation::Disconnected => None,
})
fn sync_id(&self) -> ApiResult<Option<String>> {
handle_error! {
Ok(match self.sync_impl.lock().unwrap().get_sync_assoc() {
EngineSyncAssociation::Connected(id) => Some(id.coll.to_string()),
EngineSyncAssociation::Disconnected => None,
})
}
}
fn reset_sync_id(&self) -> Result<String> {
let new_id = SyncGuid::random().to_string();
let new_coll_ids = CollSyncIds {
global: SyncGuid::empty(),
coll: new_id.clone().into(),
};
self.sync_impl
.lock()
.unwrap()
.reset(EngineSyncAssociation::Connected(new_coll_ids))?;
Ok(new_id)
}
fn ensure_current_sync_id(&self, sync_id: &str) -> Result<String> {
let mut sync_impl = self.sync_impl.lock().unwrap();
let assoc = sync_impl.get_sync_assoc();
if matches!(assoc, EngineSyncAssociation::Connected(c) if c.coll == sync_id) {
log::debug!("ensure_current_sync_id is current");
} else {
fn reset_sync_id(&self) -> ApiResult<String> {
handle_error! {
let new_id = SyncGuid::random().to_string();
let new_coll_ids = CollSyncIds {
global: SyncGuid::empty(),
coll: sync_id.into(),
coll: new_id.clone().into(),
};
sync_impl.reset(EngineSyncAssociation::Connected(new_coll_ids))?;
self.sync_impl
.lock()
.unwrap()
.reset(EngineSyncAssociation::Connected(new_coll_ids))?;
Ok(new_id)
}
Ok(sync_id.to_string()) // this is a bit odd, why the result?
}
fn prepare_for_sync(&self, client_data: &str) -> Result<()> {
let data: ClientData = serde_json::from_str(client_data)?;
Ok(self.sync_impl.lock().unwrap().prepare_for_sync(data)?)
fn ensure_current_sync_id(&self, sync_id: &str) -> ApiResult<String> {
handle_error! {
let mut sync_impl = self.sync_impl.lock().unwrap();
let assoc = sync_impl.get_sync_assoc();
if matches!(assoc, EngineSyncAssociation::Connected(c) if c.coll == sync_id) {
log::debug!("ensure_current_sync_id is current");
} else {
let new_coll_ids = CollSyncIds {
global: SyncGuid::empty(),
coll: sync_id.into(),
};
sync_impl.reset(EngineSyncAssociation::Connected(new_coll_ids))?;
}
Ok(sync_id.to_string()) // this is a bit odd, why the result?
}
}
fn sync_started(&self) -> Result<()> {
fn prepare_for_sync(&self, client_data: &str) -> ApiResult<()> {
handle_error! {
let data: ClientData = serde_json::from_str(client_data)?;
self.sync_impl.lock().unwrap().prepare_for_sync(data)
}
}
fn sync_started(&self) -> ApiResult<()> {
// This is a no-op for the Tabs Engine
Ok(())
}
fn store_incoming(&self, incoming_envelopes: &[IncomingEnvelope]) -> Result<()> {
// Store the incoming payload in memory so we can use it in apply
*(self.incoming_payload.lock().unwrap()) = incoming_envelopes.to_vec();
Ok(())
}
fn apply(&self) -> Result<ApplyResults> {
let incoming = self.incoming_payload.lock().unwrap();
// turn them into a TabRecord.
let mut records = Vec::with_capacity(incoming.len());
for inc in &*incoming {
// This error handling is a bit unfortunate, but will soon be removed as we
// move towards unifying the bridged_engine with a "real" engine.
let payload = match inc.payload() {
Ok(p) => p,
Err(e) => {
log::warn!("Ignoring invalid incoming envelope: {}", e);
continue;
}
};
let record = match TabsRecord::from_payload(payload) {
Ok(r) => r,
Err(e) => {
log::warn!("Ignoring invalid incoming tab record: {}", e);
continue;
}
};
records.push(record);
fn store_incoming(&self, incoming: Vec<IncomingBso>) -> ApiResult<()> {
handle_error! {
// Store the incoming payload in memory so we can use it in apply
*(self.incoming.lock().unwrap()) = incoming;
Ok(())
}
}
let mut sync_impl = self.sync_impl.lock().unwrap();
let outgoing = sync_impl.apply_incoming(records)?;
fn apply(&self) -> ApiResult<ApplyResults> {
handle_error! {
let mut incoming = self.incoming.lock().unwrap();
// We've a reference to a Vec<> but it's owned by the mutex - swap the mutex owned
// value for an empty vec so we can consume the original.
let mut records = Vec::new();
std::mem::swap(&mut records, &mut *incoming);
let mut telem = sync15::telemetry::Engine::new("tabs");
// Turn outgoing back into envelopes - a bit inefficient going via a Payload.
let mut outgoing_envelopes = Vec::with_capacity(1);
if let Some(outgoing_record) = outgoing {
let payload = Payload::from_record(outgoing_record)?;
outgoing_envelopes.push(payload.into());
let mut sync_impl = self.sync_impl.lock().unwrap();
let outgoing = sync_impl.apply_incoming(records, &mut telem)?;
Ok(ApplyResults {
records: outgoing,
num_reconciled: Some(0),
})
}
Ok(ApplyResults {
envelopes: outgoing_envelopes,
num_reconciled: Some(0),
})
}
fn set_uploaded(&self, server_modified_millis: i64, ids: &[SyncGuid]) -> Result<()> {
Ok(self
.sync_impl
.lock()
.unwrap()
.sync_finished(ServerTimestamp::from_millis(server_modified_millis), ids)?)
fn set_uploaded(&self, server_modified_millis: i64, ids: &[SyncGuid]) -> ApiResult<()> {
handle_error! {
self
.sync_impl
.lock()
.unwrap()
.sync_finished(ServerTimestamp::from_millis(server_modified_millis), ids)
}
}
fn sync_finished(&self) -> Result<()> {
*(self.incoming_payload.lock().unwrap()) = Vec::default();
Ok(())
fn sync_finished(&self) -> ApiResult<()> {
handle_error! {
*(self.incoming.lock().unwrap()) = Vec::default();
Ok(())
}
}
fn reset(&self) -> Result<()> {
self.sync_impl
.lock()
.unwrap()
.reset(EngineSyncAssociation::Disconnected)?;
Ok(())
fn reset(&self) -> ApiResult<()> {
handle_error! {
self.sync_impl
.lock()
.unwrap()
.reset(EngineSyncAssociation::Disconnected)?;
Ok(())
}
}
fn wipe(&self) -> Result<()> {
self.sync_impl.lock().unwrap().wipe()?;
Ok(())
fn wipe(&self) -> ApiResult<()> {
handle_error! {
self.sync_impl.lock().unwrap().wipe()?;
Ok(())
}
}
}
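
Note the swap trick in apply(): the staged records are owned by the mutex, so the code exchanges them for an empty Vec and consumes the originals without cloning. std::mem::take expresses the same idea; a small std-only sketch:

use std::sync::Mutex;

fn main() {
    let staged: Mutex<Vec<String>> = Mutex::new(vec!["a".into(), "b".into()]);

    // Take ownership of the staged records, leaving an empty Vec behind,
    // without cloning and without holding the lock longer than needed.
    let records: Vec<String> = std::mem::take(&mut *staged.lock().unwrap());

    assert_eq!(records.len(), 2);
    assert!(staged.lock().unwrap().is_empty());
}
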
@ -192,65 +192,80 @@ impl TabsBridgedEngine {
Self { bridge_impl }
}
pub fn last_sync(&self) -> Result<i64> {
pub fn last_sync(&self) -> ApiResult<i64> {
self.bridge_impl.last_sync()
}
pub fn set_last_sync(&self, last_sync: i64) -> Result<()> {
pub fn set_last_sync(&self, last_sync: i64) -> ApiResult<()> {
self.bridge_impl.set_last_sync(last_sync)
}
pub fn sync_id(&self) -> Result<Option<String>> {
pub fn sync_id(&self) -> ApiResult<Option<String>> {
self.bridge_impl.sync_id()
}
pub fn reset_sync_id(&self) -> Result<String> {
pub fn reset_sync_id(&self) -> ApiResult<String> {
self.bridge_impl.reset_sync_id()
}
pub fn ensure_current_sync_id(&self, sync_id: &str) -> Result<String> {
pub fn ensure_current_sync_id(&self, sync_id: &str) -> ApiResult<String> {
self.bridge_impl.ensure_current_sync_id(sync_id)
}
pub fn prepare_for_sync(&self, client_data: &str) -> Result<()> {
pub fn prepare_for_sync(&self, client_data: &str) -> ApiResult<()> {
self.bridge_impl.prepare_for_sync(client_data)
}
pub fn sync_started(&self) -> Result<()> {
pub fn sync_started(&self) -> ApiResult<()> {
self.bridge_impl.sync_started()
}
pub fn store_incoming(&self, incoming: Vec<String>) -> Result<()> {
let mut envelopes = Vec::with_capacity(incoming.len());
for inc in incoming {
envelopes.push(serde_json::from_str::<IncomingEnvelope>(&inc)?);
// Decode the JSON-encoded IncomingBso's that UniFFI passes to us
fn convert_incoming_bsos(&self, incoming: Vec<String>) -> ApiResult<Vec<IncomingBso>> {
handle_error! {
let mut bsos = Vec::with_capacity(incoming.len());
for inc in incoming {
bsos.push(serde_json::from_str::<IncomingBso>(&inc)?);
}
Ok(bsos)
}
self.bridge_impl.store_incoming(&envelopes)
}
pub fn apply(&self) -> Result<Vec<String>> {
// Encode OutgoingBso's into JSON for UniFFI
fn convert_outgoing_bsos(&self, outgoing: Vec<OutgoingBso>) -> ApiResult<Vec<String>> {
handle_error! {
let mut bsos = Vec::with_capacity(outgoing.len());
for e in outgoing {
bsos.push(serde_json::to_string(&e)?);
}
Ok(bsos)
}
}
pub fn store_incoming(&self, incoming: Vec<String>) -> ApiResult<()> {
self.bridge_impl
.store_incoming(self.convert_incoming_bsos(incoming)?)
}
pub fn apply(&self) -> ApiResult<Vec<String>> {
let apply_results = self.bridge_impl.apply()?;
let mut envelopes = Vec::with_capacity(apply_results.envelopes.len());
for e in apply_results.envelopes {
envelopes.push(serde_json::to_string(&e)?);
}
Ok(envelopes)
self.convert_outgoing_bsos(apply_results.records)
}
pub fn set_uploaded(&self, server_modified_millis: i64, guids: Vec<SyncGuid>) -> Result<()> {
pub fn set_uploaded(&self, server_modified_millis: i64, guids: Vec<SyncGuid>) -> ApiResult<()> {
self.bridge_impl
.set_uploaded(server_modified_millis, &guids)
}
pub fn sync_finished(&self) -> Result<()> {
pub fn sync_finished(&self) -> ApiResult<()> {
self.bridge_impl.sync_finished()
}
pub fn reset(&self) -> Result<()> {
pub fn reset(&self) -> ApiResult<()> {
self.bridge_impl.reset()
}
pub fn wipe(&self) -> Result<()> {
pub fn wipe(&self) -> ApiResult<()> {
self.bridge_impl.wipe()
}
}
@ -397,13 +412,9 @@ mod tests {
let expected = json!({
"id": "my-device".to_string(),
"payload": json!({
// XXX - we aren't supposed to have the ID here, but this isn't a tabs
// issue, it's a pre-existing `Payload` issue.
"id": "my-device".to_string(),
"clientName": "my device",
"tabs": serde_json::to_value(expected_tabs).unwrap(),
}).to_string(),
"sortindex": (),
"ttl": TTL_1_YEAR,
});

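Every public method of the bridged engine is now wrapped in error_support::handle_error!, which runs a body returning the internal Result and converts failures into the public ApiResult at the FFI boundary. A minimal sketch of such a macro over toy error types (an assumption about its shape; the real macro also routes the error through the reporting machinery, which this sketch omits):

#[derive(Debug)]
struct Error(String);
#[derive(Debug)]
struct ApiError(String);

type Result<T> = std::result::Result<T, Error>;
type ApiResult<T> = std::result::Result<T, ApiError>;

impl From<Error> for ApiError {
    fn from(e: Error) -> Self {
        ApiError(e.0)
    }
}

// Minimal stand-in for error_support::handle_error!: evaluate the body
// (which yields the internal Result) and convert the error at the boundary.
macro_rules! handle_error {
    ($($body:tt)*) => {{
        let result: Result<_> = (|| { $($body)* })();
        result.map_err(ApiError::from)
    }};
}

fn last_sync() -> ApiResult<i64> {
    handle_error! {
        Ok(42)
    }
}

fn main() {
    println!("{:?}", last_sync());
}
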
third_party/rust/tabs/src/sync/engine.rs (vendored)

@ -5,14 +5,15 @@
use crate::storage::{ClientRemoteTabs, RemoteTab};
use crate::store::TabsStore;
use crate::sync::record::{TabsRecord, TabsRecordTab};
use anyhow::Result;
use crate::Result;
use std::collections::HashMap;
use std::sync::{Arc, Mutex, Weak};
use sync15::bso::{IncomingBso, OutgoingBso, OutgoingEnvelope};
use sync15::engine::{
CollectionRequest, EngineSyncAssociation, IncomingChangeset, OutgoingChangeset, SyncEngine,
SyncEngineId,
};
use sync15::{telemetry, ClientData, DeviceType, Payload, RemoteClient, ServerTimestamp};
use sync15::{telemetry, ClientData, DeviceType, RemoteClient, ServerTimestamp};
use sync_guid::Guid;
const TTL_1_YEAR: u32 = 31_622_400;
@ -41,6 +42,7 @@ pub fn get_registered_sync_engine(engine_id: &SyncEngineId) -> Option<Box<dyn Sy
impl ClientRemoteTabs {
fn from_record_with_remote_client(
client_id: String,
last_modified: ServerTimestamp,
remote_client: &RemoteClient,
record: TabsRecord,
) -> Self {
@ -48,15 +50,17 @@ impl ClientRemoteTabs {
client_id,
client_name: remote_client.device_name.clone(),
device_type: remote_client.device_type.unwrap_or(DeviceType::Unknown),
last_modified: last_modified.as_millis(),
remote_tabs: record.tabs.iter().map(RemoteTab::from_record_tab).collect(),
}
}
fn from_record(client_id: String, record: TabsRecord) -> Self {
fn from_record(client_id: String, last_modified: ServerTimestamp, record: TabsRecord) -> Self {
Self {
client_id,
client_name: record.client_name,
device_type: DeviceType::Unknown,
last_modified: last_modified.as_millis(),
remote_tabs: record.tabs.iter().map(RemoteTab::from_record_tab).collect(),
}
}
@ -69,7 +73,6 @@ impl ClientRemoteTabs {
.iter()
.map(RemoteTab::to_record_tab)
.collect(),
ttl: TTL_1_YEAR,
}
}
}
@ -120,15 +123,30 @@ impl TabsSyncImpl {
Ok(())
}
pub fn apply_incoming(&mut self, inbound: Vec<TabsRecord>) -> Result<Option<TabsRecord>> {
pub fn apply_incoming(
&mut self,
inbound: Vec<IncomingBso>,
telem: &mut telemetry::Engine,
) -> Result<Vec<OutgoingBso>> {
let local_id = self.local_id.clone();
let mut remote_tabs = Vec::with_capacity(inbound.len());
let mut incoming_telemetry = telemetry::EngineIncoming::new();
for record in inbound {
if record.id == local_id {
for incoming in inbound {
if incoming.envelope.id == local_id {
// That's our own record, ignore it.
continue;
}
let modified = incoming.envelope.modified;
let record = match incoming.into_content::<TabsRecord>().content() {
Some(record) => record,
None => {
// Invalid record or a "tombstone" which tabs don't have.
log::warn!("Ignoring incoming invalid tab");
incoming_telemetry.failed(1);
continue;
}
};
let id = record.id.clone();
let crt = if let Some(remote_client) = self.remote_clients.get(&id) {
ClientRemoteTabs::from_record_with_remote_client(
@ -137,6 +155,7 @@ impl TabsSyncImpl {
.as_ref()
.unwrap_or(&id)
.to_owned(),
modified,
remote_client,
record,
)
@ -145,11 +164,19 @@ impl TabsSyncImpl {
// could happen - in most cases though, it will be due to a disconnected client -
// so we really should consider just dropping it? (Sadly though, it does seem
// possible it's actually a very recently connected client, so we keep it)
// XXX - this is actually a foot-gun, particularly for desktop. If we don't know
// the device, we assume the device ID is the fxa-device-id, which may not be the
// case.
// So we should drop these records! But we can't do this now because stand alone
// syncing (ie, store.sync()) doesn't allow us to pass the device list in, so
// we'd get no rows!
// See also: https://github.com/mozilla/application-services/issues/5199
log::info!(
"Storing tabs from a client that doesn't appear in the devices list: {}",
id,
);
ClientRemoteTabs::from_record(id, record)
ClientRemoteTabs::from_record(id, modified, record)
};
remote_tabs.push(crt);
}
@ -172,16 +199,26 @@ impl TabsSyncImpl {
})
.unwrap_or_else(|| (String::new(), DeviceType::Unknown));
let local_record = ClientRemoteTabs {
client_id: local_id,
client_id: local_id.clone(),
client_name,
device_type,
last_modified: 0, // ignored for outgoing records.
remote_tabs: local_tabs.to_vec(),
};
log::trace!("outgoing {:?}", local_record);
Some(local_record.to_record())
let envelope = OutgoingEnvelope {
id: local_id.into(),
ttl: Some(TTL_1_YEAR),
..Default::default()
};
vec![OutgoingBso::from_content(
envelope,
local_record.to_record(),
)?]
} else {
None
vec![]
};
telem.incoming(incoming_telemetry);
Ok(outgoing)
}
@ -237,65 +274,50 @@ impl SyncEngine for TabsEngine {
"tabs".into()
}
fn prepare_for_sync(&self, get_client_data: &dyn Fn() -> ClientData) -> Result<()> {
self.sync_impl
fn prepare_for_sync(&self, get_client_data: &dyn Fn() -> ClientData) -> anyhow::Result<()> {
Ok(self
.sync_impl
.lock()
.unwrap()
.prepare_for_sync(get_client_data())
.prepare_for_sync(get_client_data())?)
}
fn apply_incoming(
&self,
inbound: Vec<IncomingChangeset>,
telem: &mut telemetry::Engine,
) -> Result<OutgoingChangeset> {
) -> anyhow::Result<OutgoingChangeset> {
assert_eq!(inbound.len(), 1, "only requested one set of records");
let inbound = inbound.into_iter().next().unwrap();
let mut incoming_telemetry = telemetry::EngineIncoming::new();
let mut incoming_records = Vec::with_capacity(inbound.changes.len());
for incoming in inbound.changes {
let record = match TabsRecord::from_payload(incoming.0) {
Ok(record) => record,
Err(e) => {
log::warn!("Error deserializing incoming record: {}", e);
incoming_telemetry.failed(1);
continue;
}
};
incoming_records.push(record);
}
let outgoing_record = self
let outgoing_records = self
.sync_impl
.lock()
.unwrap()
.apply_incoming(incoming_records)?;
.apply_incoming(inbound.changes, telem)?;
let mut outgoing = OutgoingChangeset::new("tabs", inbound.timestamp);
if let Some(outgoing_record) = outgoing_record {
let payload = Payload::from_record(outgoing_record)?;
outgoing.changes.push(payload);
}
telem.incoming(incoming_telemetry);
Ok(outgoing)
Ok(OutgoingChangeset::new_with_changes(
"tabs",
inbound.timestamp,
outgoing_records,
))
}
fn sync_finished(
&self,
new_timestamp: ServerTimestamp,
records_synced: Vec<Guid>,
) -> Result<()> {
self.sync_impl
) -> anyhow::Result<()> {
Ok(self
.sync_impl
.lock()
.unwrap()
.sync_finished(new_timestamp, &records_synced)
.sync_finished(new_timestamp, &records_synced)?)
}
fn get_collection_requests(
&self,
server_timestamp: ServerTimestamp,
) -> Result<Vec<CollectionRequest>> {
) -> anyhow::Result<Vec<CollectionRequest>> {
let since = self.sync_impl.lock().unwrap().last_sync.unwrap_or_default();
Ok(if since == server_timestamp {
vec![]
@ -304,16 +326,16 @@ impl SyncEngine for TabsEngine {
})
}
fn get_sync_assoc(&self) -> Result<EngineSyncAssociation> {
fn get_sync_assoc(&self) -> anyhow::Result<EngineSyncAssociation> {
Ok(self.sync_impl.lock().unwrap().get_sync_assoc().clone())
}
fn reset(&self, assoc: &EngineSyncAssociation) -> Result<()> {
self.sync_impl.lock().unwrap().reset(assoc.clone())
fn reset(&self, assoc: &EngineSyncAssociation) -> anyhow::Result<()> {
Ok(self.sync_impl.lock().unwrap().reset(assoc.clone())?)
}
fn wipe(&self) -> Result<()> {
self.sync_impl.lock().unwrap().wipe()
fn wipe(&self) -> anyhow::Result<()> {
Ok(self.sync_impl.lock().unwrap().wipe()?)
}
}
@ -333,7 +355,7 @@ impl crate::TabsStore {
pub mod test {
use super::*;
use serde_json::json;
use sync15::DeviceType;
use sync15::bso::IncomingBso;
#[test]
fn test_incoming_tabs() {
@ -374,11 +396,14 @@ pub mod test {
}),
];
let mut incoming = IncomingChangeset::new(engine.collection_name(), ServerTimestamp(0));
for record in records {
let payload = Payload::from_json(record).unwrap();
incoming.changes.push((payload, ServerTimestamp(0)));
}
let incoming = IncomingChangeset::new_with_changes(
engine.collection_name(),
ServerTimestamp(0),
records
.into_iter()
.map(IncomingBso::from_test_content)
.collect(),
);
let outgoing = engine
.apply_incoming(vec![incoming], &mut telemetry::Engine::new("tabs"))
.expect("Should apply incoming and stage outgoing records");

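apply_incoming now receives raw IncomingBso values and turns each into a typed record via into_content(), counting undecodable records (including tombstones, which tabs doesn't use) in telemetry instead of failing the whole sync. A self-contained sketch of that filtering step, with toy types standing in for sync15::bso and assuming serde (with derive) and serde_json are available:

use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct TabsRecord {
    id: String,
    client_name: String,
}

// Toy incoming item: the envelope id plus the raw JSON payload.
struct Incoming {
    id: String,
    payload: String,
}

fn apply_incoming(inbound: Vec<Incoming>, local_id: &str) -> (Vec<TabsRecord>, u32) {
    let mut failed = 0;
    let mut records = Vec::new();
    for inc in inbound {
        if inc.id == local_id {
            continue; // our own record, ignore it
        }
        match serde_json::from_str::<TabsRecord>(&inc.payload) {
            Ok(record) => records.push(record),
            // Invalid record or a tombstone: count it for telemetry, keep going.
            Err(_) => failed += 1,
        }
    }
    (records, failed)
}

fn main() {
    let inbound = vec![
        Incoming {
            id: "remote".into(),
            payload: r#"{"id":"remote","clientName":"other device"}"#.into(),
        },
        Incoming {
            id: "broken".into(),
            payload: "not json".into(),
        },
    ];
    let (records, failed) = apply_incoming(inbound, "local");
    assert_eq!(records.len(), 1);
    assert_eq!(failed, 1);
    println!("kept {:?}, {} failed", records[0].client_name, failed);
}
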
third_party/rust/tabs/src/sync/full_sync.rs (vendored)

@ -2,18 +2,21 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{error::Result, TabsEngine, TabsStore};
use crate::{sync::engine::TabsSyncImpl, ApiResult, Result, TabsEngine, TabsStore};
use error_support::handle_error;
use interrupt_support::NeverInterrupts;
use std::sync::Arc;
use sync15::client::{sync_multiple, MemoryCachedState, Sync15StorageClientInit};
use sync15::engine::{EngineSyncAssociation, SyncEngine};
use sync15::engine::EngineSyncAssociation;
use sync15::KeyBundle;
impl TabsStore {
pub fn reset(self: Arc<Self>) -> Result<()> {
let engine = TabsEngine::new(Arc::clone(&self));
engine.reset(&EngineSyncAssociation::Disconnected)?;
Ok(())
pub fn reset(self: Arc<Self>) -> ApiResult<()> {
handle_error! {
let mut sync_impl = TabsSyncImpl::new(Arc::clone(&self));
sync_impl.reset(EngineSyncAssociation::Disconnected)?;
Ok(())
}
}
/// A convenience wrapper around sync_multiple.
@ -24,44 +27,46 @@ impl TabsStore {
sync_key: String,
tokenserver_url: String,
local_id: String,
) -> Result<String> {
let mut mem_cached_state = MemoryCachedState::default();
let engine = TabsEngine::new(Arc::clone(&self));
) -> ApiResult<String> {
handle_error! {
let mut mem_cached_state = MemoryCachedState::default();
let engine = TabsEngine::new(Arc::clone(&self));
// Since we are syncing without the sync manager, there's no
// command processor, therefore no clients engine, and in
// consequence `TabsStore::prepare_for_sync` is never called
// which means our `local_id` will never be set.
// Do it here.
engine.sync_impl.lock().unwrap().local_id = local_id;
// Since we are syncing without the sync manager, there's no
// command processor, therefore no clients engine, and in
// consequence `TabsStore::prepare_for_sync` is never called
// which means our `local_id` will never be set.
// Do it here.
engine.sync_impl.lock().unwrap().local_id = local_id;
let storage_init = &Sync15StorageClientInit {
key_id,
access_token,
tokenserver_url: url::Url::parse(tokenserver_url.as_str())?,
};
let root_sync_key = &KeyBundle::from_ksync_base64(sync_key.as_str())?;
let storage_init = &Sync15StorageClientInit {
key_id,
access_token,
tokenserver_url: url::Url::parse(tokenserver_url.as_str())?,
};
let root_sync_key = &KeyBundle::from_ksync_base64(sync_key.as_str())?;
let mut result = sync_multiple(
&[&engine],
&mut None,
&mut mem_cached_state,
storage_init,
root_sync_key,
&NeverInterrupts,
None,
);
let mut result = sync_multiple(
&[&engine],
&mut None,
&mut mem_cached_state,
storage_init,
root_sync_key,
&NeverInterrupts,
None,
);
// for b/w compat reasons, we do some dances with the result.
// XXX - note that this means telemetry isn't going to be reported back
// to the app - we need to check with lockwise about whether they really
// need these failures to be reported or whether we can loosen this.
if let Err(e) = result.result {
return Err(e.into());
}
match result.engine_results.remove("tabs") {
None | Some(Ok(())) => Ok(serde_json::to_string(&result.telemetry)?),
Some(Err(e)) => Err(e.into()),
// for b/w compat reasons, we do some dances with the result.
// XXX - note that this means telemetry isn't going to be reported back
// to the app - we need to check with lockwise about whether they really
// need these failures to be reported or whether we can loosen this.
if let Err(e) = result.result {
return Err(e.into());
}
match result.engine_results.remove("tabs") {
None | Some(Ok(())) => Ok(serde_json::to_string(&result.telemetry)?),
Some(Err(e)) => Err(e.into()),
}
}
}
}

third_party/rust/tabs/src/sync/mod.rs (vendored)

@ -12,11 +12,11 @@ pub mod full_sync;
// When full-sync isn't enabled we need stub versions for these UDL exposed functions.
#[cfg(not(feature = "full-sync"))]
impl crate::TabsStore {
pub fn reset(self: std::sync::Arc<Self>) -> crate::error::Result<()> {
log::error!("reset: feature not enabled");
Err(crate::error::TabsError::SyncAdapterError(
"reset".to_string(),
))
pub fn reset(self: std::sync::Arc<Self>) -> crate::error::ApiResult<()> {
log::warn!("reset: feature not enabled");
Err(crate::error::TabsApiError::SyncError {
reason: "reset".to_string(),
})
}
pub fn sync(
@ -26,10 +26,10 @@ impl crate::TabsStore {
_sync_key: String,
_tokenserver_url: String,
_local_id: String,
) -> crate::error::Result<String> {
log::error!("sync: feature not enabled");
Err(crate::error::TabsError::SyncAdapterError(
"sync".to_string(),
))
) -> crate::error::ApiResult<String> {
log::warn!("sync: feature not enabled");
Err(crate::error::TabsApiError::SyncError {
reason: "sync".to_string(),
})
}
}

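For builds without the "full-sync" feature, the UDL-exposed functions are stubs that now return the new SyncError variant instead of the old flat TabsError. The cfg-gating pattern these stubs use, reduced to a runnable sketch:

// Real implementation when "full-sync" is enabled; an error-returning stub
// otherwise, so the UDL surface stays the same either way.
#[cfg(feature = "full-sync")]
fn sync() -> Result<String, String> {
    Ok("synced".into())
}

#[cfg(not(feature = "full-sync"))]
fn sync() -> Result<String, String> {
    Err("sync: feature not enabled".into())
}

fn main() {
    println!("{:?}", sync());
}
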
third_party/rust/tabs/src/sync/record.rs (vendored)

@ -3,7 +3,6 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use serde_derive::{Deserialize, Serialize};
use sync15::Payload;
#[derive(Debug, Clone, Hash, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
@ -22,16 +21,6 @@ pub struct TabsRecord {
pub id: String,
pub client_name: String,
pub tabs: Vec<TabsRecordTab>,
#[serde(default)]
pub ttl: u32,
}
impl TabsRecord {
#[inline]
pub fn from_payload(payload: Payload) -> crate::error::Result<Self> {
let record: TabsRecord = payload.into_record()?;
Ok(record)
}
}
#[cfg(test)]
@ -40,8 +29,8 @@ pub mod test {
use serde_json::json;
#[test]
fn test_simple() {
let payload = Payload::from_json(json!({
fn test_payload() {
let payload = json!({
"id": "JkeBPC50ZI0m",
"clientName": "client name",
"tabs": [{
@ -52,9 +41,8 @@ pub mod test {
"icon": "https://mozilla.org/icon",
"lastUsed": 1643764207
}]
}))
.expect("json is valid");
let record = TabsRecord::from_payload(payload).expect("payload is valid");
});
let record: TabsRecord = serde_json::from_value(payload).expect("should work");
assert_eq!(record.id, "JkeBPC50ZI0m");
assert_eq!(record.client_name, "client name");
assert_eq!(record.tabs.len(), 1);
@ -64,18 +52,45 @@ pub mod test {
assert_eq!(tab.last_used, 1643764207);
}
#[test]
fn test_roundtrip() {
let tab = TabsRecord {
id: "JkeBPC50ZI0m".into(),
client_name: "client name".into(),
tabs: vec![TabsRecordTab {
title: "the title".into(),
url_history: vec!["https://mozilla.org/".into()],
icon: Some("https://mozilla.org/icon".into()),
last_used: 1643764207,
}],
};
let round_tripped =
serde_json::from_value(serde_json::to_value(tab.clone()).unwrap()).unwrap();
assert_eq!(tab, round_tripped);
}
#[test]
fn test_extra_fields() {
let payload = Payload::from_json(json!({
let payload = json!({
"id": "JkeBPC50ZI0m",
"clientName": "client name",
"tabs": [],
// Let's say we agree on new tabs to record, we want old versions to
// ignore them!
"recentlyClosed": [],
}))
.expect("json is valid");
let record = TabsRecord::from_payload(payload).expect("payload is valid");
"ignoredField": "??",
"clientName": "client name",
"tabs": [{
"title": "the title",
"urlHistory": [
"https://mozilla.org/"
],
"icon": "https://mozilla.org/icon",
"lastUsed": 1643764207,
// Ditto - make sure we ignore unexpected fields in each tab.
"ignoredField": "??",
}]
});
let record: TabsRecord = serde_json::from_value(payload).unwrap();
// The point of this test is really just to ensure the deser worked, so
// just check the ID.
assert_eq!(record.id, "JkeBPC50ZI0m");
}
}

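Both new tests lean on serde's default behavior of ignoring unknown fields, which is what lets older clients read records that newer clients have extended. A compact illustration of that (assuming serde with derive and serde_json):

use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct TabsRecord {
    id: String,
    client_name: String,
}

fn main() {
    // Unknown fields ("recentlyClosed" here) are silently skipped by default.
    let record: TabsRecord = serde_json::from_str(
        r#"{"id":"JkeBPC50ZI0m","clientName":"client name","recentlyClosed":[]}"#,
    )
    .unwrap();
    assert_eq!(record.id, "JkeBPC50ZI0m");
    println!("{:?}", record);
}
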
third_party/rust/tabs/src/tabs.udl (vendored)

@ -6,14 +6,10 @@ namespace tabs {
};
[Error]
enum TabsError {
"SyncAdapterError",
"SyncResetError",
"JsonError",
"MissingLocalIdError",
"UrlParseError",
"SqlError",
"OpenDatabaseError",
interface TabsApiError {
SyncError(string reason);
SqlError(string reason);
UnexpectedTabsError(string reason);
};
@ -27,10 +23,10 @@ interface TabsStore {
[Self=ByArc]
void register_with_sync_manager();
[Throws=TabsError, Self=ByArc]
[Throws=TabsApiError, Self=ByArc]
void reset();
[Throws=TabsError, Self=ByArc]
[Throws=TabsApiError, Self=ByArc]
string sync(string key_id, string access_token, string sync_key, string tokenserver_url, string local_id);
[Self=ByArc]
@ -48,6 +44,7 @@ dictionary RemoteTabRecord {
string title;
sequence<string> url_history;
string? icon;
// Number of ms since the unix epoch (as reported by the client's clock)
i64 last_used;
};
@ -55,11 +52,14 @@ dictionary ClientRemoteTabs {
string client_id;
string client_name;
TabsDeviceType device_type;
// Number of ms since the unix epoch (as reported by the server's clock)
i64 last_modified;
sequence<RemoteTabRecord> remote_tabs;
};
// Note the canonical docs for this are in https://searchfox.org/mozilla-central/source/services/interfaces/mozIBridgedSyncEngine.idl
// It's only actually used in desktop, but it's fine to expose this everywhere.
// NOTE: all timestamps here are milliseconds.
interface TabsBridgedEngine {
//readonly attribute long storageVersion;
// readonly attribute boolean allowSkippedRecord;
@ -67,42 +67,42 @@ interface TabsBridgedEngine {
// XXX - better logging story than this?
// attribute mozIServicesLogSink logger;
[Throws=TabsError]
[Throws=TabsApiError]
i64 last_sync();
[Throws=TabsError]
[Throws=TabsApiError]
void set_last_sync(i64 last_sync);
[Throws=TabsError]
[Throws=TabsApiError]
string? sync_id();
[Throws=TabsError]
[Throws=TabsApiError]
string reset_sync_id();
[Throws=TabsError]
[Throws=TabsApiError]
string ensure_current_sync_id([ByRef]string new_sync_id);
[Throws=TabsError]
[Throws=TabsApiError]
void prepare_for_sync([ByRef]string client_data);
[Throws=TabsError]
[Throws=TabsApiError]
void sync_started();
[Throws=TabsError]
[Throws=TabsApiError]
void store_incoming(sequence<string> incoming_envelopes_as_json);
[Throws=TabsError]
[Throws=TabsApiError]
sequence<string> apply();
[Throws=TabsError]
[Throws=TabsApiError]
void set_uploaded(i64 new_timestamp, sequence<TabsGuid> uploaded_ids);
[Throws=TabsError]
[Throws=TabsApiError]
void sync_finished();
[Throws=TabsError]
[Throws=TabsApiError]
void reset();
[Throws=TabsError]
[Throws=TabsApiError]
void wipe();
};

third_party/rust/viaduct/.cargo-checksum.json (vendored)

@ -1 +1 @@
{"files":{"Cargo.toml":"5b77a07a16d2848e0b7c94ebe1b2d5daefb6271b2daf907b5f92660619a4e5f6","README.md":"a6856d0f86aaade17cb9fa61c153aca085903d0676fae953022aeab235996cb7","src/backend.rs":"66019f4d436f1575e19b3ff70c61320cf7cdfb8eb95f93b77c8ad350faf96c7c","src/backend/ffi.rs":"e168935f29397aac424f11bc0827917aca0035a71e5246dd75e92afc31459857","src/error.rs":"98ca92b58bd8b4f3c9d4c6d03ed235609d486fe8121277004283b9cfda6e3260","src/fetch_msg_types.proto":"de8a46a4947a140783a4d714364f18ccf02c4759d6ab5ace9da0b1c058efa6c3","src/headers.rs":"fd176060449e18d309c3153e6c14e854c79010b1ddd127a9ae902d5ac21529f5","src/headers/name.rs":"dcfd4d42326724f822893cf6ac90f1e14734dba178150dcb606f4b19de5e66d7","src/lib.rs":"7fb25cab1e487902c30068546984568dec969e269c3318dc031e887a475fe51f","src/mozilla.appservices.httpconfig.protobuf.rs":"59e64f2b997bc99da654c37d0a36ae7d08456cd384ab7c8c501e3990c5f97544","src/settings.rs":"bf1b3bd31fd83a5fd8834088f985710607e02038ad098bfab59acc96909e51d2"},"package":null}
{"files":{"Cargo.toml":"a285b9f5a43742f35c8c7fdb03d4b08dd475156e5c18426903c8ab3175bdd0f4","README.md":"a6856d0f86aaade17cb9fa61c153aca085903d0676fae953022aeab235996cb7","src/backend.rs":"22c313dd0ecbe92803219d3770bb97b3f876ed2fdc4ac8b5ac8dbea92b563e9f","src/backend/ffi.rs":"9ce49be773b2eb51aeef00a15e1d33f34e48e916c5e8b628fdc0ee7cc6d40e15","src/error.rs":"98ca92b58bd8b4f3c9d4c6d03ed235609d486fe8121277004283b9cfda6e3260","src/fetch_msg_types.proto":"de8a46a4947a140783a4d714364f18ccf02c4759d6ab5ace9da0b1c058efa6c3","src/headers.rs":"fd176060449e18d309c3153e6c14e854c79010b1ddd127a9ae902d5ac21529f5","src/headers/name.rs":"dcfd4d42326724f822893cf6ac90f1e14734dba178150dcb606f4b19de5e66d7","src/lib.rs":"7fb25cab1e487902c30068546984568dec969e269c3318dc031e887a475fe51f","src/mozilla.appservices.httpconfig.protobuf.rs":"59e64f2b997bc99da654c37d0a36ae7d08456cd384ab7c8c501e3990c5f97544","src/settings.rs":"f62d0779d7b86af5daad0c23fb61a5982c11520e6fa528ebe2e2d6ad76e70afd"},"package":null}

third_party/rust/viaduct/Cargo.toml (vendored)

@ -15,6 +15,7 @@ log = "0.4"
serde = "1"
serde_json = "1"
once_cell = "1.5"
parking_lot = { version = ">=0.11,<=0.12" }
prost = "0.8"
prost-derive = "0.8"
ffi-support = "0.4"

third_party/rust/viaduct/src/backend.rs (vendored)

@ -2,6 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::GLOBAL_SETTINGS;
use ffi::FfiBackend;
use once_cell::sync::OnceCell;
mod ffi;
@ -50,6 +51,14 @@ pub fn validate_request(request: &crate::Request) -> Result<(), crate::Error> {
Some(url::Host::Ipv6(addr)) => !addr.is_loopback(),
None => true,
}
&& {
let settings = GLOBAL_SETTINGS.read();
settings
.addn_allowed_insecure_url
.as_ref()
.map(|url| url.host() != request.url.host() || url.scheme() != request.url.scheme())
.unwrap_or(true)
}
{
return Err(crate::Error::NonTlsUrl);
}
@ -58,7 +67,8 @@ pub fn validate_request(request: &crate::Request) -> Result<(), crate::Error> {
#[cfg(test)]
mod tests {
use super::validate_request;
use super::*;
#[test]
fn test_validate_request() {
let _https_request = crate::Request::new(
@ -107,4 +117,38 @@ mod tests {
);
assert!(validate_request(&localhost_request_ipv6).is_ok());
}
#[test]
fn test_validate_request_addn_allowed_insecure_url() {
let request_root = crate::Request::new(
crate::Method::Get,
url::Url::parse("http://anything").unwrap(),
);
let request = crate::Request::new(
crate::Method::Get,
url::Url::parse("http://anything/path").unwrap(),
);
// This should never be accepted.
let request_ftp = crate::Request::new(
crate::Method::Get,
url::Url::parse("ftp://anything/path").unwrap(),
);
assert!(validate_request(&request_root).is_err());
assert!(validate_request(&request).is_err());
{
let mut settings = GLOBAL_SETTINGS.write();
settings.addn_allowed_insecure_url =
Some(url::Url::parse("http://something-else").unwrap());
}
assert!(validate_request(&request_root).is_err());
assert!(validate_request(&request).is_err());
{
let mut settings = GLOBAL_SETTINGS.write();
settings.addn_allowed_insecure_url = Some(url::Url::parse("http://anything").unwrap());
}
assert!(validate_request(&request_root).is_ok());
assert!(validate_request(&request).is_ok());
assert!(validate_request(&request_ftp).is_err());
}
}

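The added validation branch permits exactly one extra non-TLS URL, matched on scheme and host; any other insecure request (including the ftp:// case in the test) is still rejected. A sketch of just that check, assuming the url crate (allowed_by_exception is an illustrative name, not viaduct API):

// An insecure request passes the exception only when its scheme and host
// both match the single configured insecure URL.
fn allowed_by_exception(request: &url::Url, allowed: Option<&url::Url>) -> bool {
    allowed.map_or(false, |u| {
        u.host() == request.host() && u.scheme() == request.scheme()
    })
}

fn main() {
    let allowed = url::Url::parse("http://anything").unwrap();
    let same_host = url::Url::parse("http://anything/path").unwrap();
    let other_host = url::Url::parse("http://something-else/").unwrap();
    assert!(allowed_by_exception(&same_host, Some(&allowed)));
    assert!(!allowed_by_exception(&other_host, Some(&allowed)));
    assert!(!allowed_by_exception(&same_host, None));
}
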
third_party/rust/viaduct/src/backend/ffi.rs (vendored)

@ -10,6 +10,7 @@ ffi_support::implement_into_ffi_by_protobuf!(msg_types::Request);
impl From<crate::Request> for msg_types::Request {
fn from(request: crate::Request) -> Self {
let settings = GLOBAL_SETTINGS.read();
msg_types::Request {
url: request.url.to_string(),
body: request.body,
@ -17,14 +18,10 @@ impl From<crate::Request> for msg_types::Request {
// it certainly makes it convenient for us...
method: request.method as i32,
headers: request.headers.into(),
follow_redirects: GLOBAL_SETTINGS.follow_redirects,
use_caches: GLOBAL_SETTINGS.use_caches,
connect_timeout_secs: GLOBAL_SETTINGS
.connect_timeout
.map_or(0, |d| d.as_secs() as i32),
read_timeout_secs: GLOBAL_SETTINGS
.read_timeout
.map_or(0, |d| d.as_secs() as i32),
follow_redirects: settings.follow_redirects,
use_caches: settings.use_caches,
connect_timeout_secs: settings.connect_timeout.map_or(0, |d| d.as_secs() as i32),
read_timeout_secs: settings.read_timeout.map_or(0, |d| d.as_secs() as i32),
}
}
}
@ -193,4 +190,20 @@ pub extern "C" fn viaduct_initialize(callback: FetchCallback) -> u8 {
ffi_support::abort_on_panic::call_with_output(|| callback_holder::set_callback(callback))
}
/// Allows connections to the hard-coded address the Android Emulator uses for
/// localhost. It would be easy to support allowing the address to be passed in,
/// but we've made a decision to avoid that possible footgun. The expectation is
/// that this will only be called in debug builds or if the app can determine it
/// is in the emulator, but the Rust code doesn't know that, so we can't check.
#[no_mangle]
pub extern "C" fn viaduct_allow_android_emulator_loopback() {
let mut error = ffi_support::ExternError::default();
ffi_support::call_with_output(&mut error, || {
let url = url::Url::parse("http://10.0.2.2").unwrap();
let mut settings = GLOBAL_SETTINGS.write();
settings.addn_allowed_insecure_url = Some(url);
});
error.consume_and_log_if_error();
}
ffi_support::define_bytebuffer_destructor!(viaduct_destroy_bytebuffer);

third_party/rust/viaduct/src/settings.rs (vendored)

@ -2,7 +2,10 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use once_cell::sync::Lazy;
use parking_lot::RwLock;
use std::time::Duration;
use url::Url;
/// Note: reqwest allows these only to be specified per-Client. concept-fetch
/// allows these to be specified on each call to fetch. I think it's worth
@ -20,6 +23,9 @@ pub struct Settings {
pub connect_timeout: Option<Duration>,
pub follow_redirects: bool,
pub use_caches: bool,
// For testing purposes, we allow exactly one additional Url which is
// allowed to not be https.
pub addn_allowed_insecure_url: Option<Url>,
}
#[cfg(target_os = "ios")]
@ -29,9 +35,12 @@ const TIMEOUT_DURATION: Duration = Duration::from_secs(7);
const TIMEOUT_DURATION: Duration = Duration::from_secs(10);
// The singleton instance of our settings.
pub static GLOBAL_SETTINGS: &Settings = &Settings {
read_timeout: Some(TIMEOUT_DURATION),
connect_timeout: Some(TIMEOUT_DURATION),
follow_redirects: true,
use_caches: false,
};
pub static GLOBAL_SETTINGS: Lazy<RwLock<Settings>> = Lazy::new(|| {
RwLock::new(Settings {
read_timeout: Some(TIMEOUT_DURATION),
connect_timeout: Some(TIMEOUT_DURATION),
follow_redirects: true,
use_caches: false,
addn_allowed_insecure_url: None,
})
});

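GLOBAL_SETTINGS changes from an immutable static to a lazily initialized RwLock so that tests and the emulator-loopback FFI call can mutate it. The same shape using std only, with std::sync::LazyLock in place of once_cell::sync::Lazy and std's RwLock in place of parking_lot's (hence the unwrap() on lock results, which parking_lot doesn't need):

use std::sync::{LazyLock, RwLock};
use std::time::Duration;

#[derive(Debug)]
struct Settings {
    read_timeout: Option<Duration>,
    addn_allowed_insecure_url: Option<String>, // url::Url in the real crate
}

static GLOBAL_SETTINGS: LazyLock<RwLock<Settings>> = LazyLock::new(|| {
    RwLock::new(Settings {
        read_timeout: Some(Duration::from_secs(10)),
        addn_allowed_insecure_url: None,
    })
});

fn main() {
    // Writers take the lock briefly, as viaduct_allow_android_emulator_loopback does.
    GLOBAL_SETTINGS.write().unwrap().addn_allowed_insecure_url =
        Some("http://10.0.2.2".to_string());
    // Readers, like validate_request, take a read lock.
    let settings = GLOBAL_SETTINGS.read().unwrap();
    println!("{:?}", settings.addn_allowed_insecure_url);
}
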
third_party/rust/webext-storage/.cargo-checksum.json (vendored)

@ -1 +1 @@
{"files":{"Cargo.toml":"24e2a4528145dd4f290fca3158f5fa019e2f1c7f501a8aa062823dcae6093c90","README.md":"1fd617294339930ee1ad5172377648b268cce0216fc3971facbfe7c6839e9ab1","build.rs":"2bea192a782a5ebe3d3ec0ca6dae2d51844eb7ad163a38a2a62fbfe6dd3c34d8","sql/create_schema.sql":"a17311a407ec10e033886b7125da4c8b84bc6d761f6b28edc9594de430e1d964","sql/create_sync_temp_tables.sql":"860ede362c94feb47d85522553fa2852f9bdb9f9b025d6438dd5dee3d4acd527","sql/tests/create_schema_v1.sql":"77cf0c90eaac3e1aea626537147e1b8ec349b68d6076c92fa7ae402aac613050","src/api.rs":"f3e6f8065089df06ef4b8ce093727154f96afb7ea168b083d942a30266e7dbf8","src/db.rs":"72b2df354785278af7f87baa0ee4231df9abe2831c4b1413ac760f7efd80b519","src/error.rs":"504e8254170c7c969ebf339a4ae937817ea034d312a7667667cc8722e36d1d7b","src/ffi.rs":"670088d3a13a7349e751489197a3bb123990db69fccf8b815831e9bf5901afc6","src/lib.rs":"324300143818ad545f7e85f9bb5dba03ca45e9002e110d824a3639b5213d8763","src/migration.rs":"af598e99e0cdf761fc048f9a892dcdfbc5cb85be154b7f10b96da64d0a7f0775","src/schema.rs":"f2b41a609cd688c38ce9d846c9a7989dfe0bbc5f5095a03f29c1f900cb310efc","src/store.rs":"8d69eda0f461106102cdf5754584d51963929f7d50dbdb8197d829f95337aa37","src/sync/bridge.rs":"32a4e754809d2717f869d40f0b319ebc7ba34e4c8b54879564a4c9c3fc9ce052","src/sync/incoming.rs":"d128e9cdd54e93083d59fc24b4b3b8909e57303543bf6d6403ab1849927b07cc","src/sync/mod.rs":"9441cca9427141879f4abca1b62b63c02b49779acd3b110bbfb5ec8795c41e66","src/sync/outgoing.rs":"ee9fcc719c9e5ace3b1d525e4cc28f21212ecdf246db24994fb0e8a120605435","src/sync/sync_tests.rs":"d54791dbf46137d2b9da34550190641042af30c377b72c9826befabf93b23065"},"package":null}
{"files":{"Cargo.toml":"f9157ed05588ee9214c48973e643b4379924a18e82ca9ad292953c0895ac28a3","README.md":"1fd617294339930ee1ad5172377648b268cce0216fc3971facbfe7c6839e9ab1","build.rs":"2bea192a782a5ebe3d3ec0ca6dae2d51844eb7ad163a38a2a62fbfe6dd3c34d8","sql/create_schema.sql":"a17311a407ec10e033886b7125da4c8b84bc6d761f6b28edc9594de430e1d964","sql/create_sync_temp_tables.sql":"860ede362c94feb47d85522553fa2852f9bdb9f9b025d6438dd5dee3d4acd527","sql/tests/create_schema_v1.sql":"77cf0c90eaac3e1aea626537147e1b8ec349b68d6076c92fa7ae402aac613050","src/api.rs":"f3e6f8065089df06ef4b8ce093727154f96afb7ea168b083d942a30266e7dbf8","src/db.rs":"72b2df354785278af7f87baa0ee4231df9abe2831c4b1413ac760f7efd80b519","src/error.rs":"29a0e2f62d7bebe71eb7c0e41fe14390b8e82c873a0f7a6c673c80b2b1b20409","src/ffi.rs":"670088d3a13a7349e751489197a3bb123990db69fccf8b815831e9bf5901afc6","src/lib.rs":"324300143818ad545f7e85f9bb5dba03ca45e9002e110d824a3639b5213d8763","src/migration.rs":"69448c7601a9f2506394bc59ef0d454db418138f3378157a9b7233dc33e5659a","src/schema.rs":"cce3ed593809c3e47bbc050e6c2795cecd1b1ce7d6e39da633123e7a0614a213","src/store.rs":"8d69eda0f461106102cdf5754584d51963929f7d50dbdb8197d829f95337aa37","src/sync/bridge.rs":"ddd402d72c7a2e1d13497bd2c1a4767f24a38a10693bc8dc9115c1d68d615ccd","src/sync/incoming.rs":"dd77c64e2ade4f39cba258decab6d3db8ad0b5f513aa018efbd56b9869a021d9","src/sync/mod.rs":"bd1bc5c428dfda6aee7efe53b6e74b8015da5129a303638a21ca8d63516e4061","src/sync/outgoing.rs":"f55b1397d038f8a6211b55b57147ff90c035c5f2ec85b1da2722d756dab41fec","src/sync/sync_tests.rs":"f3846ca7e463315ba9788826613b987ddcff7b21672ff257a98769ee94f4191a"},"package":null}

third_party/rust/webext-storage/Cargo.toml (vendored)

@ -30,6 +30,7 @@ features = ["functions", "bundled", "serde_json", "unlock_notify", "column_declt
[dev-dependencies]
env_logger = { version = "0.7", default-features = false }
prettytable-rs = "0.8"
sync15 = {path = "../../components/sync15", features=["test-utils"]}
tempfile = "3"
# A *direct* dep on the -sys crate is required for our build.rs
# to see the DEP_SQLITE3_LINK_TARGET env var that cargo sets

third_party/rust/webext-storage/src/error.rs (vendored)

@ -49,8 +49,8 @@ pub enum ErrorKind {
#[error("Error opening database: {0}")]
OpenDatabaseError(#[from] sql_support::open_database::Error),
#[error("{0}")]
IncomingPayloadError(#[from] sync15::engine::PayloadError),
#[error("Sync Payload Error: {0}")]
IncomingPayloadError(#[from] sync15::Error),
}
error_support::define_error! {
@ -60,7 +60,7 @@ error_support::define_error! {
(IoError, std::io::Error),
(InterruptedError, Interrupted),
(Utf8Error, std::str::Utf8Error),
(IncomingPayloadError, sync15::engine::PayloadError),
(IncomingPayloadError, sync15::Error),
(OpenDatabaseError, sql_support::open_database::Error),
}
}

third_party/rust/webext-storage/src/migration.rs (vendored)

@ -295,7 +295,11 @@ impl MigrationInfo {
// Force test failure, but just log an error otherwise so that
// we commit the transaction that will.
debug_assert!(false, "Failed to read migration JSON: {:?}", e);
log::error!("Failed to read migration JSON: {}", e);
error_support::report_error!(
"webext-storage-migration-json",
"Failed to read migration JSON: {}",
e
);
Ok(None)
}
}

third_party/rust/webext-storage/src/schema.rs (vendored)

@ -18,7 +18,7 @@ impl MigrationLogic for WebExtMigrationLogin {
const NAME: &'static str = "webext storage db";
const END_VERSION: u32 = 2;
fn prepare(&self, conn: &Connection) -> MigrationResult<()> {
fn prepare(&self, conn: &Connection, _db_empty: bool) -> MigrationResult<()> {
let initial_pragmas = "
-- We don't care about temp tables being persisted to disk.
PRAGMA temp_store = 2;

third_party/rust/webext-storage/src/sync/bridge.rs (vendored)

@ -3,7 +3,8 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use rusqlite::Transaction;
use sync15::engine::{ApplyResults, IncomingEnvelope, OutgoingEnvelope};
use sync15::bso::IncomingBso;
use sync15::engine::ApplyResults;
use sync_guid::Guid as SyncGuid;
use crate::db::{delete_meta, get_meta, put_meta, StorageDb};
@ -84,17 +85,14 @@ impl<'a> sync15::engine::BridgedEngine for BridgedEngine<'a> {
Ok(())
}
fn store_incoming(&self, incoming_envelopes: &[IncomingEnvelope]) -> Result<()> {
fn store_incoming(&self, incoming_bsos: Vec<IncomingBso>) -> Result<()> {
let signal = self.db.begin_interrupt_scope()?;
let mut incoming_payloads = Vec::with_capacity(incoming_envelopes.len());
for envelope in incoming_envelopes {
signal.err_if_interrupted()?;
incoming_payloads.push(envelope.payload()?);
}
let tx = self.db.unchecked_transaction()?;
stage_incoming(&tx, incoming_payloads, &signal)?;
let incoming_content: Vec<_> = incoming_bsos
.into_iter()
.map(IncomingBso::into_content::<super::WebextRecord>)
.collect();
stage_incoming(&tx, &incoming_content, &signal)?;
tx.commit()?;
Ok(())
}
@ -112,11 +110,7 @@ impl<'a> sync15::engine::BridgedEngine for BridgedEngine<'a> {
stage_outgoing(&tx)?;
tx.commit()?;
let outgoing = get_outgoing(self.db, &signal)?
.into_iter()
.map(OutgoingEnvelope::from)
.collect::<Vec<_>>();
Ok(outgoing.into())
Ok(get_outgoing(self.db, &signal)?.into())
}
fn set_uploaded(&self, _server_modified_millis: i64, ids: &[SyncGuid]) -> Result<()> {

third_party/rust/webext-storage/src/sync/incoming.rs (vendored)

@ -6,18 +6,15 @@
// working out a plan for them, updating the local data and mirror, etc.
use interrupt_support::Interruptee;
use rusqlite::{
types::{Null, ToSql},
Connection, Row, Transaction,
};
use rusqlite::{Connection, Row, Transaction};
use sql_support::ConnExt;
use sync15::Payload;
use sync15::bso::{IncomingContent, IncomingKind};
use sync_guid::Guid as SyncGuid;
use crate::api::{StorageChanges, StorageValueChange};
use crate::error::*;
use super::{merge, remove_matching_keys, JsonMap, Record, RecordData};
use super::{merge, remove_matching_keys, JsonMap, WebextRecord};
/// The state data can be in. Could be represented as Option<JsonMap>, but this
/// is clearer and independent of how the data is stored.
@ -69,43 +66,44 @@ fn json_map_from_row(row: &Row<'_>, col: &str) -> Result<DataState> {
/// The actual processing is done via this table.
pub fn stage_incoming(
tx: &Transaction<'_>,
incoming_payloads: Vec<Payload>,
incoming_records: &[IncomingContent<WebextRecord>],
signal: &dyn Interruptee,
) -> Result<()> {
let mut incoming_records = Vec::with_capacity(incoming_payloads.len());
for payload in incoming_payloads {
incoming_records.push(payload.into_record::<Record>()?);
}
sql_support::each_sized_chunk(
&incoming_records,
incoming_records,
// We bind 3 params per chunk.
sql_support::default_max_variable_number() / 3,
|chunk, _| -> Result<()> {
let sql = format!(
"INSERT OR REPLACE INTO temp.storage_sync_staging
(guid, ext_id, data)
VALUES {}",
sql_support::repeat_multi_values(chunk.len(), 3)
);
let mut params = Vec::with_capacity(chunk.len() * 3);
for record in chunk {
signal.err_if_interrupted()?;
params.push(&record.guid as &dyn ToSql);
match &record.data {
RecordData::Data {
ref ext_id,
ref data,
} => {
params.push(ext_id);
params.push(data);
match &record.kind {
IncomingKind::Content(r) => {
params.push(Some(record.envelope.id.to_string()));
params.push(Some(r.ext_id.to_string()));
params.push(Some(r.data.clone()));
}
RecordData::Tombstone => {
params.push(&Null);
params.push(&Null);
IncomingKind::Tombstone => {
params.push(Some(record.envelope.id.to_string()));
params.push(None);
params.push(None);
}
IncomingKind::Malformed => {
log::error!("Ignoring incoming malformed record: {}", record.envelope.id);
}
}
}
tx.execute(&sql, rusqlite::params_from_iter(params))?;
// we might have skipped records
let actual_len = params.len() / 3;
if actual_len != 0 {
let sql = format!(
"INSERT OR REPLACE INTO temp.storage_sync_staging
(guid, ext_id, data)
VALUES {}",
sql_support::repeat_multi_values(actual_len, 3)
);
tx.execute(&sql, rusqlite::params_from_iter(params))?;
}
Ok(())
},
)?;
@ -482,7 +480,7 @@ mod tests {
use crate::api;
use interrupt_support::NeverInterrupts;
use serde_json::{json, Value};
use sync15::Payload;
use sync15::bso::IncomingBso;
// select simple int
fn ssi(conn: &Connection, stmt: &str) -> u32 {
@ -491,11 +489,11 @@ mod tests {
.unwrap_or_default()
}
fn array_to_incoming(mut array: Value) -> Vec<Payload> {
fn array_to_incoming(mut array: Value) -> Vec<IncomingContent<WebextRecord>> {
let jv = array.as_array_mut().expect("you must pass a json array");
let mut result = Vec::with_capacity(jv.len());
for elt in jv {
result.push(Payload::from_json(elt.take()).expect("must be valid"));
result.push(IncomingBso::from_test_content(elt.take()).into_content());
}
result
}
@ -561,7 +559,7 @@ mod tests {
}
]};
stage_incoming(&tx, array_to_incoming(incoming), &NeverInterrupts)?;
stage_incoming(&tx, &array_to_incoming(incoming), &NeverInterrupts)?;
// check staging table
assert_eq!(
ssi(&tx, "SELECT count(*) FROM temp.storage_sync_staging"),

third_party/rust/webext-storage/src/sync/mod.rs (vendored)

@ -12,7 +12,7 @@ mod sync_tests;
use crate::api::{StorageChanges, StorageValueChange};
use crate::db::StorageDb;
use crate::error::*;
use serde::{Deserialize, Deserializer};
use serde::Deserialize;
use serde_derive::*;
use sql_support::ConnExt;
use sync_guid::Guid as SyncGuid;
@ -24,40 +24,14 @@ type JsonMap = serde_json::Map<String, serde_json::Value>;
pub const STORAGE_VERSION: usize = 1;
// Note that we never use serde to serialize a tombstone, so it doesn't matter
// how that looks - but we care about how a Record with RecordData::Data looks.
// However, deserializing is trickier still - it seems tricky to tell serde
// how to unpack a tombstone - if we used `Tombstone { deleted: bool }` and
// enforced bool must only be ever true it might be possible, but that's quite
// clumsy for the rest of the code. So we just capture serde's failure to
// unpack it and treat it as a tombstone.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(untagged)]
pub enum RecordData {
Data {
#[serde(rename = "extId")]
ext_id: String,
data: String,
},
#[serde(skip_deserializing)]
Tombstone,
}
#[allow(clippy::unnecessary_wraps)]
fn deserialize_record_data<'de, D>(deserializer: D) -> Result<RecordData, D::Error>
where
D: Deserializer<'de>,
{
Ok(RecordData::deserialize(deserializer).unwrap_or(RecordData::Tombstone))
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Record {
pub struct WebextRecord {
#[serde(rename = "id")]
guid: SyncGuid,
#[serde(flatten, deserialize_with = "deserialize_record_data")]
data: RecordData,
#[serde(rename = "extId")]
ext_id: String,
data: String,
}
// Perform a 2-way or 3-way merge, where the incoming value wins on conflict.
@ -221,36 +195,16 @@ mod tests {
#[test]
fn test_serde_record_ser() {
assert_eq!(
serde_json::to_string(&Record {
serde_json::to_string(&WebextRecord {
guid: "guid".into(),
data: RecordData::Data {
ext_id: "ext_id".to_string(),
data: "data".to_string()
}
ext_id: "ext_id".to_string(),
data: "data".to_string()
})
.unwrap(),
r#"{"id":"guid","extId":"ext_id","data":"data"}"#
);
}
#[test]
fn test_serde_record_de() {
let p: Record = serde_json::from_str(r#"{"id":"guid","deleted":true}"#).unwrap();
assert_eq!(p.data, RecordData::Tombstone);
let p: Record =
serde_json::from_str(r#"{"id":"guid","extId": "ext-id", "data":"foo"}"#).unwrap();
assert_eq!(
p.data,
RecordData::Data {
ext_id: "ext-id".into(),
data: "foo".into()
}
);
// invalid are treated as tombstones.
let p: Record = serde_json::from_str(r#"{"id":"guid"}"#).unwrap();
assert_eq!(p.data, RecordData::Tombstone);
}
// a macro for these tests - constructs a serde_json::Value::Object
macro_rules! map {
($($map:tt)+) => {

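With the untagged RecordData enum gone, a record is a flat struct, and tombstones are represented at the BSO layer (IncomingKind::Tombstone / OutgoingBso::new_tombstone) rather than smuggled through serde. The serialized shape asserted by test_serde_record_ser above, as a runnable snippet (assuming serde with derive and serde_json):

use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct WebextRecord {
    #[serde(rename = "id")]
    guid: String,
    ext_id: String,
    data: String,
}

fn main() {
    let record = WebextRecord {
        guid: "guid".into(),
        ext_id: "ext_id".into(),
        data: "data".into(),
    };
    // Same shape the test asserts: the guid serializes as "id", ext_id as "extId".
    assert_eq!(
        serde_json::to_string(&record).unwrap(),
        r#"{"id":"guid","extId":"ext_id","data":"data"}"#
    );
}
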
third_party/rust/webext-storage/src/sync/outgoing.rs (vendored)

@ -8,28 +8,28 @@
use interrupt_support::Interruptee;
use rusqlite::{Connection, Row, Transaction};
use sql_support::ConnExt;
use sync15::Payload;
use sync15::bso::OutgoingBso;
use sync_guid::Guid as SyncGuid;
use crate::error::*;
use super::{Record, RecordData};
use super::WebextRecord;
fn outgoing_from_row(row: &Row<'_>) -> Result<Payload> {
fn outgoing_from_row(row: &Row<'_>) -> Result<OutgoingBso> {
let guid: SyncGuid = row.get("guid")?;
let ext_id: String = row.get("ext_id")?;
let raw_data: Option<String> = row.get("data")?;
let payload = match raw_data {
Some(raw_data) => Payload::from_record(Record {
guid,
data: RecordData::Data {
Ok(match raw_data {
Some(raw_data) => {
let record = WebextRecord {
guid,
ext_id,
data: raw_data,
},
})?,
None => Payload::new_tombstone(guid),
};
Ok(payload)
};
OutgoingBso::from_content_with_id(record)?
}
None => OutgoingBso::new_tombstone(guid.into()),
})
}
/// Stages info about what should be uploaded in a temp table. This should be
@ -69,8 +69,8 @@ pub fn stage_outgoing(tx: &Transaction<'_>) -> Result<()> {
Ok(())
}
/// Returns a vec of the payloads which should be uploaded.
pub fn get_outgoing(conn: &Connection, signal: &dyn Interruptee) -> Result<Vec<Payload>> {
/// Returns a vec of the outgoing records which should be uploaded.
pub fn get_outgoing(conn: &Connection, signal: &dyn Interruptee) -> Result<Vec<OutgoingBso>> {
let sql = "SELECT guid, ext_id, data FROM storage_sync_outgoing_staging";
let elts = conn
.conn()
@ -139,13 +139,16 @@ mod tests {
stage_outgoing(&tx)?;
let changes = get_outgoing(&tx, &NeverInterrupts)?;
assert_eq!(changes.len(), 1);
assert_eq!(changes[0].data["extId"], "ext_with_changes".to_string());
let record: serde_json::Value = serde_json::from_str(&changes[0].payload).unwrap();
let ext_id = record.get("extId").unwrap().as_str().unwrap();
assert_eq!(ext_id, "ext_with_changes");
record_uploaded(
&tx,
changes
.into_iter()
.map(|p| p.id)
.map(|p| p.envelope.id)
.collect::<Vec<SyncGuid>>()
.as_slice(),
&NeverInterrupts,
@ -160,20 +163,24 @@ mod tests {
#[test]
fn test_payload_serialization() {
let payload = Payload::from_record(Record {
let record = WebextRecord {
guid: SyncGuid::new("guid"),
data: RecordData::Data {
ext_id: "ext-id".to_string(),
data: "{}".to_string(),
},
})
.unwrap();
// The payload should have the ID.
assert_eq!(payload.id.to_string(), "guid");
// The data in the payload should have only `data` and `extId` - not the guid.
assert!(!payload.data.contains_key("id"));
assert!(payload.data.contains_key("data"));
assert!(payload.data.contains_key("extId"));
assert_eq!(payload.data.len(), 2);
ext_id: "ext-id".to_string(),
data: "{}".to_string(),
};
let outgoing = OutgoingBso::from_content_with_id(record).unwrap();
// The envelope should have our ID.
assert_eq!(outgoing.envelope.id, "guid");
let outgoing_payload =
serde_json::from_str::<serde_json::Value>(&outgoing.payload).unwrap();
let outgoing_map = outgoing_payload.as_object().unwrap();
assert!(!outgoing_map.contains_key("id"));
assert!(outgoing_map.contains_key("data"));
assert!(outgoing_map.contains_key("extId"));
assert_eq!(outgoing_map.len(), 2);
}
}

View file

@@ -12,17 +12,20 @@ use crate::schema::create_empty_sync_temp_tables;
use crate::sync::incoming::{apply_actions, get_incoming, plan_incoming, stage_incoming};
use crate::sync::outgoing::{get_outgoing, record_uploaded, stage_outgoing};
use crate::sync::test::new_syncable_mem_db;
use crate::sync::{Record, RecordData};
use crate::sync::WebextRecord;
use interrupt_support::NeverInterrupts;
use rusqlite::{Connection, Row, Transaction};
use serde_json::json;
use sql_support::ConnExt;
use sync15::Payload;
use sync15::bso::{IncomingBso, IncomingContent, OutgoingBso};
use sync_guid::Guid;
// Here we try to simulate everything done by a "full sync", just minus the
// engine. Returns the records we uploaded.
fn do_sync(tx: &Transaction<'_>, incoming_payloads: Vec<Payload>) -> Result<Vec<Payload>> {
fn do_sync(
tx: &Transaction<'_>,
incoming_payloads: &[IncomingContent<WebextRecord>],
) -> Result<Vec<OutgoingBso>> {
log::info!("test do_sync() starting");
// First we stage the incoming in the temp tables.
stage_incoming(tx, incoming_payloads, &NeverInterrupts)?;
@@ -42,7 +45,7 @@ fn do_sync(tx: &Transaction<'_>, incoming_payloads: Vec<Payload>) -> Result<Vec<
tx,
outgoing
.iter()
.map(|p| p.id.clone())
.map(|p| p.envelope.id.clone())
.collect::<Vec<Guid>>()
.as_slice(),
&NeverInterrupts,
@@ -69,11 +72,11 @@ fn check_finished_with(conn: &Connection, ext_id: &str, val: serde_json::Value)
Ok(())
}
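A usage sketch of do_sync, matching test_simple_outgoing_sync below: with no incoming records, a single local change comes back as the one record to upload.

    let mut db = new_syncable_mem_db();
    let tx = db.transaction()?;
    set(&tx, "ext-id", json!({"key1": "key1-value"}))?;
    // No incoming records; do_sync returns the OutgoingBsos we "uploaded".
    assert_eq!(do_sync(&tx, &[])?.len(), 1);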
fn get_mirror_guid(conn: &Connection, extid: &str) -> Result<String> {
fn get_mirror_guid(conn: &Connection, extid: &str) -> Result<Guid> {
let guid = conn.query_row_and_then(
"SELECT m.guid FROM storage_sync_mirror m WHERE m.ext_id = ?;",
[extid],
|row| row.get::<_, String>(0),
|row| row.get::<_, Guid>(0),
)?;
Ok(guid)
}
@@ -105,7 +108,7 @@ fn _get(conn: &Connection, id_name: &str, expected_extid: &str, table: &str) ->
DbData::NoRow
} else {
let item = items.pop().expect("it exists");
assert_eq!(item.0, expected_extid);
assert_eq!(Guid::new(&item.0), expected_extid);
match item.1 {
None => DbData::NullRow,
Some(v) => DbData::Data(v),
@@ -121,6 +124,19 @@ fn get_local_data(conn: &Connection, expected_extid: &str) -> DbData {
_get(conn, "ext_id", expected_extid, "storage_sync_data")
}
fn make_incoming(
guid: &Guid,
ext_id: &str,
data: &serde_json::Value,
) -> IncomingContent<WebextRecord> {
let content = json!({"id": guid, "extId": ext_id, "data": data.to_string()});
IncomingBso::from_test_content(content).into_content()
}
fn make_incoming_tombstone(guid: &Guid) -> IncomingContent<WebextRecord> {
IncomingBso::new_test_tombstone(guid.clone()).into_content()
}
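One detail these helpers make explicit: the data field is double-encoded. WebextRecord::data is a String, so the inner JSON object is stringified before being embedded in the record. Roughly:

    let incoming = make_incoming(&Guid::new("guid"), "ext-id", &json!({"k": 1}));
    // The content handed to sync15 looks like:
    //   {"id": "guid", "extId": "ext-id", "data": "{\"k\":1}"}
    // i.e. `data` carries the *stringified* object, not a nested JSON value.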
#[test]
fn test_simple_outgoing_sync() -> Result<()> {
// So we are starting with an empty local store and empty server store.
@@ -128,7 +144,7 @@ fn test_simple_outgoing_sync() -> Result<()> {
let tx = db.transaction()?;
let data = json!({"key1": "key1-value", "key2": "key2-value"});
set(&tx, "ext-id", data.clone())?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(do_sync(&tx, &[])?.len(), 1);
check_finished_with(&tx, "ext-id", data)?;
Ok(())
}
@@ -138,14 +154,8 @@ fn test_simple_incoming_sync() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let data = json!({"key1": "key1-value", "key2": "key2-value"});
let payload = Payload::from_record(Record {
guid: Guid::from("guid"),
data: RecordData::Data {
ext_id: "ext-id".to_string(),
data: data.to_string(),
},
})?;
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
let bridge_record = make_incoming(&Guid::new("guid"), "ext-id", &data);
assert_eq!(do_sync(&tx, &[bridge_record])?.len(), 0);
let key1_from_api = get(&tx, "ext-id", json!("key1"))?;
assert_eq!(key1_from_api, json!({"key1": "key1-value"}));
check_finished_with(&tx, "ext-id", data)?;
@@ -169,7 +179,7 @@ fn test_outgoing_tombstone() -> Result<()> {
assert_eq!(get_local_data(&tx, "ext-id"), DbData::NoRow);
// now set data again and sync and *then* remove.
set(&tx, "ext-id", data)?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(do_sync(&tx, &[])?.len(), 1);
assert!(get_local_data(&tx, "ext-id").has_data());
let guid = get_mirror_guid(&tx, "ext-id")?;
assert!(get_mirror_data(&tx, &guid).has_data());
@@ -177,7 +187,7 @@ fn test_outgoing_tombstone() -> Result<()> {
assert_eq!(get_local_data(&tx, "ext-id"), DbData::NullRow);
// then after syncing, the tombstone will be in the mirror but the local row
// has been removed.
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(do_sync(&tx, &[])?.len(), 1);
assert_eq!(get_local_data(&tx, "ext-id"), DbData::NoRow);
assert_eq!(get_mirror_data(&tx, &guid), DbData::NullRow);
Ok(())
@@ -196,15 +206,14 @@ fn test_incoming_tombstone_exists() -> Result<()> {
DbData::Data(data.to_string())
);
// sync to get data in our mirror.
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(do_sync(&tx, &[])?.len(), 1);
assert!(get_local_data(&tx, "ext-id").has_data());
let guid = get_mirror_guid(&tx, "ext-id")?;
assert!(get_mirror_data(&tx, &guid).has_data());
// Now an incoming tombstone for it.
let payload = Payload::new_tombstone(guid.clone());
let tombstone = make_incoming_tombstone(&guid);
assert_eq!(
do_sync(&tx, vec![payload])?.len(),
do_sync(&tx, &[tombstone])?.len(),
0,
"expect no outgoing records"
);
@@ -218,17 +227,16 @@ fn test_incoming_tombstone_not_exists() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
// An incoming tombstone for something that's not anywhere locally.
let guid = "guid";
let payload = Payload::new_tombstone(guid);
let guid = Guid::new("guid");
let tombstone = make_incoming_tombstone(&guid);
assert_eq!(
do_sync(&tx, vec![payload])?.len(),
do_sync(&tx, &[tombstone])?.len(),
0,
"expect no outgoing records"
);
// But we still keep the tombstone in the mirror.
assert_eq!(get_local_data(&tx, "ext-id"), DbData::NoRow);
assert_eq!(get_mirror_data(&tx, guid), DbData::NullRow);
assert_eq!(get_mirror_data(&tx, &guid), DbData::NullRow);
Ok(())
}
@@ -239,15 +247,9 @@ fn test_reconciled() -> Result<()> {
let data = json!({"key1": "key1-value"});
set(&tx, "ext-id", data)?;
// Incoming payload with the same data
let payload = Payload::from_record(Record {
guid: Guid::from("guid"),
data: RecordData::Data {
ext_id: "ext-id".to_string(),
data: json!({"key1": "key1-value"}).to_string(),
},
})?;
let record = make_incoming(&Guid::new("guid"), "ext-id", &json!({"key1": "key1-value"}));
// Should be no outgoing records as we reconciled.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
assert_eq!(do_sync(&tx, &[record])?.len(), 0);
check_finished_with(&tx, "ext-id", json!({"key1": "key1-value"}))?;
Ok(())
}
@@ -261,21 +263,15 @@ fn test_reconcile_with_null_payload() -> Result<()> {
let data = json!({"key1": "key1-value"});
set(&tx, "ext-id", data.clone())?;
// We try to push this change on the next sync.
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(do_sync(&tx, &[])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
assert_eq!(get_mirror_data(&tx, &guid), DbData::Data(data.to_string()));
// Incoming payload with the same data.
// This could happen if, for example, another client changed the
// key and then put it back the way it was.
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
data: RecordData::Data {
ext_id: "ext-id".to_string(),
data: data.to_string(),
},
})?;
let record = make_incoming(&guid, "ext-id", &data);
// Should be no outgoing records as we reconciled.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
assert_eq!(do_sync(&tx, &[record])?.len(), 0);
check_finished_with(&tx, "ext-id", data)?;
Ok(())
}
@@ -288,20 +284,15 @@ fn test_accept_incoming_when_local_is_deleted() -> Result<()> {
// uploaded before being deleted.
let data = json!({"key1": "key1-value"});
set(&tx, "ext-id", data)?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(do_sync(&tx, &[])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
clear(&tx, "ext-id")?;
// Incoming payload without 'key1'. Because we previously uploaded
// key1, this means another client deleted it.
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
data: RecordData::Data {
ext_id: "ext-id".to_string(),
data: json!({"key2": "key2-value"}).to_string(),
},
})?;
let record = make_incoming(&guid, "ext-id", &json!({"key2": "key2-value"}));
// We completely accept the incoming record.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
assert_eq!(do_sync(&tx, &[record])?.len(), 0);
check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
Ok(())
}
@@ -312,20 +303,16 @@ fn test_accept_incoming_when_local_is_deleted_no_mirror() -> Result<()> {
let tx = db.transaction()?;
let data = json!({"key1": "key1-value"});
set(&tx, "ext-id", data)?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(do_sync(&tx, &[])?.len(), 1);
clear(&tx, "ext-id")?;
let payload = Payload::from_record(Record {
// Use a random guid so that we don't find the mirrored data.
// This test is somewhat bad because deduping might obviate
// the need for it.
guid: Guid::from("guid"),
data: RecordData::Data {
ext_id: "ext-id".to_string(),
data: json!({"key2": "key2-value"}).to_string(),
},
})?;
// Use a random guid so that we don't find the mirrored data.
// This test is somewhat bad because deduping might obviate
// the need for it.
let record = make_incoming(&Guid::new("guid"), "ext-id", &json!({"key2": "key2-value"}));
// We completely accept the incoming record.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
assert_eq!(do_sync(&tx, &[record])?.len(), 0);
check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
Ok(())
}
@@ -336,19 +323,13 @@ fn test_accept_deleted_key_mirrored() -> Result<()> {
let tx = db.transaction()?;
let data = json!({"key1": "key1-value", "key2": "key2-value"});
set(&tx, "ext-id", data)?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(do_sync(&tx, &[])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
// Incoming payload without 'key1'. Because we previously uploaded
// key1, this means another client deleted it.
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
data: RecordData::Data {
ext_id: "ext-id".to_string(),
data: json!({"key2": "key2-value"}).to_string(),
},
})?;
let record = make_incoming(&guid, "ext-id", &json!({"key2": "key2-value"}));
// We completely accept the incoming record.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
assert_eq!(do_sync(&tx, &[record])?.len(), 0);
check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
Ok(())
}
@@ -362,14 +343,8 @@ fn test_merged_no_mirror() -> Result<()> {
// Incoming payload without 'key1' and some data for 'key2'.
// Because we never uploaded 'key1', we merge our local values
// with the remote.
let payload = Payload::from_record(Record {
guid: Guid::from("guid"),
data: RecordData::Data {
ext_id: "ext-id".to_string(),
data: json!({"key2": "key2-value"}).to_string(),
},
})?;
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
let record = make_incoming(&Guid::new("guid"), "ext-id", &json!({"key2": "key2-value"}));
assert_eq!(do_sync(&tx, &[record])?.len(), 1);
check_finished_with(
&tx,
"ext-id",
@@ -384,7 +359,7 @@ fn test_merged_incoming() -> Result<()> {
let tx = db.transaction()?;
let old_data = json!({"key1": "key1-value", "key2": "key2-value", "doomed_key": "deletable"});
set(&tx, "ext-id", old_data)?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(do_sync(&tx, &[])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
// We update 'key1' locally.
let local_data = json!({"key1": "key1-new", "key2": "key2-value", "doomed_key": "deletable"});
@@ -393,15 +368,13 @@ fn test_merged_incoming() -> Result<()> {
// the 'doomed_key'.
// Because we never uploaded our data, we'll merge our
// key1 in, but otherwise keep the server's changes.
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
data: RecordData::Data {
ext_id: "ext-id".to_string(),
data: json!({"key1": "key1-value", "key2": "key2-incoming"}).to_string(),
},
})?;
let record = make_incoming(
&guid,
"ext-id",
&json!({"key1": "key1-value", "key2": "key2-incoming"}),
);
// We should send our 'key1'
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
assert_eq!(do_sync(&tx, &[record])?.len(), 1);
check_finished_with(
&tx,
"ext-id",
@@ -417,7 +390,7 @@ fn test_merged_with_null_payload() -> Result<()> {
let old_data = json!({"key1": "key1-value"});
set(&tx, "ext-id", old_data.clone())?;
// Push this change remotely.
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(do_sync(&tx, &[])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
assert_eq!(
get_mirror_data(&tx, &guid),
@@ -426,16 +399,10 @@ fn test_merged_with_null_payload() -> Result<()> {
let local_data = json!({"key1": "key1-new", "key2": "key2-value"});
set(&tx, "ext-id", local_data.clone())?;
// Incoming payload with the same old data.
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
data: RecordData::Data {
ext_id: "ext-id".to_string(),
data: old_data.to_string(),
},
})?;
let record = make_incoming(&guid, "ext-id", &old_data);
// Three-way merge will not detect any change in key1, so we
// should keep our entire new value.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
assert_eq!(do_sync(&tx, &[record])?.len(), 1);
check_finished_with(&tx, "ext-id", local_data)?;
Ok(())
}
@@ -446,16 +413,13 @@ fn test_deleted_mirrored_object_accept() -> Result<()> {
let tx = db.transaction()?;
let data = json!({"key1": "key1-value", "key2": "key2-value"});
set(&tx, "ext-id", data)?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(do_sync(&tx, &[])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
// Incoming payload with data deleted.
// We synchronize this deletion by deleting the keys we think
// were on the server.
let payload = Payload::from_record(Record {
guid: Guid::from(guid.clone()),
data: RecordData::Tombstone,
})?;
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
let record = make_incoming_tombstone(&guid);
assert_eq!(do_sync(&tx, &[record])?.len(), 0);
assert_eq!(get_local_data(&tx, "ext-id"), DbData::NoRow);
assert_eq!(get_mirror_data(&tx, &guid), DbData::NullRow);
Ok(())
@@ -466,7 +430,7 @@ fn test_deleted_mirrored_object_merged() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
set(&tx, "ext-id", json!({"key1": "key1-value"}))?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(do_sync(&tx, &[])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
set(
&tx,
@@ -476,12 +440,9 @@ fn test_deleted_mirrored_object_merged() -> Result<()> {
// Incoming payload with data deleted.
// We synchronize this deletion by deleting the keys we think
// were on the server.
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
data: RecordData::Tombstone,
})?;
let record = make_incoming_tombstone(&guid);
// This overrides the change to 'key1', but we still upload 'key2'.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
assert_eq!(do_sync(&tx, &[record])?.len(), 1);
check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
Ok(())
}
@@ -493,25 +454,19 @@ fn test_deleted_mirrored_tombstone_merged() -> Result<()> {
let tx = db.transaction()?;
// Sync some data so we can get the guid for this extension.
set(&tx, "ext-id", json!({"key1": "key1-value"}))?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(do_sync(&tx, &[])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
// Sync a delete for this data so we have a tombstone in the mirror.
let payload = Payload::from_record(Record {
guid: Guid::from(guid.clone()),
data: RecordData::Tombstone,
})?;
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
let record = make_incoming_tombstone(&guid);
assert_eq!(do_sync(&tx, &[record])?.len(), 0);
assert_eq!(get_mirror_data(&tx, &guid), DbData::NullRow);
// Set some data and sync it simultaneously with another incoming delete.
set(&tx, "ext-id", json!({"key2": "key2-value"}))?;
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
data: RecordData::Tombstone,
})?;
let record = make_incoming_tombstone(&guid);
// We cannot delete any matching keys because there are no
// matching keys. Instead we push our data.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
assert_eq!(do_sync(&tx, &[record])?.len(), 1);
check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
Ok(())
}
@@ -523,14 +478,11 @@ fn test_deleted_not_mirrored_object_merged() -> Result<()> {
let data = json!({"key1": "key1-value", "key2": "key2-value"});
set(&tx, "ext-id", data)?;
// Incoming payload with data deleted.
let payload = Payload::from_record(Record {
guid: Guid::from("guid"),
data: RecordData::Tombstone,
})?;
let record = make_incoming_tombstone(&Guid::new("guid"));
// We normally delete the keys we think were on the server, but
// here we have no information about what was on the server, so we
// don't delete anything. We merge in all undeleted keys.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
assert_eq!(do_sync(&tx, &[record])?.len(), 1);
check_finished_with(
&tx,
"ext-id",
@@ -548,15 +500,13 @@ fn test_conflicting_incoming() -> Result<()> {
// Incoming payload without 'key1' and conflicting for 'key2'.
// Because we never uploaded either of our keys, we'll merge our
// key1 in, but the server key2 wins.
let payload = Payload::from_record(Record {
guid: Guid::from("guid"),
data: RecordData::Data {
ext_id: "ext-id".to_string(),
data: json!({"key2": "key2-incoming"}).to_string(),
},
})?;
let record = make_incoming(
&Guid::new("guid"),
"ext-id",
&json!({"key2": "key2-incoming"}),
);
// We should send our 'key1'
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
assert_eq!(do_sync(&tx, &[record])?.len(), 1);
check_finished_with(
&tx,
"ext-id",
@@ -564,3 +514,16 @@ fn test_conflicting_incoming() -> Result<()> {
)?;
Ok(())
}
#[test]
fn test_invalid_incoming() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let json = json!({"id": "id", "payload": json!("").to_string()});
let bso = serde_json::from_value::<IncomingBso>(json).unwrap();
let record = bso.into_content();
// Should do nothing.
assert_eq!(do_sync(&tx, &[record])?.len(), 0);
Ok(())
}