Backed out 2 changesets (bug 1888472) for causing failures at test_StorageSyncService.js. CLOSED TREE

Backed out changeset c6d3b6e81216 (bug 1888472)
Backed out changeset 9a58bdc9bb10 (bug 1888472)
This commit is contained in:
Butkovits Atila 2024-09-04 23:19:33 +03:00
Родитель b8436fd050
Коммит 585cb9affe
64 изменённых файлов: 3359 добавлений и 3342 удалений

Просмотреть файл

@ -60,9 +60,9 @@ git = "https://github.com/mozilla-spidermonkey/jsparagus"
rev = "61f399c53a641ebd3077c1f39f054f6d396a633c"
replace-with = "vendored-sources"
[source."git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b"]
[source."git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"]
git = "https://github.com/mozilla/application-services"
rev = "b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
rev = "dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
replace-with = "vendored-sources"
[source."git+https://github.com/mozilla/audioipc?rev=e6f44a2bd1e57d11dfc737632a9e849077632330"]

54
Cargo.lock сгенерированный
Просмотреть файл

@ -1708,7 +1708,7 @@ dependencies = [
[[package]]
name = "error-support"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"error-support-macros",
"lazy_static",
@ -1720,7 +1720,7 @@ dependencies = [
[[package]]
name = "error-support-macros"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"proc-macro2",
"quote",
@ -2374,7 +2374,7 @@ dependencies = [
"uniffi-fixture-refcounts",
"url",
"viaduct",
"webext-storage",
"webext_storage_bridge",
"webrender_bindings",
"wgpu_bindings",
"wpf-gpu-raster",
@ -3036,7 +3036,7 @@ dependencies = [
[[package]]
name = "interrupt-support"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"lazy_static",
"parking_lot",
@ -4250,7 +4250,7 @@ dependencies = [
[[package]]
name = "nss_build_common"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
[[package]]
name = "nsstring"
@ -4463,7 +4463,7 @@ checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba"
[[package]]
name = "payload-support"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"serde",
"serde_derive",
@ -4935,7 +4935,7 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da"
[[package]]
name = "relevancy"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"anyhow",
"base64 0.21.3",
@ -4958,7 +4958,7 @@ dependencies = [
[[package]]
name = "remote_settings"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"log",
"parking_lot",
@ -5516,7 +5516,7 @@ dependencies = [
[[package]]
name = "sql-support"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"ffi-support",
"interrupt-support",
@ -5698,7 +5698,7 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "suggest"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"anyhow",
"chrono",
@ -5750,7 +5750,7 @@ dependencies = [
[[package]]
name = "sync-guid"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"base64 0.21.3",
"rand",
@ -5761,7 +5761,7 @@ dependencies = [
[[package]]
name = "sync15"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"anyhow",
"error-support",
@ -5793,7 +5793,7 @@ dependencies = [
[[package]]
name = "tabs"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"anyhow",
"error-support",
@ -6118,7 +6118,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"rusqlite",
"serde",
@ -6485,7 +6485,7 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "viaduct"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"ffi-support",
"log",
@ -6633,7 +6633,7 @@ dependencies = [
[[package]]
name = "webext-storage"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=b94438a80dc0e6b9fbd521ac99b032dd1e95608b#b94438a80dc0e6b9fbd521ac99b032dd1e95608b"
source = "git+https://github.com/mozilla/application-services?rev=dbeaef2eb28e9da7cc2f96e26296513cc4e07c07#dbeaef2eb28e9da7cc2f96e26296513cc4e07c07"
dependencies = [
"anyhow",
"error-support",
@ -6655,6 +6655,28 @@ dependencies = [
"url",
]
[[package]]
name = "webext_storage_bridge"
version = "0.1.0"
dependencies = [
"anyhow",
"atomic_refcell",
"cstr",
"golden_gate",
"interrupt-support",
"moz_task",
"nserror",
"nsstring",
"once_cell",
"serde",
"serde_json",
"sql-support",
"storage_variant",
"thin-vec",
"webext-storage",
"xpcom",
]
[[package]]
name = "webrender"
version = "0.62.0"

Просмотреть файл

@ -210,14 +210,14 @@ midir = { git = "https://github.com/mozilla/midir.git", rev = "85156e360a37d8517
malloc_size_of_derive = { path = "xpcom/rust/malloc_size_of_derive" }
# application-services overrides to make updating them all simpler.
interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "b94438a80dc0e6b9fbd521ac99b032dd1e95608b" }
relevancy = { git = "https://github.com/mozilla/application-services", rev = "b94438a80dc0e6b9fbd521ac99b032dd1e95608b" }
sql-support = { git = "https://github.com/mozilla/application-services", rev = "b94438a80dc0e6b9fbd521ac99b032dd1e95608b" }
suggest = { git = "https://github.com/mozilla/application-services", rev = "b94438a80dc0e6b9fbd521ac99b032dd1e95608b" }
sync15 = { git = "https://github.com/mozilla/application-services", rev = "b94438a80dc0e6b9fbd521ac99b032dd1e95608b" }
tabs = { git = "https://github.com/mozilla/application-services", rev = "b94438a80dc0e6b9fbd521ac99b032dd1e95608b" }
viaduct = { git = "https://github.com/mozilla/application-services", rev = "b94438a80dc0e6b9fbd521ac99b032dd1e95608b" }
webext-storage = { git = "https://github.com/mozilla/application-services", rev = "b94438a80dc0e6b9fbd521ac99b032dd1e95608b" }
interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "dbeaef2eb28e9da7cc2f96e26296513cc4e07c07" }
relevancy = { git = "https://github.com/mozilla/application-services", rev = "dbeaef2eb28e9da7cc2f96e26296513cc4e07c07" }
sql-support = { git = "https://github.com/mozilla/application-services", rev = "dbeaef2eb28e9da7cc2f96e26296513cc4e07c07" }
suggest = { git = "https://github.com/mozilla/application-services", rev = "dbeaef2eb28e9da7cc2f96e26296513cc4e07c07" }
sync15 = { git = "https://github.com/mozilla/application-services", rev = "dbeaef2eb28e9da7cc2f96e26296513cc4e07c07" }
tabs = { git = "https://github.com/mozilla/application-services", rev = "dbeaef2eb28e9da7cc2f96e26296513cc4e07c07" }
viaduct = { git = "https://github.com/mozilla/application-services", rev = "dbeaef2eb28e9da7cc2f96e26296513cc4e07c07" }
webext-storage = { git = "https://github.com/mozilla/application-services", rev = "dbeaef2eb28e9da7cc2f96e26296513cc4e07c07" }
# Patch `gpu-descriptor` 0.3.0 to remove unnecessary `allocator-api2` dep.:
# Still waiting for the now-merged <https://github.com/zakarumych/gpu-descriptor/pull/40> to be released.

Просмотреть файл

@ -111,3 +111,10 @@ The following XPCOM components are written in Rust.
which [merges](https://mozilla.github.io/dogear) bookmarks from Firefox Sync
with bookmarks in the Places database.
[There's also some docs on how Rust interacts with Sync](/services/sync/rust-engines.rst)
- [webext_storage_bridge](https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge),
which powers the WebExtension storage.sync API. It's a self-contained example
that pulls in a crate from application-services for the heavy lifting, wraps
that up in a Rust XPCOM component, and then wraps the component in a JS
interface. There's also some boilerplate there around adding a
`components.conf` file, and a dummy C++ header that declares the component
constructor. [It has some in-depth documentation on how it hangs together](../toolkit/components/extensions/webextensions/webext-storage.rst).

Просмотреть файл

@ -3706,6 +3706,7 @@ pref("webextensions.webRequest.requestBodyMaxRawBytes", 16777216);
pref("webextensions.storage.session.enforceQuota", false);
#endif
pref("webextensions.storage.sync.enabled", true);
// Should we use the old kinto-based implementation of storage.sync? To be removed in bug 1637465.
pref("webextensions.storage.sync.kinto", false);
// Server used by the old kinto-based implementation of storage.sync.

Просмотреть файл

@ -125,8 +125,9 @@ WebExt-Storage
webext-storage is implemented in Rust and lives in
`application services <https://github.com/mozilla/application-services/tree/main/components/webext-storage>`_
and is vendored into the addons code - note that this includes the storage
*and* Sync code. The Sync engine itself is a shim in the sync directory.
and is vendored into the `addons code <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge>`_ -
note that this includes the storage *and* Sync code. The Sync engine itself
is a shim in the sync directory.
See the :doc:`rust-engines` document for more about how rust engines are
integrated.

Просмотреть файл

@ -17,10 +17,21 @@ The bridge
==========
`"Golden Gate" <https://searchfox.org/mozilla-central/source/services/sync/golden_gate>`_
was previously used to help bridge any Rust implemented Sync engines with desktop,
but most of that logic has been removed. The integration of `UniFFI <https://github.com/mozilla/uniffi-rs>`_-ed components
made the Golden Gate bridge code obsolete. Currently Golden Gate contains the
logging logic for the components and the bridged engines exist in application
services within the respective sync components. For instance, these are bridged
engines for `tabs <https://github.com/mozilla/application-services/blob/main/components/tabs/src/sync/bridge.rs>`_ and
`webext-storage <https://github.com/mozilla/application-services/blob/main/components/webext-storage/src/sync/bridge.rs>`_.
is a utility to help bridge any Rust implemented Sync engines with desktop. In
other words, it's a "rusty bridge" - get it? Get it? Yet another of Lina's puns
that live on!
One of the key challenges with integrating a Rust Sync component with desktop
is the different threading models. The Rust code tends to be synchronous -
most functions block the calling thread to do the disk or network IO necessary
to work - it assumes that the consumer will delegate this to some other thread.
So golden_gate is this background thread delegation for a Rust Sync engine -
gecko calls golden-gate on the main thread, it marshals the call to a worker
thread, and the result is marshalled back to the main thread.
It's worth noting that golden_gate is just for the Sync engine part - other
parts of the component (ie, the part that provides the functionality that's not
sync related) will have its own mechanism for this. For example, the
`webext-storage bridge <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge/src>`_
uses a similar technique `which has some in-depth documentation <../../toolkit/components/extensions/webextensions/webext-storage.html>`_.

Просмотреть файл

@ -0,0 +1,74 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use nsstring::nsCString;
use storage_variant::VariantType;
use sync15::Guid;
use xpcom::{interfaces::nsIVariant, RefPtr};
/// An operation that runs on the background thread, and optionally passes a
/// result to its callback.
pub enum Ferry {
    /// Fetch the engine's last sync time, in milliseconds.
    LastSync,
    /// Set the engine's last sync time, in milliseconds.
    SetLastSync(i64),
    /// Fetch the engine's sync ID, if it has one.
    SyncId,
    /// Reset the engine's sync ID; the engine replies with the ID to use.
    ResetSyncId,
    /// Compare the engine's sync ID with the given one (from `meta/global`);
    /// the engine replies with the final ID to use.
    EnsureCurrentSyncId(String),
    /// Signal that the engine is about to sync.
    SyncStarted,
    /// Hand incoming record envelopes, each serialized as JSON, to the engine.
    StoreIncoming(Vec<nsCString>),
    /// Mark the records with the given GUIDs as uploaded at the given server
    /// time, in milliseconds.
    SetUploaded(i64, Vec<Guid>),
    /// Signal that all records have been uploaded and the sync is done.
    SyncFinished,
    /// Reset all local Sync state, without erasing user data.
    Reset,
    /// Erase all local user data.
    Wipe,
}
impl Ferry {
/// Returns the operation name for debugging and labeling the task
/// runnable.
pub fn name(&self) -> &'static str {
match self {
Ferry::LastSync => concat!(module_path!(), "getLastSync"),
Ferry::SetLastSync(_) => concat!(module_path!(), "setLastSync"),
Ferry::SyncId => concat!(module_path!(), "getSyncId"),
Ferry::ResetSyncId => concat!(module_path!(), "resetSyncId"),
Ferry::EnsureCurrentSyncId(_) => concat!(module_path!(), "ensureCurrentSyncId"),
Ferry::SyncStarted => concat!(module_path!(), "syncStarted"),
Ferry::StoreIncoming { .. } => concat!(module_path!(), "storeIncoming"),
Ferry::SetUploaded { .. } => concat!(module_path!(), "setUploaded"),
Ferry::SyncFinished => concat!(module_path!(), "syncFinished"),
Ferry::Reset => concat!(module_path!(), "reset"),
Ferry::Wipe => concat!(module_path!(), "wipe"),
}
}
}
/// The result of a ferry task, sent from the background thread back to the
/// main thread. Results are converted to variants, and passed as arguments to
/// `mozIBridgedSyncEngineCallback`s.
pub enum FerryResult {
    /// A last sync time, in milliseconds.
    LastSync(i64),
    /// The engine's sync ID, or `None` if it doesn't have one.
    SyncId(Option<String>),
    /// The sync ID the engine settled on, after a reset or an
    /// `ensureCurrentSyncId` call.
    AssignedSyncId(String),
    /// No result; surfaces to the callback as a null variant.
    Null,
}
impl Default for FerryResult {
fn default() -> Self {
FerryResult::Null
}
}
impl FerryResult {
    /// Converts the result to an `nsIVariant` that can be passed as an
    /// argument to `callback.handleResult()`.
    pub fn into_variant(self) -> RefPtr<nsIVariant> {
        match self {
            Self::LastSync(millis) => millis.into_variant(),
            Self::SyncId(Some(id)) => nsCString::from(id).into_variant(),
            Self::AssignedSyncId(id) => nsCString::from(id).into_variant(),
            // "No sync ID" and "no result" both surface as a null variant.
            Self::SyncId(None) | Self::Null => ().into_variant(),
        }
    }
}

Просмотреть файл

@ -2,16 +2,118 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! **Golden Gate** 🌉 was created to bridge Desktop Sync to our suite of
//! Rust sync and storage components. But the UniFFI-cation of our
//! components made much of Golden Gate's logic obsolete. It is now mainly
//! a means to access LogSink, the logger for our components.
//! **Golden Gate** 🌉 is a crate for bridging Desktop Sync to our suite of
//! Rust sync and storage components. It connects Sync's `BridgedEngine` class
//! to the Rust `BridgedEngine` trait via the `mozIBridgedSyncEngine` XPCOM
//! interface.
//!
//! Due to limitations in implementing XPCOM interfaces for generic types,
//! Golden Gate doesn't implement `mozIBridgedSyncEngine` directly. Instead,
//! it provides helpers, called "ferries", for passing Sync records between
//! JavaScript and Rust. The ferries also handle threading and type
//! conversions.
//!
//! Here's a step-by-step guide for adding a new Rust Sync engine to Firefox.
//!
//! ## Step 1: Create your (XPCOM) bridge
//!
//! In your consuming crate, define a type for your `mozIBridgedSyncEngine`
//! implementation. We'll call this type the **bridge**. The bridge is
//! responsible for exposing your Sync engine to XPIDL [^1], in a way that lets
//! JavaScript call it.
//!
//! For your bridge type, you'll need to implement an xpcom interface with the
//! `#[xpcom(implement(mozIBridgedSyncEngine), nonatomic)]` attribute then
//! define `xpcom_method!()` stubs for the `mozIBridgedSyncEngine` methods. For
//! more details about implementing XPCOM methods in Rust, check out the docs in
//! `xpcom/rust/xpcom/src/method.rs`.
//!
//! You'll also need to add an entry for your bridge type to `components.conf`,
//! and define C++ and Rust constructors for it, so that JavaScript code can
//! create instances of it. Check out `NS_NewWebExtStorage` (and, in C++,
//! `mozilla::extensions::storageapi::NewWebExtStorage`) and
//! `NS_NewSyncedBookmarksMerger` (`mozilla::places::NewSyncedBookmarksMerger`
//! in C++) for how to do this.
//!
//! [^1]: You can think of XPIDL as a souped-up C FFI, with richer types and a
//! degree of type safety.
//!
//! ## Step 2: Add a background task queue to your bridge
//!
//! A task queue lets your engine do I/O, merging, and other syncing tasks on a
//! background thread pool. This is important because database reads and writes
//! can take an unpredictable amount of time. Doing these on the main thread can
//! cause jank, and, in the worst case, lock up the browser UI for seconds at a
//! time.
//!
//! The `moz_task` crate provides a `create_background_task_queue` function to
//! do this. Once you have a queue, you can use it to call into your Rust
//! engine. Golden Gate takes care of ferrying arguments back and forth across
//! the thread boundary.
//!
//! Since it's a queue, ferries arrive in the order they're scheduled, so
//! your engine's `store_incoming` method will always be called before `apply`,
//! which is likewise called before `set_uploaded`. The thread manager scales
//! the pool for you; you don't need to create or manage your own threads.
//!
//! ## Step 3: Create your Rust engine
//!
//! Next, you'll need to implement the Rust side of the bridge. This is a type
//! that implements the `BridgedEngine` trait.
//!
//! Bridged engines handle storing incoming Sync records, merging changes,
//! resolving conflicts, and fetching outgoing records for upload. Under the
//! hood, your engine will hold either a database connection directly, or
//! another object that does.
//!
//! Although outside the scope of Golden Gate, your engine will also likely
//! expose a data storage API, for fetching, updating, and deleting items
//! locally. Golden Gate provides the syncing layer on top of this local store.
//!
//! A `BridgedEngine` itself doesn't need to be `Send` or `Sync`, but the
//! ferries require both, since they're calling into your bridge on the
//! background task queue.
//!
//! In practice, this means your bridge will need to hold a thread-safe owned
//! reference to the engine, via `Arc<Mutex<BridgedEngine>>`. In fact, this
//! pattern is so common that Golden Gate implements `BridgedEngine` for any
//! `Mutex<BridgedEngine>`, which automatically locks the mutex before calling
//! into the engine.
//!
//! ## Step 4: Connect the bridge to the JavaScript and Rust sides
//!
//! On the JavaScript side, you'll need to subclass Sync's `BridgedEngine`
//! class, and give it a handle to your XPCOM bridge. The base class has all the
//! machinery for hooking up any `mozIBridgedSyncEngine` implementation so that
//! Sync can drive it.
//!
//! On the Rust side, each `mozIBridgedSyncEngine` method should create a
//! Golden Gate ferry, and dispatch it to the background task queue. The
//! ferries correspond to the method names. For example, `ensureCurrentSyncId`
//! should create a `Ferry::ensure_current_sync_id(...)`; `storeIncoming`, a
//! `Ferry::store_incoming(...)`; and so on. This is mostly boilerplate.
//!
//! And that's it! Each ferry will, in turn, call into your Rust
//! `BridgedEngine`, and send the results back to JavaScript.
//!
//! For an example of how all this works, including exposing a storage (not
//! just syncing!) API to JS via XPIDL, check out `webext_storage::Bridge` for
//! the `storage.sync` API!
#[macro_use]
extern crate cstr;
pub mod error;
mod ferry;
pub mod log;
pub mod task;
pub use crate::log::LogSink;
pub use error::{Error, Result};
// Re-export items from `interrupt-support` and `sync15`, so that
// consumers of `golden_gate` don't have to depend on them.
pub use interrupt_support::{Interrupted, Interruptee};
pub use sync15::bso::{IncomingBso, OutgoingBso};
pub use sync15::engine::{ApplyResults, BridgedEngine};
pub use sync15::Guid;
pub use task::{ApplyTask, FerryTask};

Просмотреть файл

@ -0,0 +1,355 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::{fmt::Write, mem, result};
use atomic_refcell::AtomicRefCell;
use moz_task::{DispatchOptions, Task, TaskRunnable, ThreadPtrHandle, ThreadPtrHolder};
use nserror::{nsresult, NS_ERROR_FAILURE};
use nsstring::{nsACString, nsCString};
use sync15::engine::{ApplyResults, BridgedEngine};
use sync15::Guid;
use thin_vec::ThinVec;
use xpcom::{
interfaces::{
mozIBridgedSyncEngineApplyCallback, mozIBridgedSyncEngineCallback, nsIEventTarget,
},
RefPtr,
};
use crate::error::{Error, Result};
use crate::ferry::{Ferry, FerryResult};
/// A ferry task sends (or ferries) an operation to a bridged engine on a
/// background thread or task queue, and ferries back an optional result to
/// a callback.
pub struct FerryTask {
    /// We want to ensure scheduled ferries can't block finalization of the underlying
    /// store - we want a degree of confidence that closing the database will happen when
    /// we want even if tasks are queued up to run on another thread.
    /// We rely on the semantics of our BridgedEngines to help here:
    /// * A bridged engine is expected to hold a weak reference to its store.
    /// * Our LazyStore is the only thing holding a reference to the "real" store.
    /// Thus, when our LazyStore asks our "real" store to close, we can be confident
    /// a close will happen (ie, we assume that the real store will be able to unwrap
    /// the underlying sqlite `Connection` (using `Arc::try_unwrap`) and close it).
    /// However, note that if an operation on the bridged engine is currently running,
    /// we will block waiting for that operation to complete, so while this isn't
    /// guaranteed to happen immediately, it should happen "soon enough".
    engine: Box<dyn BridgedEngine>,
    /// The operation to run against the engine on the background thread.
    ferry: Ferry,
    /// The callback to invoke with the result; bound to the thread that
    /// created the task.
    callback: ThreadPtrHandle<mozIBridgedSyncEngineCallback>,
    /// The ferry's outcome, written by `run()` on the background thread and
    /// consumed by `done()`.
    result: AtomicRefCell<anyhow::Result<FerryResult>>,
}
impl FerryTask {
    /// Creates a task to fetch the engine's last sync time, in milliseconds.
    #[inline]
    pub fn for_last_sync(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::LastSync, callback)
    }

    /// Creates a task to set the engine's last sync time, in milliseconds.
    #[inline]
    pub fn for_set_last_sync(
        engine: Box<dyn BridgedEngine>,
        last_sync_millis: i64,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::SetLastSync(last_sync_millis), callback)
    }

    /// Creates a task to fetch the engine's sync ID.
    #[inline]
    pub fn for_sync_id(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::SyncId, callback)
    }

    /// Creates a task to reset the engine's sync ID and all its local Sync
    /// metadata.
    #[inline]
    pub fn for_reset_sync_id(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::ResetSyncId, callback)
    }

    /// Creates a task to compare the bridged engine's local sync ID with
    /// the `new_sync_id` from `meta/global`, and ferry back the final sync ID
    /// to use.
    #[inline]
    pub fn for_ensure_current_sync_id(
        engine: Box<dyn BridgedEngine>,
        new_sync_id: &nsACString,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        // The XPCOM string is raw bytes; reject non-UTF-8 sync IDs up front.
        Self::with_ferry(
            engine,
            Ferry::EnsureCurrentSyncId(std::str::from_utf8(new_sync_id)?.into()),
            callback,
        )
    }

    /// Creates a task to signal that the engine is about to sync.
    #[inline]
    pub fn for_sync_started(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::SyncStarted, callback)
    }

    /// Creates a task to store incoming records.
    pub fn for_store_incoming(
        engine: Box<dyn BridgedEngine>,
        incoming_envelopes_json: &[nsCString],
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(
            engine,
            Ferry::StoreIncoming(incoming_envelopes_json.to_vec()),
            callback,
        )
    }

    /// Creates a task to mark a subset of outgoing records as uploaded. This
    /// may be called multiple times per sync, or not at all if there are no
    /// records to upload.
    pub fn for_set_uploaded(
        engine: Box<dyn BridgedEngine>,
        server_modified_millis: i64,
        uploaded_ids: &[nsCString],
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        let uploaded_ids = uploaded_ids.iter().map(|id| Guid::from_slice(id)).collect();
        Self::with_ferry(
            engine,
            Ferry::SetUploaded(server_modified_millis, uploaded_ids),
            callback,
        )
    }

    /// Creates a task to signal that all records have been uploaded, and
    /// the engine has been synced. This is called even if there were no
    /// records uploaded.
    #[inline]
    pub fn for_sync_finished(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::SyncFinished, callback)
    }

    /// Creates a task to reset all local Sync state for the engine, without
    /// erasing user data.
    #[inline]
    pub fn for_reset(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::Reset, callback)
    }

    /// Creates a task to erase all local user data for the engine.
    #[inline]
    pub fn for_wipe(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::Wipe, callback)
    }

    /// Creates a task for a ferry. The `callback` is bound to the current
    /// thread, and will be called once, after the ferry returns from the
    /// background thread.
    fn with_ferry(
        engine: Box<dyn BridgedEngine>,
        ferry: Ferry,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        let name = ferry.name();
        Ok(FerryTask {
            engine,
            ferry,
            callback: ThreadPtrHolder::new(
                cstr!("mozIBridgedSyncEngineCallback"),
                RefPtr::new(callback),
            )?,
            // Seed the result with `DidNotRun`, so that if the task is
            // finalized without ever running, `done()` reports an error
            // instead of a bogus success.
            result: AtomicRefCell::new(Err(Error::DidNotRun(name).into())),
        })
    }

    /// Dispatches the task to the given thread `target`.
    pub fn dispatch(self, target: &nsIEventTarget) -> Result<()> {
        let runnable = TaskRunnable::new(self.ferry.name(), Box::new(self))?;
        // `may_block` schedules the task on the I/O thread pool, since we
        // expect most operations to wait on I/O.
        TaskRunnable::dispatch_with_options(
            runnable,
            target,
            DispatchOptions::default().may_block(true),
        )?;
        Ok(())
    }

    /// Runs the task on the background thread. This is split out into its own
    /// method to make error handling easier.
    fn inner_run(&self) -> anyhow::Result<FerryResult> {
        let engine = &self.engine;
        Ok(match &self.ferry {
            Ferry::LastSync => FerryResult::LastSync(engine.last_sync()?),
            Ferry::SetLastSync(last_sync_millis) => {
                engine.set_last_sync(*last_sync_millis)?;
                FerryResult::default()
            }
            Ferry::SyncId => FerryResult::SyncId(engine.sync_id()?),
            Ferry::ResetSyncId => FerryResult::AssignedSyncId(engine.reset_sync_id()?),
            Ferry::EnsureCurrentSyncId(new_sync_id) => {
                FerryResult::AssignedSyncId(engine.ensure_current_sync_id(new_sync_id)?)
            }
            Ferry::SyncStarted => {
                engine.sync_started()?;
                FerryResult::default()
            }
            Ferry::StoreIncoming(incoming_envelopes_json) => {
                // Each envelope is a JSON string; the first parse failure
                // aborts the whole batch.
                let incoming_envelopes = incoming_envelopes_json
                    .iter()
                    .map(|envelope| Ok(serde_json::from_slice(envelope)?))
                    .collect::<Result<_>>()?;
                engine.store_incoming(incoming_envelopes)?;
                FerryResult::default()
            }
            Ferry::SetUploaded(server_modified_millis, uploaded_ids) => {
                engine.set_uploaded(*server_modified_millis, uploaded_ids.as_slice())?;
                FerryResult::default()
            }
            Ferry::SyncFinished => {
                engine.sync_finished()?;
                FerryResult::default()
            }
            Ferry::Reset => {
                engine.reset()?;
                FerryResult::default()
            }
            Ferry::Wipe => {
                engine.wipe()?;
                FerryResult::default()
            }
        })
    }
}
impl Task for FerryTask {
    fn run(&self) {
        // Runs on the background thread; stash the outcome for `done()`.
        let outcome = self.inner_run();
        *self.result.borrow_mut() = outcome;
    }

    fn done(&self) -> result::Result<(), nsresult> {
        // Hand the stored outcome to the callback, swapping a `DidNotRun`
        // placeholder back into the cell so the result is consumed only once.
        let callback = self.callback.get().unwrap();
        let outcome = mem::replace(
            &mut *self.result.borrow_mut(),
            Err(Error::DidNotRun(self.ferry.name()).into()),
        );
        let rv = match outcome {
            Ok(result) => unsafe { callback.HandleSuccess(result.into_variant().coerce()) },
            Err(err) => {
                let mut message = nsCString::new();
                write!(message, "{err}").unwrap();
                unsafe { callback.HandleError(NS_ERROR_FAILURE, &*message) }
            }
        };
        rv.to_result()
    }
}
/// An apply task ferries incoming records to an engine on a background
/// thread, and ferries back records to upload. It's separate from
/// `FerryTask` because its callback type is different.
pub struct ApplyTask {
    /// The engine that will apply the staged records.
    engine: Box<dyn BridgedEngine>,
    /// The callback to invoke with the outgoing records; bound to the thread
    /// that created the task.
    callback: ThreadPtrHandle<mozIBridgedSyncEngineApplyCallback>,
    /// The serialized outgoing records, written by `run()` on the background
    /// thread and consumed by `done()`.
    result: AtomicRefCell<anyhow::Result<Vec<String>>>,
}
impl ApplyTask {
    /// Returns the task name, used for profiler markers and debug output.
    pub fn name() -> &'static str {
        // Include a `::` separator so the label reads like a Rust path
        // (e.g. `golden_gate::apply`) instead of running the module path and
        // "apply" together into one word.
        concat!(module_path!(), "::apply")
    }

    /// Runs the task on the background thread: applies the staged records,
    /// then serializes each outgoing record to JSON for the ferry back to
    /// the main thread.
    fn inner_run(&self) -> anyhow::Result<Vec<String>> {
        let ApplyResults {
            records: outgoing_records,
            ..
        } = self.engine.apply()?;
        let outgoing_records_json = outgoing_records
            .iter()
            .map(|record| Ok(serde_json::to_string(record)?))
            .collect::<Result<_>>()?;
        Ok(outgoing_records_json)
    }

    /// Creates a task. The `callback` is bound to the current thread, and will
    /// be called once, after the records are applied on the background thread.
    pub fn new(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineApplyCallback,
    ) -> Result<ApplyTask> {
        Ok(ApplyTask {
            engine,
            callback: ThreadPtrHolder::new(
                cstr!("mozIBridgedSyncEngineApplyCallback"),
                RefPtr::new(callback),
            )?,
            // Seed the result so `done` reports `DidNotRun` if `run` never
            // executes (e.g. the target thread shuts down first).
            result: AtomicRefCell::new(Err(Error::DidNotRun(Self::name()).into())),
        })
    }

    /// Dispatches the task to the given thread `target`.
    pub fn dispatch(self, target: &nsIEventTarget) -> Result<()> {
        let runnable = TaskRunnable::new(Self::name(), Box::new(self))?;
        // Applying records can do significant database work, so allow the
        // runnable to block.
        TaskRunnable::dispatch_with_options(
            runnable,
            target,
            DispatchOptions::default().may_block(true),
        )?;
        Ok(())
    }
}
// Runs an `ApplyTask` on a background task queue and reports the outgoing
// record envelopes (or an error) back to the main thread.
impl Task for ApplyTask {
    /// Background thread: apply the staged records and stash the outcome.
    fn run(&self) {
        *self.result.borrow_mut() = self.inner_run();
    }

    /// Main thread: take the stored result (replacing it with `DidNotRun`
    /// so a repeat call is detectable) and forward it to the JS callback.
    fn done(&self) -> result::Result<(), nsresult> {
        let callback = self.callback.get().unwrap();
        match mem::replace(
            &mut *self.result.borrow_mut(),
            Err(Error::DidNotRun(Self::name()).into()),
        ) {
            Ok(envelopes) => {
                // Convert the JSON strings into XPCOM strings for the
                // `HandleSuccess` array argument.
                let result = envelopes
                    .into_iter()
                    .map(nsCString::from)
                    .collect::<ThinVec<_>>();
                unsafe { callback.HandleSuccess(&result) }
            }
            Err(err) => {
                let mut message = nsCString::new();
                write!(message, "{err}").unwrap();
                unsafe { callback.HandleError(NS_ERROR_FAILURE, &*message) }
            }
        }
        .to_result()
    }
}

Просмотреть файл

@ -124,6 +124,25 @@ class BridgedRecord extends RawCryptoWrapper {
}
}
/**
 * Error raised when the bridged engine reports a failure. Carries the
 * original nsresult status code in `result` so callers and telemetry can
 * inspect it alongside the message.
 */
class BridgeError extends Error {
  /**
   * @param {number} code The nsresult status code reported by the bridge.
   * @param {string} message The human-readable error message.
   */
  constructor(code, message) {
    super(message);
    this.name = "BridgeError";
    // TODO: We may want to use a different name for this, since errors with
    // a `result` property are treated specially by telemetry, discarding the
    // message...but, unlike other `nserror`s, the message is actually useful,
    // and we still want to capture it.
    this.result = code;
  }
}
/**
 * Error raised when a bridged-engine operation is aborted (the bridge
 * reported `NS_ERROR_ABORT`; see `transformError`).
 */
class InterruptedError extends Error {
  constructor(message) {
    super(message);
    this.name = "InterruptedError";
  }
}
/**
* Adapts a `Log.sys.mjs` logger to a `mozIServicesLogSink`. This class is copied
* from `SyncedBookmarksMirror.sys.mjs`.
@ -167,11 +186,114 @@ export class LogAdapter {
}
}
// This converts the XPCOM-defined, callback-based mozIBridgedSyncEngine to
// a promise-based implementation.
/**
 * Wraps an XPCOM component implementing `mozIBridgedSyncEngine` so each
 * callback-taking method returns a promise instead. Sync attributes are
 * forwarded directly; everything else goes through `#promisify`.
 */
export class BridgeWrapperXPCOM {
  /**
   * @param {Object} component An XPCOM `mozIBridgedSyncEngine` instance.
   */
  constructor(component) {
    this.comp = component;
  }

  // A few sync, non-callback based attributes.
  get storageVersion() {
    return this.comp.storageVersion;
  }

  get allowSkippedRecord() {
    return this.comp.allowSkippedRecord;
  }

  get logger() {
    return this.comp.logger;
  }

  // And the async functions we promisify.
  // Note this is `lastSync` via uniffi but `getLastSync` via xpcom
  lastSync() {
    return BridgeWrapperXPCOM.#promisify(this.comp.getLastSync);
  }

  setLastSync(lastSyncMillis) {
    return BridgeWrapperXPCOM.#promisify(this.comp.setLastSync, lastSyncMillis);
  }

  getSyncId() {
    return BridgeWrapperXPCOM.#promisify(this.comp.getSyncId);
  }

  resetSyncId() {
    return BridgeWrapperXPCOM.#promisify(this.comp.resetSyncId);
  }

  ensureCurrentSyncId(newSyncId) {
    return BridgeWrapperXPCOM.#promisify(
      this.comp.ensureCurrentSyncId,
      newSyncId
    );
  }

  syncStarted() {
    return BridgeWrapperXPCOM.#promisify(this.comp.syncStarted);
  }

  storeIncoming(incomingEnvelopesAsJSON) {
    return BridgeWrapperXPCOM.#promisify(
      this.comp.storeIncoming,
      incomingEnvelopesAsJSON
    );
  }

  apply() {
    return BridgeWrapperXPCOM.#promisify(this.comp.apply);
  }

  setUploaded(newTimestampMillis, uploadedIds) {
    return BridgeWrapperXPCOM.#promisify(
      this.comp.setUploaded,
      newTimestampMillis,
      uploadedIds
    );
  }

  syncFinished() {
    return BridgeWrapperXPCOM.#promisify(this.comp.syncFinished);
  }

  reset() {
    return BridgeWrapperXPCOM.#promisify(this.comp.reset);
  }

  wipe() {
    return BridgeWrapperXPCOM.#promisify(this.comp.wipe);
  }

  // Converts a XPCOM bridged function that takes a callback into one that returns a
  // promise. `func` is called with `params` followed by an inline callback
  // object; failures are converted to JS errors via `transformError`.
  static #promisify(func, ...params) {
    return new Promise((resolve, reject) => {
      func(...params, {
        // This object implicitly implements all three callback interfaces
        // (`mozIBridgedSyncEngine{Apply, Result}Callback`), because they have
        // the same methods. The only difference is the type of the argument
        // passed to `handleSuccess`, which doesn't matter in JS.
        handleSuccess: resolve,
        handleError(code, message) {
          reject(transformError(code, message));
        },
      });
    });
  }
}
/**
* A base class used to plug a Rust engine into Sync, and have it work like any
* other engine. The constructor takes a bridge as its first argument, which is
* a "bridged sync engine", as defined by UniFFI in the application-services
* crate.
* For backwards compatibility, this can also be an instance of an XPCOM
* component class that implements `mozIBridgedSyncEngine`, wrapped in
* a `BridgeWrapperXPCOM` wrapper.
* (Note that at time of writing, the above is slightly aspirational; the
* actual definition of the UniFFI shared bridged engine is still in flux.)
*
* This class inherits from `SyncEngine`, which has a lot of machinery that we
* don't need, but that's fairly easy to override. It would be harder to
@ -365,3 +487,13 @@ BridgedEngine.prototype = {
},
};
Object.setPrototypeOf(BridgedEngine.prototype, SyncEngine.prototype);
/**
 * Maps an XPCOM status code reported by the bridge onto the richer JS error
 * types used by this module.
 *
 * @param {number} code The nsresult status code.
 * @param {string} message The human-readable error message.
 * @returns {Error} An `InterruptedError` for aborts; a `BridgeError`
 *          (carrying `code` in its `result` property) for everything else.
 */
function transformError(code, message) {
  if (code === Cr.NS_ERROR_ABORT) {
    return new InterruptedError(message);
  }
  return new BridgeError(code, message);
}

Просмотреть файл

@ -2,11 +2,11 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
const STORAGE_VERSION = 1; // This needs to be kept in-sync with the rust storage version
import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
import {
BridgedEngine,
BridgeWrapperXPCOM,
LogAdapter,
} from "resource://services-sync/bridged_engine.sys.mjs";
import { SyncEngine, Tracker } from "resource://services-sync/engines.sys.mjs";
@ -15,16 +15,22 @@ const lazy = {};
ChromeUtils.defineESModuleGetters(lazy, {
MULTI_DEVICE_THRESHOLD: "resource://services-sync/constants.sys.mjs",
Observers: "resource://services-common/observers.sys.mjs",
SCORE_INCREMENT_MEDIUM: "resource://services-sync/constants.sys.mjs",
Svc: "resource://services-sync/util.sys.mjs",
extensionStorageSync: "resource://gre/modules/ExtensionStorageSync.sys.mjs",
storageSyncService:
"resource://gre/modules/ExtensionStorageComponents.sys.mjs",
extensionStorageSyncKinto:
"resource://gre/modules/ExtensionStorageSyncKinto.sys.mjs",
});
XPCOMUtils.defineLazyServiceGetter(
lazy,
"StorageSyncService",
"@mozilla.org/extensions/storage/sync;1",
"nsIInterfaceRequestor"
);
const PREF_FORCE_ENABLE = "engine.extension-storage.force";
// A helper to indicate whether extension-storage is enabled - it's based on
@ -63,7 +69,11 @@ function setEngineEnabled(enabled) {
// A "bridged engine" to our webext-storage component.
export function ExtensionStorageEngineBridge(service) {
this.component = lazy.StorageSyncService.getInterface(
Ci.mozIBridgedSyncEngine
);
BridgedEngine.call(this, "Extension-Storage", service);
this._bridge = new BridgeWrapperXPCOM(this.component);
let app_services_logger = Cc["@mozilla.org/appservices/logger;1"].getService(
Ci.mozIAppServicesLogger
@ -78,44 +88,78 @@ ExtensionStorageEngineBridge.prototype = {
// Used to override the engine name in telemetry, so that we can distinguish .
overrideTelemetryName: "rust-webext-storage",
async initialize() {
await SyncEngine.prototype.initialize.call(this);
this._rustStore = await lazy.storageSyncService.getStorageAreaInstance();
this._bridge = await this._rustStore.bridgedEngine();
// Uniffi currently only supports async methods, so we'll need to hardcode
// these values for now (which is fine for now as these hardly ever change)
this._bridge.storageVersion = STORAGE_VERSION;
this._bridge.allowSkippedRecord = true;
this._bridge.getSyncId = async () => {
let syncID = await this._bridge.syncId();
return syncID;
};
this._log.info("Got a bridged engine!");
this._tracker.modified = true;
_notifyPendingChanges() {
return new Promise(resolve => {
this.component
.QueryInterface(Ci.mozISyncedExtensionStorageArea)
.fetchPendingSyncChanges({
QueryInterface: ChromeUtils.generateQI([
"mozIExtensionStorageListener",
"mozIExtensionStorageCallback",
]),
onChanged: (extId, json) => {
try {
lazy.extensionStorageSync.notifyListeners(
extId,
JSON.parse(json)
);
} catch (ex) {
this._log.warn(
`Error notifying change listeners for ${extId}`,
ex
);
}
},
handleSuccess: resolve,
handleError: (code, message) => {
this._log.warn(
"Error fetching pending synced changes",
message,
code
);
resolve();
},
});
});
},
async _notifyPendingChanges() {
try {
let changeSets = await this._rustStore.getSyncedChanges();
_takeMigrationInfo() {
return new Promise(resolve => {
this.component
.QueryInterface(Ci.mozIExtensionStorageArea)
.takeMigrationInfo({
QueryInterface: ChromeUtils.generateQI([
"mozIExtensionStorageCallback",
]),
handleSuccess: result => {
resolve(result ? JSON.parse(result) : null);
},
handleError: (code, message) => {
this._log.warn("Error fetching migration info", message, code);
// `takeMigrationInfo` doesn't actually perform the migration,
// just reads (and clears) any data stored in the DB from the
// previous migration.
//
// Any errors here are very likely occurring a good while
// after the migration ran, so we just warn and pretend
// nothing was there.
resolve(null);
},
});
});
},
changeSets.forEach(changeSet => {
try {
lazy.extensionStorageSync.notifyListeners(
changeSet.extId,
JSON.parse(changeSet.changes)
);
} catch (ex) {
this._log.warn(
`Error notifying change listeners for ${changeSet.extId}`,
ex
);
}
});
} catch (ex) {
this._log.warn("Error fetching pending synced changes", ex);
async _syncStartup() {
let result = await super._syncStartup();
let info = await this._takeMigrationInfo();
if (info) {
lazy.Observers.notify(
"weave:telemetry:migration",
info,
"webext-storage"
);
}
return result;
},
async _processIncoming() {

Просмотреть файл

@ -1,13 +1,24 @@
/* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/ */
const { BridgedEngine } = ChromeUtils.importESModule(
const { BridgedEngine, BridgeWrapperXPCOM } = ChromeUtils.importESModule(
"resource://services-sync/bridged_engine.sys.mjs"
);
const { Service } = ChromeUtils.importESModule(
"resource://services-sync/service.sys.mjs"
);
// Wraps `object` in a proxy whose function-valued properties come back
// pre-bound to the object. This simulates how XPCOM class instances have
// all their methods bound.
function withBoundMethods(object) {
  const handler = {
    get(target, prop) {
      const member = target[prop];
      if (typeof member === "function") {
        return member.bind(target);
      }
      return member;
    },
  };
  return new Proxy(object, handler);
}
add_task(async function test_interface() {
class TestBridge {
constructor() {
@ -28,32 +39,35 @@ add_task(async function test_interface() {
// `mozIBridgedSyncEngine` methods.
lastSync() {
return this.lastSyncMillis;
getLastSync(callback) {
CommonUtils.nextTick(() => callback.handleSuccess(this.lastSyncMillis));
}
setLastSync(millis) {
setLastSync(millis, callback) {
this.lastSyncMillis = millis;
CommonUtils.nextTick(() => callback.handleSuccess());
}
resetSyncId() {
return this.syncID;
resetSyncId(callback) {
CommonUtils.nextTick(() => callback.handleSuccess(this.syncID));
}
ensureCurrentSyncId(newSyncId) {
ensureCurrentSyncId(newSyncId, callback) {
equal(newSyncId, this.syncID, "Local and new sync IDs should match");
return this.syncID;
CommonUtils.nextTick(() => callback.handleSuccess(this.syncID));
}
syncStarted() {
syncStarted(callback) {
this.wasSyncStarted = true;
CommonUtils.nextTick(() => callback.handleSuccess());
}
storeIncoming(envelopes) {
storeIncoming(envelopes, callback) {
this.incomingEnvelopes.push(...envelopes.map(r => JSON.parse(r)));
CommonUtils.nextTick(() => callback.handleSuccess());
}
apply() {
apply(callback) {
let outgoingEnvelopes = [
{
id: "hanson",
@ -75,31 +89,35 @@ add_task(async function test_interface() {
payload: JSON.stringify(cleartext),
})
);
return outgoingEnvelopes;
CommonUtils.nextTick(() => callback.handleSuccess(outgoingEnvelopes));
}
setUploaded(millis, ids) {
setUploaded(millis, ids, callback) {
this.uploadedIDs.push(...ids);
CommonUtils.nextTick(() => callback.handleSuccess());
}
syncFinished() {
syncFinished(callback) {
this.wasSyncFinished = true;
CommonUtils.nextTick(() => callback.handleSuccess());
}
reset() {
reset(callback) {
this.clear();
this.wasReset = true;
CommonUtils.nextTick(() => callback.handleSuccess());
}
wipe() {
wipe(callback) {
this.clear();
this.wasWiped = true;
CommonUtils.nextTick(() => callback.handleSuccess());
}
}
let bridge = new TestBridge();
let engine = new BridgedEngine("Nineties", Service);
engine._bridge = bridge;
engine._bridge = new BridgeWrapperXPCOM(withBoundMethods(bridge));
engine.enabled = true;
let server = await serverForFoo(engine);

Просмотреть файл

@ -13,6 +13,10 @@ const { ExtensionStorageEngineBridge, ExtensionStorageEngineKinto } =
"resource://services-sync/engines/extension-storage.sys.mjs"
);
const { BridgeWrapperXPCOM } = ChromeUtils.importESModule(
"resource://services-sync/bridged_engine.sys.mjs"
);
Services.prefs.setStringPref("webextensions.storage.sync.log.level", "debug");
add_task(async function test_switching_between_kinto_and_bridged() {
@ -103,7 +107,6 @@ add_task(async function test_enable() {
add_task(async function test_notifyPendingChanges() {
let engine = new ExtensionStorageEngineBridge(Service);
await engine.initialize();
let extension = { id: "ext-1" };
let expectedChange = {
@ -114,43 +117,56 @@ add_task(async function test_notifyPendingChanges() {
let lastSync = 0;
let syncID = Utils.makeGUID();
let error = null;
engine._rustStore = {
getSyncedChanges() {
if (error) {
throw new Error(error.message);
} else {
return [
{ extId: extension.id, changes: JSON.stringify(expectedChange) },
];
}
},
};
engine._bridge = {
ensureCurrentSyncId(id) {
engine.component = {
QueryInterface: ChromeUtils.generateQI([
"mozIBridgedSyncEngine",
"mozIExtensionStorageArea",
"mozISyncedExtensionStorageArea",
]),
ensureCurrentSyncId(id, callback) {
if (syncID != id) {
syncID = id;
lastSync = 0;
}
return id;
callback.handleSuccess(id);
},
resetSyncId() {
return syncID;
resetSyncId(callback) {
callback.handleSuccess(syncID);
},
syncStarted() {},
lastSync() {
return lastSync;
syncStarted(callback) {
callback.handleSuccess();
},
setLastSync(lastSyncMillis) {
getLastSync(callback) {
callback.handleSuccess(lastSync);
},
setLastSync(lastSyncMillis, callback) {
lastSync = lastSyncMillis;
callback.handleSuccess();
},
apply() {
return [];
apply(callback) {
callback.handleSuccess([]);
},
fetchPendingSyncChanges(callback) {
if (error) {
callback.handleError(Cr.NS_ERROR_FAILURE, error.message);
} else {
callback.onChanged(extension.id, JSON.stringify(expectedChange));
callback.handleSuccess();
}
},
setUploaded(modified, ids, callback) {
callback.handleSuccess();
},
syncFinished(callback) {
callback.handleSuccess();
},
takeMigrationInfo(callback) {
callback.handleSuccess(null);
},
setUploaded(_modified, _ids) {},
syncFinished() {},
};
engine._bridge = new BridgeWrapperXPCOM(engine.component);
let server = await serverForFoo(engine);
let actualChanges = [];

Просмотреть файл

@ -119,7 +119,7 @@ add_task(async function test_calling_sync_calls_ext_storage_sync() {
returns: Promise.resolve(),
}));
try {
await withContext(async function (context) {
await withSyncContext(async function (context) {
// Set something so that everyone knows that we're using storage.sync
await extensionStorageSync.set(extension, { a: "b" }, context);
let ping = await sync_engine_and_validate_telem(engine, false);

Просмотреть файл

@ -0,0 +1,81 @@
/* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/ */
// Import the rust-based and kinto-based implementations. Not great to grab
// these as they're somewhat private, but we want to run the pings through our
// validation machinery which is here in the sync test code.
const { extensionStorageSync: rustImpl } = ChromeUtils.importESModule(
"resource://gre/modules/ExtensionStorageSync.sys.mjs"
);
const { extensionStorageSyncKinto: kintoImpl } = ChromeUtils.importESModule(
"resource://gre/modules/ExtensionStorageSyncKinto.sys.mjs"
);
const { Service } = ChromeUtils.importESModule(
"resource://services-sync/service.sys.mjs"
);
const { ExtensionStorageEngineBridge } = ChromeUtils.importESModule(
"resource://services-sync/engines/extension-storage.sys.mjs"
);
Services.prefs.setBoolPref("webextensions.storage.sync.kinto", false);
Services.prefs.setStringPref("webextensions.storage.sync.log.level", "debug");
// It's tricky to force error cases here (the databases are opened with
// exclusive locks) and that part of the code has coverage in the vendored
// application-services webext-storage crate. So this just tests that the
// migration data ends up in the ping, and exactly once.
// Verifies that the kinto -> rust webext-storage migration info appears in
// the sync telemetry ping exactly once (first sync only).
add_task(async function test_sync_migration_telem() {
  // Set some stuff using the kinto-based impl prior to fully setting up sync.
  let e1 = { id: "test@mozilla.com" };
  let c1 = { extension: e1, callOnClose() {} };
  let e2 = { id: "test-2@mozilla.com" };
  let c2 = { extension: e2, callOnClose() {} };
  await kintoImpl.set(e1, { foo: "bar" }, c1);
  await kintoImpl.set(e1, { baz: "quux" }, c1);
  await kintoImpl.set(e2, { second: "2nd" }, c2);
  // Reading via the rust impl triggers (and proves) the migration: all three
  // entries across both extensions should be visible.
  Assert.deepEqual(await rustImpl.get(e1, "foo", c1), { foo: "bar" });
  Assert.deepEqual(await rustImpl.get(e1, "baz", c1), { baz: "quux" });
  Assert.deepEqual(await rustImpl.get(e2, null, c2), { second: "2nd" });
  // Explicitly unregister first. It's very possible this isn't needed for this
  // case, however it's fairly harmless, we hope to uplift this patch to beta,
  // and earlier today we had beta-only problems caused by this (bug 1629116)
  await Service.engineManager.unregister("extension-storage");
  await Service.engineManager.register(ExtensionStorageEngineBridge);
  let engine = Service.engineManager.get("extension-storage");
  let server = await serverForFoo(engine, undefined);
  try {
    await SyncTestingInfrastructure(server);
    await Service.engineManager.switchAlternatives();
    _("First sync");
    // First sync: expect exactly one webext-storage migration record
    // covering the 3 entries / 2 extensions set up above.
    let ping = await sync_engine_and_validate_telem(engine, false, null, true);
    Assert.deepEqual(ping.migrations, [
      {
        type: "webext-storage",
        entries: 3,
        entriesSuccessful: 3,
        extensions: 2,
        extensionsSuccessful: 2,
        openFailure: false,
      },
    ]);
    // force another sync
    await engine.setLastSync(0);
    _("Second sync");
    // Second sync: the migration info was consumed, so it must not repeat.
    ping = await sync_engine_and_validate_telem(engine, false, null, true);
    Assert.deepEqual(ping.migrations, undefined);
  } finally {
    // Clean up both backends and tear down the test server/engine.
    await kintoImpl.clear(e1, c1);
    await kintoImpl.clear(e2, c2);
    await rustImpl.clear(e1, c1);
    await rustImpl.clear(e2, c2);
    await promiseStopServer(server);
    await engine.finalize();
  }
});

Просмотреть файл

@ -29,13 +29,13 @@ add_task(async function test_changing_extension_storage_changes_score() {
const tracker = engine._tracker;
const extension = { id: "my-extension-id" };
tracker.start();
await withContext(async function (context) {
await withSyncContext(async function (context) {
await extensionStorageSync.set(extension, { a: "b" }, context);
});
Assert.equal(tracker.score, SCORE_INCREMENT_MEDIUM);
tracker.resetScore();
await withContext(async function (context) {
await withSyncContext(async function (context) {
await extensionStorageSync.remove(extension, "a", context);
});
Assert.equal(tracker.score, SCORE_INCREMENT_MEDIUM);

Просмотреть файл

@ -121,6 +121,10 @@ run-sequentially = "extension-storage migration happens only once, and must be t
skip-if = ["appname == 'thunderbird'"]
run-sequentially = "extension-storage migration happens only once, and must be tested first."
["test_extension_storage_migration_telem.js"]
skip-if = ["appname == 'thunderbird'"]
run-sequentially = "extension-storage migration happens only once, and must be tested first."
["test_extension_storage_tracker_kinto.js"]
skip-if = ["appname == 'thunderbird'"]

Просмотреть файл

@ -1 +1 @@
{"files":{"Cargo.toml":"81b4e03b6df32859fabe75be2a86051e4646b2eac61d610323beb780b7f7574b","README.md":"821cac7eb5b963fc3f3fe21dd890427ab2bbf335cb25cbae89b713b3350687c5","build.rs":"92f7d380f3d8fab1e6d80276915af57192e276321d132a5f800ea4520e9cb469","sql/create_schema.sql":"a17311a407ec10e033886b7125da4c8b84bc6d761f6b28edc9594de430e1d964","sql/create_sync_temp_tables.sql":"860ede362c94feb47d85522553fa2852f9bdb9f9b025d6438dd5dee3d4acd527","sql/tests/create_schema_v1.sql":"77cf0c90eaac3e1aea626537147e1b8ec349b68d6076c92fa7ae402aac613050","src/api.rs":"b3f0ff950178d006e443ddbeec4513e0acaa8894211053cfdfc1de104b9fb6ab","src/db.rs":"22fa988744beca27adb34670eadf926fa288c8362a74869608e23e3563e24a62","src/error.rs":"10d99e3dc6a38951456c0fac7e46fb4f441e976b47fdddea257badbc66b8702d","src/ffi.rs":"f66a81393bebe7a4b7e7960cb426df106ff1f02bfebcaa6e335b4b8b56c5c936","src/lib.rs":"259fbbfe5b60fc1e43ef4cfe08fb131d6c1c42c49fee74a3b687e00ac91c361a","src/migration.rs":"8d92f82b2ba38e1039fd054c8c75078a6b896a0d3cdc1a52571456b25a32c9c3","src/schema.rs":"d8dd8f66cad71e3e369722734e0d5d16fd9423d5f6a5abba1854a27e1e814724","src/store.rs":"d53b74659cc41ba66f6acc91b831c45d0c563c7299308ceb62c7cb4e74ff3e9a","src/sync/bridge.rs":"2b998b18516e6477b7fa29d24b725adcd6ea70f483eca9de1bdad4eda39209a5","src/sync/incoming.rs":"dd77c64e2ade4f39cba258decab6d3db8ad0b5f513aa018efbd56b9869a021d9","src/sync/mod.rs":"bc396eecf80132df5c4bc779e77bc4782f0dcfc75ce27260aa0953caf38d3733","src/sync/outgoing.rs":"dacb77b956f2546fd60a89367927a199d9b662b17201d0781145f7405b61fdce","src/sync/sync_tests.rs":"bc9845312c7b08c5efd892979f61e9385b553f872a6c5d78600f4587b14421f5","src/webext-storage.udl":"9c0614c00e0558ebf5643c29c2aa230ec896103f84224e0d46ab6aa4cd99a788","uniffi.toml":"beeec89c2f877eb89be0090dc304dbc7c74e787385e7459bad78c6165bb66791"},"package":null}
{"files":{"Cargo.toml":"81b4e03b6df32859fabe75be2a86051e4646b2eac61d610323beb780b7f7574b","README.md":"821cac7eb5b963fc3f3fe21dd890427ab2bbf335cb25cbae89b713b3350687c5","build.rs":"92f7d380f3d8fab1e6d80276915af57192e276321d132a5f800ea4520e9cb469","sql/create_schema.sql":"a17311a407ec10e033886b7125da4c8b84bc6d761f6b28edc9594de430e1d964","sql/create_sync_temp_tables.sql":"860ede362c94feb47d85522553fa2852f9bdb9f9b025d6438dd5dee3d4acd527","sql/tests/create_schema_v1.sql":"77cf0c90eaac3e1aea626537147e1b8ec349b68d6076c92fa7ae402aac613050","src/api.rs":"b3f0ff950178d006e443ddbeec4513e0acaa8894211053cfdfc1de104b9fb6ab","src/db.rs":"04ef67021b6aad7552a268397c7323302c4f619b3fb07fb140132beb8b37f8b5","src/error.rs":"8587813be8e2a7f5efad4216a5c4686554ed44e98cf94bfd9c2f2c9adc8e9a11","src/ffi.rs":"f66a81393bebe7a4b7e7960cb426df106ff1f02bfebcaa6e335b4b8b56c5c936","src/lib.rs":"ab25e7c6ea67fb905fe6dad866c0d2c462b1e93bcff283db947513aeabbb2d73","src/migration.rs":"8d92f82b2ba38e1039fd054c8c75078a6b896a0d3cdc1a52571456b25a32c9c3","src/schema.rs":"d8dd8f66cad71e3e369722734e0d5d16fd9423d5f6a5abba1854a27e1e814724","src/store.rs":"d208689c46fb97cd2c60a0c610ba1998a7132fb50fffa2eefa1d6b169b7c34f0","src/sync/bridge.rs":"996de05beb2904f84b3cbfc9ef85c4844078fdb4867d9068390d496156bee614","src/sync/incoming.rs":"dd77c64e2ade4f39cba258decab6d3db8ad0b5f513aa018efbd56b9869a021d9","src/sync/mod.rs":"05da064e1bc2cc449c806a534842da92d8d4b24a919f2dff2e88dc69f3e926a5","src/sync/outgoing.rs":"dacb77b956f2546fd60a89367927a199d9b662b17201d0781145f7405b61fdce","src/sync/sync_tests.rs":"bc9845312c7b08c5efd892979f61e9385b553f872a6c5d78600f4587b14421f5","src/webext-storage.udl":"0341d431ba837cf64ea210ef6157010c6664a0b5a194e89acb0414938636b391","uniffi.toml":"beeec89c2f877eb89be0090dc304dbc7c74e787385e7459bad78c6165bb66791"},"package":null}

1
third_party/rust/webext-storage/src/db.rs поставляемый
Просмотреть файл

@ -120,7 +120,6 @@ impl ThreadSafeStorageDb {
Arc::clone(&self.interrupt_handle)
}
#[allow(dead_code)]
pub fn begin_interrupt_scope(&self) -> Result<SqlInterruptScope> {
Ok(self.interrupt_handle.begin_interrupt_scope()?)
}

Просмотреть файл

@ -143,11 +143,3 @@ impl From<serde_json::Error> for WebExtStorageApiError {
}
}
}
impl From<anyhow::Error> for WebExtStorageApiError {
fn from(value: anyhow::Error) -> Self {
WebExtStorageApiError::UnexpectedError {
reason: value.to_string(),
}
}
}

15
third_party/rust/webext-storage/src/lib.rs поставляемый
Просмотреть файл

@ -25,7 +25,6 @@ pub use api::SYNC_QUOTA_BYTES_PER_ITEM;
pub use crate::error::{QuotaReason, WebExtStorageApiError};
pub use crate::store::WebExtStorageStore;
pub use crate::sync::{bridge::WebExtStorageBridgedEngine, SyncedExtensionChange};
pub use api::UsageInfo;
pub use api::{StorageChanges, StorageValueChange};
@ -43,17 +42,3 @@ impl UniffiCustomTypeConverter for JsonValue {
obj.to_string()
}
}
// Our UDL uses a `Guid` type.
use sync_guid::Guid;
// Teach UniFFI to pass `Guid` across the FFI boundary as a plain string.
impl UniffiCustomTypeConverter for Guid {
    type Builtin = String;
    // FFI -> Rust: wrap the incoming string; this conversion never fails.
    fn into_custom(val: Self::Builtin) -> uniffi::Result<Guid> {
        Ok(Guid::new(val.as_str()))
    }
    // Rust -> FFI: hand the GUID back in its string form.
    fn from_custom(obj: Self) -> Self::Builtin {
        obj.into()
    }
}

11
third_party/rust/webext-storage/src/store.rs поставляемый
Просмотреть файл

@ -29,7 +29,7 @@ use serde_json::Value as JsonValue;
/// connection with our sync engines - ie, these engines also hold an Arc<>
/// around the same object.
pub struct WebExtStorageStore {
pub(crate) db: Arc<ThreadSafeStorageDb>,
db: Arc<ThreadSafeStorageDb>,
}
impl WebExtStorageStore {
@ -119,9 +119,14 @@ impl WebExtStorageStore {
/// Returns the bytes in use for the specified items (which can be null,
/// a string, or an array)
pub fn get_bytes_in_use(&self, ext_id: &str, keys: JsonValue) -> Result<u64> {
pub fn get_bytes_in_use(&self, ext_id: &str, keys: JsonValue) -> Result<usize> {
let db = self.db.lock();
Ok(api::get_bytes_in_use(&db, ext_id, keys)? as u64)
api::get_bytes_in_use(&db, ext_id, keys)
}
/// Returns a bridged sync engine for Desktop for this store.
pub fn bridged_engine(&self) -> sync::BridgedEngine {
sync::BridgedEngine::new(&self.db)
}
/// Closes the store and its database connection. See the docs for

Просмотреть файл

@ -5,30 +5,18 @@
use anyhow::Result;
use rusqlite::Transaction;
use std::sync::{Arc, Weak};
use sync15::bso::{IncomingBso, OutgoingBso};
use sync15::engine::{ApplyResults, BridgedEngine as Sync15BridgedEngine};
use sync15::bso::IncomingBso;
use sync15::engine::ApplyResults;
use sync_guid::Guid as SyncGuid;
use crate::db::{delete_meta, get_meta, put_meta, ThreadSafeStorageDb};
use crate::schema;
use crate::sync::incoming::{apply_actions, get_incoming, plan_incoming, stage_incoming};
use crate::sync::outgoing::{get_outgoing, record_uploaded, stage_outgoing};
use crate::WebExtStorageStore;
const LAST_SYNC_META_KEY: &str = "last_sync_time";
const SYNC_ID_META_KEY: &str = "sync_id";
impl WebExtStorageStore {
// Returns a bridged sync engine for this store.
pub fn bridged_engine(self: Arc<Self>) -> Arc<WebExtStorageBridgedEngine> {
let engine = Box::new(BridgedEngine::new(&self.db));
let bridged_engine = WebExtStorageBridgedEngine {
bridge_impl: engine,
};
Arc::new(bridged_engine)
}
}
/// A bridged engine implements all the methods needed to make the
/// `storage.sync` store work with Desktop's Sync implementation.
/// Conceptually, it's similar to `sync15::Store`, which we
@ -66,7 +54,7 @@ impl BridgedEngine {
}
}
impl Sync15BridgedEngine for BridgedEngine {
impl sync15::engine::BridgedEngine for BridgedEngine {
fn last_sync(&self) -> Result<i64> {
let shared_db = self.thread_safe_storage_db()?;
let db = shared_db.lock();
@ -194,88 +182,6 @@ impl Sync15BridgedEngine for BridgedEngine {
}
}
/// UniFFI-exposed wrapper around a `Sync15BridgedEngine`: delegates every
/// call and converts BSOs to/from JSON strings at the FFI boundary.
pub struct WebExtStorageBridgedEngine {
    // The wrapped engine implementation.
    bridge_impl: Box<dyn Sync15BridgedEngine>,
}
impl WebExtStorageBridgedEngine {
pub fn new(bridge_impl: Box<dyn Sync15BridgedEngine>) -> Self {
Self { bridge_impl }
}
pub fn last_sync(&self) -> Result<i64> {
self.bridge_impl.last_sync()
}
pub fn set_last_sync(&self, last_sync: i64) -> Result<()> {
self.bridge_impl.set_last_sync(last_sync)
}
pub fn sync_id(&self) -> Result<Option<String>> {
self.bridge_impl.sync_id()
}
pub fn reset_sync_id(&self) -> Result<String> {
self.bridge_impl.reset_sync_id()
}
pub fn ensure_current_sync_id(&self, sync_id: &str) -> Result<String> {
self.bridge_impl.ensure_current_sync_id(sync_id)
}
pub fn prepare_for_sync(&self, client_data: &str) -> Result<()> {
self.bridge_impl.prepare_for_sync(client_data)
}
pub fn store_incoming(&self, incoming: Vec<String>) -> Result<()> {
self.bridge_impl
.store_incoming(self.convert_incoming_bsos(incoming)?)
}
pub fn apply(&self) -> Result<Vec<String>> {
let apply_results = self.bridge_impl.apply()?;
self.convert_outgoing_bsos(apply_results.records)
}
pub fn set_uploaded(&self, server_modified_millis: i64, guids: Vec<SyncGuid>) -> Result<()> {
self.bridge_impl
.set_uploaded(server_modified_millis, &guids)
}
pub fn sync_started(&self) -> Result<()> {
self.bridge_impl.sync_started()
}
pub fn sync_finished(&self) -> Result<()> {
self.bridge_impl.sync_finished()
}
pub fn reset(&self) -> Result<()> {
self.bridge_impl.reset()
}
pub fn wipe(&self) -> Result<()> {
self.bridge_impl.wipe()
}
fn convert_incoming_bsos(&self, incoming: Vec<String>) -> Result<Vec<IncomingBso>> {
let mut bsos = Vec::with_capacity(incoming.len());
for inc in incoming {
bsos.push(serde_json::from_str::<IncomingBso>(&inc)?);
}
Ok(bsos)
}
// Encode OutgoingBso's into JSON for UniFFI
fn convert_outgoing_bsos(&self, outgoing: Vec<OutgoingBso>) -> Result<Vec<String>> {
let mut bsos = Vec::with_capacity(outgoing.len());
for e in outgoing {
bsos.push(serde_json::to_string(&e)?);
}
Ok(bsos)
}
}
impl From<anyhow::Error> for crate::error::Error {
fn from(value: anyhow::Error) -> Self {
crate::error::Error::SyncError(value.to_string())

Просмотреть файл

@ -2,7 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
pub(crate) mod bridge;
mod bridge;
mod incoming;
mod outgoing;
@ -17,6 +17,7 @@ use serde_derive::*;
use sql_support::ConnExt;
use sync_guid::Guid as SyncGuid;
pub use bridge::BridgedEngine;
use incoming::IncomingAction;
type JsonMap = serde_json::Map<String, serde_json::Value>;

Просмотреть файл

@ -5,9 +5,6 @@
[Custom]
typedef string JsonValue;
[Custom]
typedef string Guid;
namespace webextstorage {
};
@ -25,11 +22,6 @@ interface WebExtStorageApiError {
QuotaError(QuotaReason reason);
};
dictionary SyncedExtensionChange {
string ext_id;
string changes;
};
dictionary StorageValueChange {
string key;
JsonValue? old_value;
@ -40,9 +32,6 @@ dictionary StorageChanges {
sequence<StorageValueChange> changes;
};
// Note that the `close` function has been intentionally excluded from `WebExtStorageStore` because at present
// it is not necessary for our current use in mozilla central and converting `close` to pass a reference to
// the store resulted in errors.
interface WebExtStorageStore {
[Throws=WebExtStorageApiError]
constructor(string path);
@ -53,61 +42,9 @@ interface WebExtStorageStore {
[Throws=WebExtStorageApiError]
JsonValue get([ByRef] string ext_id, JsonValue keys);
[Throws=WebExtStorageApiError]
u64 get_bytes_in_use([ByRef] string ext_id, JsonValue keys);
[Throws=WebExtStorageApiError]
StorageChanges remove([ByRef] string ext_id, JsonValue keys);
[Throws=WebExtStorageApiError]
StorageChanges clear([ByRef] string ext_id);
[Self=ByArc]
WebExtStorageBridgedEngine bridged_engine();
[Throws=WebExtStorageApiError]
sequence<SyncedExtensionChange> get_synced_changes();
};
// Note the canonical docs for this are in https://github.com/mozilla/application-services/blob/main/components/sync15/src/engine/bridged_engine.rs
// NOTE: all timestamps here are milliseconds.
interface WebExtStorageBridgedEngine {
[Throws=WebExtStorageApiError]
i64 last_sync();
[Throws=WebExtStorageApiError]
void set_last_sync(i64 last_sync);
[Throws=WebExtStorageApiError]
string? sync_id();
[Throws=WebExtStorageApiError]
string reset_sync_id();
[Throws=WebExtStorageApiError]
string ensure_current_sync_id([ByRef]string new_sync_id);
[Throws=WebExtStorageApiError]
void prepare_for_sync([ByRef]string client_data);
[Throws=WebExtStorageApiError]
void sync_started();
[Throws=WebExtStorageApiError]
void store_incoming(sequence<string> incoming);
[Throws=WebExtStorageApiError]
sequence<string> apply();
[Throws=WebExtStorageApiError]
void set_uploaded(i64 server_modified_millis, sequence<Guid> guids);
[Throws=WebExtStorageApiError]
void sync_finished();
[Throws=WebExtStorageApiError]
void reset();
[Throws=WebExtStorageApiError]
void wipe();
};

Просмотреть файл

@ -4,107 +4,176 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
const STORAGE_SYNC_ENABLED_PREF = "webextensions.storage.sync.enabled";
import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
const NS_ERROR_DOM_QUOTA_EXCEEDED_ERR = 0x80530016;
/** @type {Lazy} */
const lazy = {};
ChromeUtils.defineESModuleGetters(lazy, {
ExtensionCommon: "resource://gre/modules/ExtensionCommon.sys.mjs",
ExtensionUtils: "resource://gre/modules/ExtensionUtils.sys.mjs",
storageSyncService:
"resource://gre/modules/ExtensionStorageComponents.sys.mjs",
QuotaError: "resource://gre/modules/RustWebextstorage.sys.mjs",
// We might end up falling back to kinto...
extensionStorageSyncKinto:
"resource://gre/modules/ExtensionStorageSyncKinto.sys.mjs",
});
XPCOMUtils.defineLazyPreferenceGetter(
lazy,
"prefPermitsStorageSync",
STORAGE_SYNC_ENABLED_PREF,
true
);
// This xpcom service implements a "bridge" from the JS world to the Rust world.
// It sets up the database and implements a callback-based version of the
// browser.storage API.
ChromeUtils.defineLazyGetter(lazy, "storageSvc", () =>
Cc["@mozilla.org/extensions/storage/sync;1"]
.getService(Ci.nsIInterfaceRequestor)
.getInterface(Ci.mozIExtensionStorageArea)
);
// The interfaces which define the callbacks used by the bridge. There's a
// callback for success, failure, and to record data changes.
// Adapts the callback-based XPCOM storage API into a promise-based one.
// Instances implement both mozIExtensionStorageCallback (success/error
// delivery) and mozIExtensionStorageListener (change notifications).
function ExtensionStorageApiCallback(resolve, reject, changeCallback) {
  // Resolves the wrapping promise with the parsed result.
  this.resolve = resolve;
  // Rejects the wrapping promise with an Error carrying the nsresult code.
  this.reject = reject;
  // Called with (extId, parsedChanges) when the storage area reports changes.
  this.changeCallback = changeCallback;
}
ExtensionStorageApiCallback.prototype = {
  QueryInterface: ChromeUtils.generateQI([
    "mozIExtensionStorageListener",
    "mozIExtensionStorageCallback",
  ]),
  // Success path: `result` is a JSON string for operations that return data,
  // or falsy for void operations (which resolve with null).
  handleSuccess(result) {
    this.resolve(result ? JSON.parse(result) : null);
  },
  // Failure path: surface the nsresult `code` on the Error object so callers
  // can inspect it (e.g. quota or migration failures), report it to the
  // console, then reject the promise.
  handleError(code, message) {
    /** @type {Error & { code?: number }} */
    let e = new Error(message);
    e.code = code;
    Cu.reportError(e);
    this.reject(e);
  },
  // Change notification: `json` is a JSON-encoded change set for `extId`.
  // Parse/listener errors are reported but deliberately swallowed so a bad
  // notification cannot break the originating operation's promise.
  onChanged(extId, json) {
    if (this.changeCallback && json) {
      try {
        this.changeCallback(extId, JSON.parse(json));
      } catch (ex) {
        Cu.reportError(ex);
      }
    }
  },
};
// The backing implementation of the browser.storage.sync web extension API.
export class ExtensionStorageSync {
constructor() {
this.listeners = new Map();
// We are optimistic :) If we ever see the special nsresult which indicates
// migration failure, it will become false. In practice, this will only ever
// happen on the first operation.
this.migrationOk = true;
}
async #getRustStore() {
return await lazy.storageSyncService.getStorageAreaInstance();
}
async callRustStoreFn(fnName, extension, ...args) {
let sargs = args.map(val => JSON.stringify(val));
try {
let extId = extension.id;
let rustStore = await this.#getRustStore();
switch (fnName) {
case "set": {
let changes = this._parseRustStorageValueChangeList(
await rustStore.set(extId, ...sargs)
);
this.notifyListeners(extId, changes);
return null;
}
case "remove": {
let changes = this._parseRustStorageValueChangeList(
await rustStore.remove(extId, ...sargs)
);
this.notifyListeners(extId, changes);
return null;
}
case "clear": {
let changes = this._parseRustStorageValueChangeList(
await rustStore.clear(extId)
);
this.notifyListeners(extId, changes);
return null;
}
case "get": {
let result = await rustStore.get(extId, ...sargs);
return JSON.parse(result);
}
case "getBytesInUse": {
let result = await rustStore.getBytesInUse(extId, ...sargs);
return JSON.parse(result);
}
}
} catch (ex) {
// The only "public" exception here is for quota failure - all others
// are sanitized.
let sanitized =
ex instanceof lazy.QuotaError
? // The same message as the local IDB implementation
"QuotaExceededError: storage.sync API call exceeded its quota limitations."
: // The standard, generic extension error.
"An unexpected error occurred";
throw new lazy.ExtensionUtils.ExtensionError(sanitized);
// The main entry-point to our bridge. It performs some important roles:
// * Ensures the API is allowed to be used.
// * Works out what "extension id" to use.
// * Turns the callback API into a promise API.
async _promisify(fnName, extension, context, ...args) {
let extId = extension.id;
if (lazy.prefPermitsStorageSync !== true) {
throw new lazy.ExtensionUtils.ExtensionError(
`Please set ${STORAGE_SYNC_ENABLED_PREF} to true in about:config`
);
}
if (this.migrationOk) {
// We can call ours.
try {
return await new Promise((resolve, reject) => {
let callback = new ExtensionStorageApiCallback(
resolve,
reject,
(extId, changes) => this.notifyListeners(extId, changes)
);
let sargs = args.map(val => JSON.stringify(val));
lazy.storageSvc[fnName](extId, ...sargs, callback);
});
} catch (ex) {
if (ex.code != Cr.NS_ERROR_CANNOT_CONVERT_DATA) {
// Some non-migration related error we want to sanitize and propagate.
// The only "public" exception here is for quota failure - all others
// are sanitized.
let sanitized =
ex.code == NS_ERROR_DOM_QUOTA_EXCEEDED_ERR
? // The same message as the local IDB implementation
`QuotaExceededError: storage.sync API call exceeded its quota limitations.`
: // The standard, generic extension error.
"An unexpected error occurred";
throw new lazy.ExtensionUtils.ExtensionError(sanitized);
}
// This means "migrate failed" so we must fall back to kinto.
Cu.reportError(
"migration of extension-storage failed - will fall back to kinto"
);
this.migrationOk = false;
}
}
// We've detected failure to migrate, so we want to use kinto.
return lazy.extensionStorageSyncKinto[fnName](extension, ...args, context);
}
async set(extension, items) {
return await this.callRustStoreFn("set", extension, items);
set(extension, items, context) {
return this._promisify("set", extension, context, items);
}
async remove(extension, keys) {
return await this.callRustStoreFn("remove", extension, keys);
remove(extension, keys, context) {
return this._promisify("remove", extension, context, keys);
}
async clear(extension) {
return await this.callRustStoreFn("clear", extension);
clear(extension, context) {
return this._promisify("clear", extension, context);
}
async clearOnUninstall(extensionId) {
clearOnUninstall(extensionId) {
if (!this.migrationOk) {
// If the rust-based backend isn't being used,
// no need to clear it.
return;
}
// Resolve the returned promise once the request has been either resolved
// or rejected (and report the error on the browser console in case of
// unexpected clear failures on addon uninstall).
try {
let rustStore = await this.#getRustStore();
await rustStore.clear(extensionId);
} catch (err) {
Cu.reportError(err);
}
return new Promise(resolve => {
const callback = new ExtensionStorageApiCallback(
resolve,
err => {
Cu.reportError(err);
resolve();
},
// empty changeCallback (no need to notify the extension
// while clearing the extension on uninstall).
() => {}
);
lazy.storageSvc.clear(extensionId, callback);
});
}
async get(extension, spec) {
return await this.callRustStoreFn("get", extension, spec);
get(extension, spec, context) {
return this._promisify("get", extension, context, spec);
}
async getBytesInUse(extension, keys) {
return await this.callRustStoreFn("getBytesInUse", extension, keys);
getBytesInUse(extension, keys, context) {
return this._promisify("getBytesInUse", extension, context, keys);
}
addOnChangedListener(extension, listener) {
@ -121,23 +190,8 @@ export class ExtensionStorageSync {
}
}
_parseRustStorageValueChangeList(changeSets) {
let changes = {};
for (let change of changeSets.changes) {
changes[change.key] = {};
if (change.oldValue) {
changes[change.key].oldValue = JSON.parse(change.oldValue);
}
if (change.newValue) {
changes[change.key].newValue = JSON.parse(change.newValue);
}
}
return changes;
}
notifyListeners(extId, changes) {
let listeners = this.listeners.get(extId) || new Set();
if (listeners) {
for (let listener of listeners) {
lazy.ExtensionCommon.runSafeSyncWithoutClone(listener, changes);

Просмотреть файл

@ -15,6 +15,7 @@ const KINTO_PROD_SERVER_URL =
"https://webextensions.settings.services.mozilla.com/v1";
const KINTO_DEFAULT_SERVER_URL = KINTO_PROD_SERVER_URL;
const STORAGE_SYNC_ENABLED_PREF = "webextensions.storage.sync.enabled";
const STORAGE_SYNC_SERVER_URL_PREF = "webextensions.storage.sync.serverURL";
const STORAGE_SYNC_SCOPE = "sync:addon_storage";
const STORAGE_SYNC_CRYPTO_COLLECTION_NAME = "storage-sync-crypto";
@ -62,6 +63,12 @@ ChromeUtils.defineLazyGetter(lazy, "fxAccounts", () => {
).getFxAccountsSingleton();
});
XPCOMUtils.defineLazyPreferenceGetter(
lazy,
"prefPermitsStorageSync",
STORAGE_SYNC_ENABLED_PREF,
true
);
XPCOMUtils.defineLazyPreferenceGetter(
lazy,
"prefStorageSyncServerURL",
@ -1196,6 +1203,11 @@ export class ExtensionStorageSyncKinto {
* @returns {Promise<Collection>}
*/
getCollection(extension, context) {
if (lazy.prefPermitsStorageSync !== true) {
return Promise.reject({
message: `Please set ${STORAGE_SYNC_ENABLED_PREF} to true in about:config`,
});
}
this.registerInUse(extension, context);
return openCollection(extension);
}

Просмотреть файл

@ -7,9 +7,27 @@ This document describes the implementation of the `storage.sync` part of the
<https://developer.mozilla.org/docs/Mozilla/Add-ons/WebExtensions/API/storage>`_.
The implementation lives in the `toolkit/components/extensions/storage folder <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage>`_
Ideally you would already know about Rust and XPCOM - `see this doc for more details <../../../../writing-rust-code/index.html>`_
At a very high-level, the system looks like:
.. mermaid::
graph LR
A[Extensions API]
A --> B[Storage JS API]
B --> C{magic}
C --> D[app-services component]
Where "magic" is actually the most interesting part and the primary focus of this document.
Note: The general mechanism described below is also used for other Rust components from the
app-services team - for example, "dogear" uses a similar mechanism, and the sync engines
too (but with even more complexity) to manage the threads. Unfortunately, at time of writing,
no code is shared and it's not clear how we would, but this might change as more Rust lands.
The app-services component `lives on github <https://github.com/mozilla/application-services/blob/main/components/webext-storage>`_.
There are docs that describe `how to update/vendor this (and all) external rust code <../../../../build/buildsystem/rust.html>`_ you might be interested in.
We use UniFFI to generate JS bindings for the components. More details about UniFFI can be found in `these docs <https://searchfox.org/mozilla-central/source/docs/writing-rust-code/uniffi.md>`_.
To set the scene, let's look at the parts exposed to WebExtensions first; there are lots of
moving parts there too.
@ -19,7 +37,12 @@ WebExtension API
The WebExtension API is owned by the addons team. The implementation of this API is quite complex
as it involves multiple processes, but for the sake of this document, we can consider the entry-point
into the WebExtension Storage API as being `parent/ext-storage.js <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/parent/ext-storage.js>`_.
into the WebExtension Storage API as being `parent/ext-storage.js <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/parent/ext-storage.js>`_
This entry-point ends up using the implementation in the
`ExtensionStorageSync JS class <https://searchfox.org/mozilla-central/rev/9028b0458cc1f432870d2996b186b0938dda734a/toolkit/components/extensions/ExtensionStorageSync.jsm#84>`_.
This class/module has complexity for things like migration from the earlier Kinto-based backend,
but importantly, code to adapt a callback API into a promise based one.
Overview of the API
###################
@ -34,3 +57,171 @@ The semantics of the API are beyond this doc but are
As you will see in those docs, the API is promise-based, but the rust implementation is fully
synchronous and Rust knows nothing about Javascript promises - so this system converts
the callback-based API to a promise-based one.
xpcom as the interface to Rust
##############################
xpcom is old Mozilla technology that uses C++ "vtables" to implement "interfaces", which are
described in IDL files. While this traditionally was used to interface
C++ and Javascript, we are leveraging existing support for Rust. The interface we are
exposing is described in `mozIExtensionStorageArea.idl <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/mozIExtensionStorageArea.idl>`_
The main interface of interest in this IDL file is `mozIExtensionStorageArea`.
This interface defines the functionality - and is the first layer in the sync to async model.
For example, this interface defines the following method:
.. code-block:: cpp
interface mozIExtensionStorageArea : nsISupports {
...
// Sets one or more key-value pairs specified in `json` for the
// `extensionId`...
void set(in AUTF8String extensionId,
in AUTF8String json,
in mozIExtensionStorageCallback callback);
As you will notice, the 3rd arg is another interface, `mozIExtensionStorageCallback`, also
defined in that IDL file. This is a small, generic interface defined as:
.. code-block:: cpp
interface mozIExtensionStorageCallback : nsISupports {
// Called when the operation completes. Operations that return a result,
// like `get`, will pass a `UTF8String` variant. Those that don't return
// anything, like `set` or `remove`, will pass a `null` variant.
void handleSuccess(in nsIVariant result);
// Called when the operation fails.
void handleError(in nsresult code, in AUTF8String message);
};
Note that this delivers all results and errors, so must be capable of handling
every result type, which for some APIs may be problematic - but we are very lucky with this API
that this simple XPCOM callback interface is capable of reasonably representing the return types
from every function in the `mozIExtensionStorageArea` interface.
(There's another interface, `mozIExtensionStorageListener` which is typically
also implemented by the actual callback to notify the extension about changes,
but that's beyond the scope of this doc.)
*Note the thread model here is async* - the `set` call will return immediately, and later, on
the main thread, we will call the callback param with the result of the operation.
So under the hood, what happens is something like:
.. mermaid::
sequenceDiagram
Extension->>ExtensionStorageSync: call `set` and give me a promise
ExtensionStorageSync->>xpcom: call `set`, supplying new data and a callback
ExtensionStorageSync-->>Extension: your promise
xpcom->>xpcom: thread magic in the "bridge"
xpcom-->>ExtensionStorageSync: callback!
ExtensionStorageSync-->>Extension: promise resolved
So onto the thread magic in the bridge!
webext_storage_bridge
#####################
The `webext_storage_bridge <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge>`_
is a Rust crate which, as implied by the name, is a "bridge" between this Javascript/XPCOM world to
the actual `webext-storage <https://github.com/mozilla/application-services/tree/main/components/webext-storage>`_ crate.
lib.rs
------
Is the entry-point - it defines the xpcom "factory function" -
an `extern "C"` function which is called by xpcom to create the Rust object
implementing `mozIExtensionStorageArea` using existing gecko support.
area.rs
-------
This module defines the interface itself. For example, inside that file you will find:
.. code-block:: rust
impl StorageSyncArea {
...
xpcom_method!(
set => Set(
ext_id: *const ::nsstring::nsACString,
json: *const ::nsstring::nsACString,
callback: *const mozIExtensionStorageCallback
)
);
/// Sets one or more key-value pairs.
fn set(
&self,
ext_id: &nsACString,
json: &nsACString,
callback: &mozIExtensionStorageCallback,
) -> Result<()> {
self.dispatch(
Punt::Set {
ext_id: str::from_utf8(&*ext_id)?.into(),
value: serde_json::from_str(str::from_utf8(&*json)?)?,
},
callback,
)?;
Ok(())
}
Of interest here:
* `xpcom_method` is a Rust macro, and part of the existing xpcom integration which already exists
in gecko. It declares the xpcom vtable method described in the IDL.
* The `set` function is the implementation - it does string conversions and the JSON parsing
on the main thread, then does the work via the supplied callback param, `self.dispatch` and a `Punt`.
* The `dispatch` method dispatches to another thread, leveraging existing in-tree `moz_task <https://searchfox.org/mozilla-central/source/xpcom/rust/moz_task>`_ support, shifting the `Punt` to another thread and making the callback when done.
Punt
----
`Punt` is a whimsical name somewhat related to a "bridge" - it carries things across and back.
It is a fairly simple enum in `punt.rs <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs>`_.
It's really just a restatement of the API we expose suitable for moving across threads. In short, the `Punt` is created on the main thread,
then sent to the background thread where the actual operation runs via a `PuntTask` and returns a `PuntResult`.
There's a few dances that go on, but the end result is that `inner_run() <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs>`_
gets executed on the background thread - so for `Set`:
.. code-block:: rust
Punt::Set { ext_id, value } => {
PuntResult::with_change(&ext_id, self.store()?.get()?.set(&ext_id, value)?)?
}
Here, `self.store()` is a wrapper around the actual Rust implementation from app-services with
various initialization and mutex dances involved - see `store.rs`.
ie, this function is calling our Rust implementation and stashing the result in a `PuntResult`
The `PuntResult` is private to that file but is a simple struct that encapsulates both
the actual result of the function (also a set of changes to send to observers, but that's
beyond this doc).
Ultimately, the `PuntResult` ends up back on the main thread once the call is complete
and arranges to callback the JS implementation, which in turn resolves the promise created in `ExtensionStorageSync.sys.mjs`
End result:
-----------
.. mermaid::
sequenceDiagram
Extension->>ExtensionStorageSync: call `set` and give me a promise
ExtensionStorageSync->>xpcom - bridge main thread: call `set`, supplying new data and a callback
ExtensionStorageSync-->>Extension: your promise
xpcom - bridge main thread->>moz_task worker thread: Punt this
moz_task worker thread->>webext-storage: write this data to the database
webext-storage->>webext-storage: done: result/error and observers
webext-storage-->>moz_task worker thread: ...
moz_task worker thread-->>xpcom - bridge main thread: PuntResult
xpcom - bridge main thread-->>ExtensionStorageSync: callback!
ExtensionStorageSync-->>Extension: promise resolved

Просмотреть файл

@ -0,0 +1,40 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_extensions_storage_ExtensionStorageComponents_h_
#define mozilla_extensions_storage_ExtensionStorageComponents_h_
#include "mozIExtensionStorageArea.h"
#include "nsCOMPtr.h"
extern "C" {
// Implemented in Rust, in the `webext_storage_bridge` crate.
nsresult NS_NewExtensionStorageSyncArea(mozIExtensionStorageArea** aResult);
} // extern "C"
namespace mozilla {
namespace extensions {
namespace storage {
// C++ constructor for a `storage.sync` area. `components.conf` requires a
// component constructor that returns an `already_AddRefed<T>`, a type Rust
// can't produce directly. We therefore let the Rust factory (declared above)
// fill in a `nsCOMPtr` out-param — which is compatible with Rust's
// `xpcom::RefPtr` — and then hand ownership to the caller.
already_AddRefed<mozIExtensionStorageArea> NewSyncArea() {
  nsCOMPtr<mozIExtensionStorageArea> area;
  nsresult rv = NS_NewExtensionStorageSyncArea(getter_AddRefs(area));
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return nullptr;
  }
  return area.forget();
}
} // namespace storage
} // namespace extensions
} // namespace mozilla
#endif // mozilla_extensions_storage_ExtensionStorageComponents_h_

Просмотреть файл

@ -5,21 +5,114 @@
const lazy = {};
ChromeUtils.defineESModuleGetters(lazy, {
WebExtStorageStore: "resource://gre/modules/RustWebextstorage.sys.mjs",
AsyncShutdown: "resource://gre/modules/AsyncShutdown.sys.mjs",
FileUtils: "resource://gre/modules/FileUtils.sys.mjs",
});
function StorageSyncService() {}
const StorageSyncArea = Components.Constructor(
"@mozilla.org/extensions/storage/internal/sync-area;1",
"mozIConfigurableExtensionStorageArea",
"configure"
);
/**
* An XPCOM service for the WebExtension `storage.sync` API. The service manages
* a storage area for storing and syncing extension data.
*
* The service configures its storage area with the database path, and hands
* out references to the configured area via `getInterface`. It also registers
* a shutdown blocker to automatically tear down the area.
*
* ## What's the difference between `storage/internal/storage-sync-area;1` and
* `storage/sync;1`?
*
* `components.conf` has two classes:
* `@mozilla.org/extensions/storage/internal/sync-area;1` and
* `@mozilla.org/extensions/storage/sync;1`.
*
* The `storage/internal/sync-area;1` class is implemented in Rust, and can be
* instantiated using `createInstance` and `Components.Constructor`. It's not
* a singleton, so creating a new instance will create a new `storage.sync`
* area, with its own database connection. It's useful for testing, but not
* meant to be used outside of this module.
*
* The `storage/sync;1` class is implemented in this file. It's a singleton,
* ensuring there's only one `storage.sync` area, with one database connection.
* The service implements `nsIInterfaceRequestor`, so callers can access the
* storage interface like this:
*
* let storageSyncArea = Cc["@mozilla.org/extensions/storage/sync;1"]
* .getService(Ci.nsIInterfaceRequestor)
* .getInterface(Ci.mozIExtensionStorageArea);
*
* ...And the Sync interface like this:
*
* let extensionStorageEngine = Cc["@mozilla.org/extensions/storage/sync;1"]
* .getService(Ci.nsIInterfaceRequestor)
* .getInterface(Ci.mozIBridgedSyncEngine);
*
* @class
*/
export function StorageSyncService() {
  // Manual singleton: constructing the service again returns the existing
  // instance, guaranteeing one storage area (and one database connection)
  // per process.
  if (StorageSyncService._singleton) {
    return StorageSyncService._singleton;
  }
  // The current storage database, plus the legacy Kinto database to migrate
  // from. The Kinto path is always passed, even when the file may not exist.
  let file = new lazy.FileUtils.File(
    PathUtils.join(PathUtils.profileDir, "storage-sync-v2.sqlite")
  );
  let kintoFile = new lazy.FileUtils.File(
    PathUtils.join(PathUtils.profileDir, "storage-sync.sqlite")
  );
  // Configure the underlying (non-singleton) XPCOM storage area with both
  // database paths.
  this._storageArea = new StorageSyncArea(file, kintoFile);
  // Register a blocker to close the storage connection on shutdown.
  this._shutdownBound = () => this._shutdown();
  lazy.AsyncShutdown.profileChangeTeardown.addBlocker(
    "StorageSyncService: shutdown",
    this._shutdownBound
  );
  StorageSyncService._singleton = this;
}
StorageSyncService._singleton = null;
StorageSyncService.prototype = {
_storageAreaPromise: null,
async getStorageAreaInstance() {
if (!this._storageAreaPromise) {
let path = PathUtils.join(PathUtils.profileDir, "storage-sync-v2.sqlite");
this._storageAreaPromise = lazy.WebExtStorageStore.init(path);
}
QueryInterface: ChromeUtils.generateQI(["nsIInterfaceRequestor"]),
return await this._storageAreaPromise;
// Returns the storage and syncing interfaces. This just hands out a
// reference to the underlying storage area, with a quick check to make sure
// that callers are asking for the right interfaces.
getInterface(iid) {
if (
iid.equals(Ci.mozIExtensionStorageArea) ||
iid.equals(Ci.mozIBridgedSyncEngine)
) {
return this._storageArea.QueryInterface(iid);
}
throw Components.Exception(
"This interface isn't implemented",
Cr.NS_ERROR_NO_INTERFACE
);
},
// Tears down the storage area and lifts the blocker so that shutdown can
// continue.
async _shutdown() {
try {
await new Promise((resolve, reject) => {
this._storageArea.teardown({
handleSuccess: resolve,
handleError(code, message) {
reject(Components.Exception(message, code));
},
});
});
} finally {
lazy.AsyncShutdown.profileChangeTeardown.removeBlocker(
this._shutdownBound
);
}
},
};
export var storageSyncService = new StorageSyncService();

Просмотреть файл

@ -0,0 +1,22 @@
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
Classes = [
    {
        # The Rust-backed `storage.sync` area. Not a singleton: each
        # createInstance() opens its own storage area, so this contract is
        # useful for testing but not meant to be used directly outside this
        # component.
        'cid': '{f1e424f2-67fe-4f69-a8f8-3993a71f44fa}',
        'contract_ids': ['@mozilla.org/extensions/storage/internal/sync-area;1'],
        'type': 'mozIExtensionStorageArea',
        'headers': ['mozilla/extensions/storage/ExtensionStorageComponents.h'],
        'constructor': 'mozilla::extensions::storage::NewSyncArea',
    },
    {
        # The JS singleton service wrapping a single configured area; callers
        # reach the storage/sync interfaces through nsIInterfaceRequestor.
        'cid': '{5b7047b4-fe17-4661-8e13-871402bc2023}',
        'contract_ids': ['@mozilla.org/extensions/storage/sync;1'],
        'esModule': 'resource://gre/modules/ExtensionStorageComponents.sys.mjs',
        'constructor': 'StorageSyncService',
        'singleton': True,
    },
]

Просмотреть файл

@ -7,13 +7,27 @@
with Files("**"):
BUG_COMPONENT = ("WebExtensions", "Storage")
XPIDL_MODULE = "webextensions-storage"
XPIDL_SOURCES += [
"mozIExtensionStorageArea.idl",
]
# Don't build the Rust `storage.sync` bridge for GeckoView, as it will expose
# a delegate for consumers to use instead. Android Components can then provide
# an implementation of the delegate that's backed by the Rust component. For
# details, please see bug 1626506, comment 4.
if CONFIG["MOZ_WIDGET_TOOLKIT"] != "android":
EXPORTS.mozilla.extensions.storage += [
"ExtensionStorageComponents.h",
]
EXTRA_JS_MODULES += [
"ExtensionStorageComponents.sys.mjs",
]
XPCOM_MANIFESTS += [
"components.conf",
]
FINAL_LIBRARY = "xul"

Просмотреть файл

@ -0,0 +1,127 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "nsISupports.idl"
interface mozIExtensionStorageCallback;
interface nsIFile;
interface nsIVariant;
// Implements the operations needed to support the `StorageArea` WebExtension
// API.
[scriptable, uuid(d8eb3ff1-9b4b-435a-99ca-5b8cbaba2420)]
interface mozIExtensionStorageArea : nsISupports {
  // These constants are exposed by the rust crate, but it's not worth the
  // effort of jumping through the hoops to get them exposed to the JS
  // code in a sane way - so we just duplicate them here. We should consider a
  // test that checks they match the rust code.
  // This interface is agnostic WRT the area, so we prefix the constants with
  // the area - it's the consumer of this interface which knows what to use.
  const unsigned long SYNC_QUOTA_BYTES = 102400;
  const unsigned long SYNC_QUOTA_BYTES_PER_ITEM = 8192;
  const unsigned long SYNC_MAX_ITEMS = 512;

  // Sets one or more key-value pairs specified in `json` for the
  // `extensionId`. If the `callback` implements
  // `mozIExtensionStorageListener`, its `onChanged`
  // method will be called with the new and old values.
  void set(in AUTF8String extensionId,
           in AUTF8String json,
           in mozIExtensionStorageCallback callback);

  // Returns the value for the `key` in the storage area for the
  // `extensionId`. `key` must be a JSON string containing either `null`,
  // an array of string key names, a single string key name, or an object
  // where the properties are the key names, and the values are the defaults
  // if the key name doesn't exist in the storage area.
  //
  // If `get()` fails due to the quota being exceeded, the exception will
  // have a result code of NS_ERROR_DOM_QUOTA_EXCEEDED_ERR (==0x80530016)
  void get(in AUTF8String extensionId,
           in AUTF8String key,
           in mozIExtensionStorageCallback callback);

  // Removes the `key` from the storage area for the `extensionId`. If `key`
  // exists and the `callback` implements `mozIExtensionStorageListener`, its
  // `onChanged` method will be called with the removed key-value pair.
  void remove(in AUTF8String extensionId,
              in AUTF8String key,
              in mozIExtensionStorageCallback callback);

  // Removes all keys from the storage area for the `extensionId`. If
  // `callback` implements `mozIExtensionStorageListener`, its `onChanged`
  // method will be called with all removed key-value pairs.
  void clear(in AUTF8String extensionId,
             in mozIExtensionStorageCallback callback);

  // Gets the number of bytes in use for the specified keys.
  void getBytesInUse(in AUTF8String extensionId,
                     in AUTF8String keys,
                     in mozIExtensionStorageCallback callback);

  // Gets and clears the information about the migration from the kinto
  // database into the rust one. As "and clears" indicates, this will
  // only produce a non-empty result the first time it's called after a
  // migration (which, hopefully, should only happen once).
  void takeMigrationInfo(in mozIExtensionStorageCallback callback);
};
// Implements additional methods for setting up and tearing down the underlying
// database connection for a storage area. This is a separate interface because
// these methods are not part of the `StorageArea` API, and have restrictions on
// when they can be called.
[scriptable, uuid(2b008295-1bcc-4610-84f1-ad4cab2fa9ee)]
interface mozIConfigurableExtensionStorageArea : nsISupports {
// Sets up the storage area. An area can only be configured once; calling
// `configure` multiple times will throw. `configure` must also be called
// before any of the `mozIExtensionStorageArea` methods, or they'll fail
// with errors.
// The second param is the path to the kinto database file from which we
// should migrate. This should always be specified even when there's a
// chance the file doesn't exist.
void configure(in nsIFile databaseFile, in nsIFile kintoFile);
// Tears down the storage area, closing the backing database connection.
// This is called automatically when Firefox shuts down. Once a storage area
// has been shut down, all its methods will fail with errors. If `configure`
// hasn't been called for this area yet, `teardown` is a no-op.
void teardown(in mozIExtensionStorageCallback callback);
};
// Implements additional methods for syncing a storage area. This is a separate
// interface because these methods are not part of the `StorageArea` API, and
// have restrictions on when they can be called.
[scriptable, uuid(6dac82c9-1d8a-4893-8c0f-6e626aef802c)]
interface mozISyncedExtensionStorageArea : nsISupports {
// If a sync is in progress, this method fetches pending change
// notifications for all extensions whose storage areas were updated.
// `callback` should implement `mozIExtensionStorageListener` to forward
// the records to `storage.onChanged` listeners. This method should only
// be called by Sync, after `mozIBridgedSyncEngine.apply` and before
// `syncFinished`. It fetches nothing if called at any other time.
void fetchPendingSyncChanges(in mozIExtensionStorageCallback callback);
};
// A listener for storage area notifications.
[scriptable, uuid(8cb3c7e4-d0ca-4353-bccd-2673b4e11510)]
interface mozIExtensionStorageListener : nsISupports {
// Notifies that an operation has data to pass to `storage.onChanged`
// listeners for the given `extensionId`. `json` is a JSON array of listener
// infos. If an operation affects multiple extensions, this method will be
// called multiple times, once per extension.
// Implementors should not assume any particular ordering between extensions.
void onChanged(in AUTF8String extensionId, in AUTF8String json);
};
// A generic callback for a storage operation. Either `handleSuccess` or
// `handleError` is guaranteed to be called once.
[scriptable, uuid(870dca40-6602-4748-8493-c4253eb7f322)]
interface mozIExtensionStorageCallback : nsISupports {
// Called when the operation completes. Operations that return a result,
// like `get`, will pass a `UTF8String` variant. Those that don't return
// anything, like `set` or `remove`, will pass a `null` variant.
void handleSuccess(in nsIVariant result);
// Called when the operation fails. `code` is the failing nsresult and
// `message` is a human-readable description of the failure.
void handleError(in nsresult code, in AUTF8String message);
};

Просмотреть файл

@ -0,0 +1,25 @@
# Crate manifest for the WebExtension `storage.sync` XPCOM bridge.
# See `src/lib.rs` for an overview of how this crate fits into Firefox.
[package]
name = "webext_storage_bridge"
description = "The WebExtension `storage.sync` bindings for Firefox"
version = "0.1.0"
authors = ["The Firefox Sync Developers <sync-team@mozilla.com>"]
edition = "2018"
license = "MPL-2.0"
# Path dependencies point at in-tree crates; bare versions are resolved
# against the vendored third-party sources.
[dependencies]
anyhow = "1.0"
atomic_refcell = "0.1"
cstr = "0.2"
golden_gate = { path = "../../../../../services/sync/golden_gate" }
interrupt-support = "0.1"
moz_task = { path = "../../../../../xpcom/rust/moz_task" }
nserror = { path = "../../../../../xpcom/rust/nserror" }
nsstring = { path = "../../../../../xpcom/rust/nsstring" }
once_cell = "1"
thin-vec = { version = "0.2.1", features = ["gecko-ffi"] }
xpcom = { path = "../../../../../xpcom/rust/xpcom" }
serde = "1"
serde_json = "1"
storage_variant = { path = "../../../../../storage/variant" }
sql-support = "0.1"
webext-storage = "0.1"

Просмотреть файл

@ -0,0 +1,484 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::{
cell::{Ref, RefCell},
convert::TryInto,
ffi::OsString,
mem,
path::PathBuf,
str,
sync::Arc,
};
use golden_gate::{ApplyTask, BridgedEngine, FerryTask};
use moz_task::{DispatchOptions, TaskRunnable};
use nserror::{nsresult, NS_OK};
use nsstring::{nsACString, nsCString, nsString};
use thin_vec::ThinVec;
use webext_storage::STORAGE_VERSION;
use xpcom::{
interfaces::{
mozIBridgedSyncEngineApplyCallback, mozIBridgedSyncEngineCallback,
mozIExtensionStorageCallback, mozIServicesLogSink, nsIFile, nsISerialEventTarget,
},
RefPtr,
};
use crate::error::{Error, Result};
use crate::punt::{Punt, PuntTask, TeardownTask};
use crate::store::{LazyStore, LazyStoreConfig};
/// Converts an `nsIFile` into a platform-native `PathBuf`.
///
/// Returns an error if `GetPath` fails, or (on non-Windows platforms) if the
/// returned UTF-16 path contains an unpaired surrogate and can't be decoded.
fn path_from_nsifile(file: &nsIFile) -> Result<PathBuf> {
    let mut raw_path = nsString::new();
    // `nsIFile::GetPath` gives us a UTF-16-encoded version of its
    // native path, which we must turn back into a platform-native
    // string. We can't use `nsIFile::nativePath()` here because
    // it's marked as `nostdcall`, which Rust doesn't support.
    unsafe { file.GetPath(&mut *raw_path) }.to_result()?;
    let native_path = {
        // On Windows, we can create a native string directly from the
        // encoded path.
        #[cfg(windows)]
        {
            use std::os::windows::prelude::*;
            OsString::from_wide(&raw_path)
        }
        // On other platforms, we must first decode the raw path from
        // UTF-16, and then create our native string. Decoding failures
        // surface as `Error::MalformedString` via the `FromUtf16Error` impl.
        #[cfg(not(windows))]
        OsString::from(String::from_utf16(&raw_path)?)
    };
    Ok(native_path.into())
}
/// An XPCOM component class for the Rust extension storage API. This class
/// implements the interfaces needed for syncing and storage.
///
/// This class can be created on any thread, but must not be shared between
/// threads. In Rust terms, it's `Send`, but not `Sync`.
///
/// The `#[xpcom]` attribute generates the XPCOM boilerplate, including the
/// `InitStorageSyncArea` initializer struct used by `StorageSyncArea::allocate`.
#[xpcom(
    implement(
        mozIExtensionStorageArea,
        mozIConfigurableExtensionStorageArea,
        mozISyncedExtensionStorageArea,
        mozIInterruptible,
        mozIBridgedSyncEngine
    ),
    nonatomic
)]
pub struct StorageSyncArea {
    /// A background task queue, used to run all our storage operations on a
    /// thread pool. Using a serial event target here means that all operations
    /// will execute sequentially.
    queue: RefPtr<nsISerialEventTarget>,
    /// The store is lazily initialized on the task queue the first time it's
    /// used. `None` means the area has been torn down.
    store: RefCell<Option<Arc<LazyStore>>>,
}
/// `mozIExtensionStorageArea` implementation.
///
/// Each `xpcom_method!` invocation generates the XPCOM-facing wrapper that
/// unpacks raw pointers and forwards to the snake_case Rust method below it.
impl StorageSyncArea {
    /// Creates a storage area and its task queue.
    pub fn new() -> Result<RefPtr<StorageSyncArea>> {
        let queue = moz_task::create_background_task_queue(cstr!("StorageSyncArea"))?;
        Ok(StorageSyncArea::allocate(InitStorageSyncArea {
            queue,
            store: RefCell::new(Some(Arc::default())),
        }))
    }
    /// Returns the store for this area, or an error if it's been torn down.
    fn store(&self) -> Result<Ref<'_, Arc<LazyStore>>> {
        let maybe_store = self.store.borrow();
        if maybe_store.is_some() {
            Ok(Ref::map(maybe_store, |s| s.as_ref().unwrap()))
        } else {
            Err(Error::AlreadyTornDown)
        }
    }
    /// Dispatches a task for a storage operation to the task queue.
    fn dispatch(&self, punt: Punt, callback: &mozIExtensionStorageCallback) -> Result<()> {
        let name = punt.name();
        // The task only holds a `Weak` reference to the store, so pending
        // operations never keep the store alive past `teardown`.
        let task = PuntTask::new(Arc::downgrade(&*self.store()?), punt, callback)?;
        let runnable = TaskRunnable::new(name, Box::new(task))?;
        // `may_block` schedules the runnable on a dedicated I/O pool.
        TaskRunnable::dispatch_with_options(
            runnable,
            self.queue.coerce(),
            DispatchOptions::new().may_block(true),
        )?;
        Ok(())
    }
    xpcom_method!(
        configure => Configure(
            database_file: *const nsIFile,
            kinto_file: *const nsIFile
        )
    );
    /// Sets up the storage area. Fails with `AlreadyConfigured` if called
    /// more than once.
    fn configure(&self, database_file: &nsIFile, kinto_file: &nsIFile) -> Result<()> {
        self.store()?.configure(LazyStoreConfig {
            path: path_from_nsifile(database_file)?,
            kinto_path: path_from_nsifile(kinto_file)?,
        })?;
        Ok(())
    }
    xpcom_method!(
        set => Set(
            ext_id: *const ::nsstring::nsACString,
            json: *const ::nsstring::nsACString,
            callback: *const mozIExtensionStorageCallback
        )
    );
    /// Sets one or more key-value pairs. `json` must be a JSON document;
    /// malformed UTF-8 or JSON is rejected on the calling thread.
    fn set(
        &self,
        ext_id: &nsACString,
        json: &nsACString,
        callback: &mozIExtensionStorageCallback,
    ) -> Result<()> {
        self.dispatch(
            Punt::Set {
                ext_id: str::from_utf8(ext_id)?.into(),
                value: serde_json::from_str(str::from_utf8(json)?)?,
            },
            callback,
        )?;
        Ok(())
    }
    xpcom_method!(
        get => Get(
            ext_id: *const ::nsstring::nsACString,
            json: *const ::nsstring::nsACString,
            callback: *const mozIExtensionStorageCallback
        )
    );
    /// Gets values for one or more keys.
    fn get(
        &self,
        ext_id: &nsACString,
        json: &nsACString,
        callback: &mozIExtensionStorageCallback,
    ) -> Result<()> {
        self.dispatch(
            Punt::Get {
                ext_id: str::from_utf8(ext_id)?.into(),
                keys: serde_json::from_str(str::from_utf8(json)?)?,
            },
            callback,
        )
    }
    xpcom_method!(
        remove => Remove(
            ext_id: *const ::nsstring::nsACString,
            json: *const ::nsstring::nsACString,
            callback: *const mozIExtensionStorageCallback
        )
    );
    /// Removes one or more keys and their values.
    fn remove(
        &self,
        ext_id: &nsACString,
        json: &nsACString,
        callback: &mozIExtensionStorageCallback,
    ) -> Result<()> {
        self.dispatch(
            Punt::Remove {
                ext_id: str::from_utf8(ext_id)?.into(),
                keys: serde_json::from_str(str::from_utf8(json)?)?,
            },
            callback,
        )
    }
    xpcom_method!(
        clear => Clear(
            ext_id: *const ::nsstring::nsACString,
            callback: *const mozIExtensionStorageCallback
        )
    );
    /// Removes all keys and values for the specified extension.
    fn clear(&self, ext_id: &nsACString, callback: &mozIExtensionStorageCallback) -> Result<()> {
        self.dispatch(
            Punt::Clear {
                ext_id: str::from_utf8(ext_id)?.into(),
            },
            callback,
        )
    }
    xpcom_method!(
        getBytesInUse => GetBytesInUse(
            ext_id: *const ::nsstring::nsACString,
            keys: *const ::nsstring::nsACString,
            callback: *const mozIExtensionStorageCallback
        )
    );
    /// Obtains the count of bytes in use for the specified key or for all keys.
    fn getBytesInUse(
        &self,
        ext_id: &nsACString,
        keys: &nsACString,
        callback: &mozIExtensionStorageCallback,
    ) -> Result<()> {
        self.dispatch(
            Punt::GetBytesInUse {
                ext_id: str::from_utf8(ext_id)?.into(),
                keys: serde_json::from_str(str::from_utf8(keys)?)?,
            },
            callback,
        )
    }
    xpcom_method!(teardown => Teardown(callback: *const mozIExtensionStorageCallback));
    /// Tears down the storage area, closing the backing database connection.
    fn teardown(&self, callback: &mozIExtensionStorageCallback) -> Result<()> {
        // Each storage task holds a `Weak` reference to the store, which it
        // upgrades to an `Arc` (strong reference) when the task runs on the
        // background queue. The strong reference is dropped when the task
        // finishes. When we tear down the storage area, we relinquish our one
        // owned strong reference to the `TeardownTask`. Because we're using a
        // task queue, when the `TeardownTask` runs, it should have the only
        // strong reference to the store, since all other tasks that called
        // `Weak::upgrade` will have already finished. The `TeardownTask` can
        // then consume the `Arc` and destroy the store.
        let mut maybe_store = self.store.borrow_mut();
        // `mem::take` leaves `None` behind, so later calls fail with
        // `AlreadyTornDown` instead of tearing down twice.
        match mem::take(&mut *maybe_store) {
            Some(store) => {
                // Interrupt any currently-running statements.
                store.interrupt();
                // If dispatching the runnable fails, we'll leak the store
                // without closing its database connection.
                teardown(&self.queue, store, callback)?;
            }
            None => return Err(Error::AlreadyTornDown),
        }
        Ok(())
    }
    xpcom_method!(takeMigrationInfo => TakeMigrationInfo(callback: *const mozIExtensionStorageCallback));
    /// Fetch-and-delete (e.g. `take`) information about the migration from the
    /// kinto-based extension-storage to the rust-based storage.
    fn takeMigrationInfo(&self, callback: &mozIExtensionStorageCallback) -> Result<()> {
        self.dispatch(Punt::TakeMigrationInfo, callback)
    }
}
/// Dispatches a `TeardownTask` for `store` onto the background `queue`,
/// notifying `callback` when the store has been shut down. Fails if the
/// runnable can't be created or dispatched (for example, because the thread
/// manager is shutting down).
fn teardown(
    queue: &nsISerialEventTarget,
    store: Arc<LazyStore>,
    callback: &mozIExtensionStorageCallback,
) -> Result<()> {
    let teardown_task = TeardownTask::new(store, callback)?;
    let teardown_runnable = TaskRunnable::new(TeardownTask::name(), Box::new(teardown_task))?;
    // Closing the database does I/O, so run the task on the blocking pool.
    let options = DispatchOptions::new().may_block(true);
    TaskRunnable::dispatch_with_options(teardown_runnable, queue.coerce(), options)?;
    Ok(())
}
/// `mozISyncedExtensionStorageArea` implementation.
impl StorageSyncArea {
    xpcom_method!(
        fetch_pending_sync_changes => FetchPendingSyncChanges(callback: *const mozIExtensionStorageCallback)
    );
    /// Punts a fetch of all pending Sync change notifications to the
    /// background queue; results are forwarded to `callback`.
    fn fetch_pending_sync_changes(&self, callback: &mozIExtensionStorageCallback) -> Result<()> {
        self.dispatch(Punt::FetchPendingSyncChanges, callback)
    }
}
/// `mozIInterruptible` implementation.
impl StorageSyncArea {
    xpcom_method!(
        interrupt => Interrupt()
    );
    /// Interrupts any operations currently running on the background task
    /// queue. Fails with `AlreadyTornDown` if the area was torn down.
    fn interrupt(&self) -> Result<()> {
        self.store()?.interrupt();
        Ok(())
    }
}
/// `mozIBridgedSyncEngine` implementation.
///
/// Each method wraps the corresponding `golden_gate` ferry/apply task and
/// dispatches it to the background queue.
impl StorageSyncArea {
    xpcom_method!(get_logger => GetLogger() -> *const mozIServicesLogSink);
    // NOTE(review): returning `Err(NS_OK)` makes the XPCOM getter report
    // NS_OK with no logger, rather than a real failure — confirm intended.
    fn get_logger(&self) -> Result<RefPtr<mozIServicesLogSink>> {
        Err(NS_OK)?
    }
    xpcom_method!(set_logger => SetLogger(logger: *const mozIServicesLogSink));
    /// Accepts and ignores a logger; logging isn't wired up here.
    fn set_logger(&self, _logger: Option<&mozIServicesLogSink>) -> Result<()> {
        Ok(())
    }
    xpcom_method!(get_storage_version => GetStorageVersion() -> i32);
    /// Returns the collection storage version from `webext_storage`.
    fn get_storage_version(&self) -> Result<i32> {
        Ok(STORAGE_VERSION.try_into().unwrap())
    }
    // It's possible that migration, or even merging, will result in records
    // too large for the server. We tolerate that (and hope that the addons do
    // too :)
    xpcom_method!(get_allow_skipped_record => GetAllowSkippedRecord() -> bool);
    fn get_allow_skipped_record(&self) -> Result<bool> {
        Ok(true)
    }
    xpcom_method!(
        get_last_sync => GetLastSync(
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    /// Fetches the engine's last sync time, in milliseconds.
    fn get_last_sync(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
        Ok(FerryTask::for_last_sync(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
    }
    xpcom_method!(
        set_last_sync => SetLastSync(
            last_sync_millis: i64,
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    /// Persists the engine's last sync time, in milliseconds.
    fn set_last_sync(
        &self,
        last_sync_millis: i64,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<()> {
        Ok(
            FerryTask::for_set_last_sync(self.new_bridge()?, last_sync_millis, callback)?
                .dispatch(&self.queue)?,
        )
    }
    xpcom_method!(
        get_sync_id => GetSyncId(
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    /// Fetches the engine's current sync ID.
    fn get_sync_id(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
        Ok(FerryTask::for_sync_id(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
    }
    xpcom_method!(
        reset_sync_id => ResetSyncId(
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    /// Generates a fresh sync ID for the engine.
    fn reset_sync_id(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
        Ok(FerryTask::for_reset_sync_id(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
    }
    xpcom_method!(
        ensure_current_sync_id => EnsureCurrentSyncId(
            new_sync_id: *const nsACString,
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    /// Reconciles the engine's local sync ID with `new_sync_id` from the server.
    fn ensure_current_sync_id(
        &self,
        new_sync_id: &nsACString,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<()> {
        Ok(
            FerryTask::for_ensure_current_sync_id(self.new_bridge()?, new_sync_id, callback)?
                .dispatch(&self.queue)?,
        )
    }
    xpcom_method!(
        sync_started => SyncStarted(
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    /// Notifies the engine that a sync is starting.
    fn sync_started(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
        Ok(FerryTask::for_sync_started(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
    }
    xpcom_method!(
        store_incoming => StoreIncoming(
            incoming_envelopes_json: *const ThinVec<::nsstring::nsCString>,
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    /// Stages incoming record envelopes for `apply`. A null array from XPCOM
    /// is treated as empty.
    fn store_incoming(
        &self,
        incoming_envelopes_json: Option<&ThinVec<nsCString>>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<()> {
        Ok(FerryTask::for_store_incoming(
            self.new_bridge()?,
            incoming_envelopes_json.map(|v| v.as_slice()).unwrap_or(&[]),
            callback,
        )?
        .dispatch(&self.queue)?)
    }
    xpcom_method!(apply => Apply(callback: *const mozIBridgedSyncEngineApplyCallback));
    /// Merges staged incoming records and returns outgoing records to upload.
    fn apply(&self, callback: &mozIBridgedSyncEngineApplyCallback) -> Result<()> {
        Ok(ApplyTask::new(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
    }
    xpcom_method!(
        set_uploaded => SetUploaded(
            server_modified_millis: i64,
            uploaded_ids: *const ThinVec<::nsstring::nsCString>,
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    /// Records that the given outgoing IDs were uploaded at the given server
    /// time. A null ID array from XPCOM is treated as empty.
    fn set_uploaded(
        &self,
        server_modified_millis: i64,
        uploaded_ids: Option<&ThinVec<nsCString>>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<()> {
        Ok(FerryTask::for_set_uploaded(
            self.new_bridge()?,
            server_modified_millis,
            uploaded_ids.map(|v| v.as_slice()).unwrap_or(&[]),
            callback,
        )?
        .dispatch(&self.queue)?)
    }
    xpcom_method!(
        sync_finished => SyncFinished(
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    /// Notifies the engine that the sync finished.
    fn sync_finished(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
        Ok(FerryTask::for_sync_finished(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
    }
    xpcom_method!(
        reset => Reset(
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    /// Resets all local Sync state without touching user data.
    fn reset(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
        Ok(FerryTask::for_reset(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
    }
    xpcom_method!(
        wipe => Wipe(
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    /// Erases all local user data for this engine.
    fn wipe(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
        Ok(FerryTask::for_wipe(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
    }
    /// Creates a boxed `BridgedEngine` backed by this area's store. Fails if
    /// the area has been torn down or hasn't been configured yet.
    fn new_bridge(&self) -> Result<Box<dyn BridgedEngine>> {
        Ok(Box::new(self.store()?.get()?.bridged_engine()))
    }
}

Просмотреть файл

@ -0,0 +1,124 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::{error, fmt, result, str::Utf8Error, string::FromUtf16Error};
use golden_gate::Error as GoldenGateError;
use nserror::{
nsresult, NS_ERROR_ALREADY_INITIALIZED, NS_ERROR_CANNOT_CONVERT_DATA,
NS_ERROR_DOM_QUOTA_EXCEEDED_ERR, NS_ERROR_FAILURE, NS_ERROR_INVALID_ARG,
NS_ERROR_NOT_IMPLEMENTED, NS_ERROR_NOT_INITIALIZED, NS_ERROR_UNEXPECTED,
};
use serde_json::error::Error as JsonError;
use webext_storage::error::Error as WebextStorageError;
/// A specialized `Result` type for extension storage operations.
pub type Result<T> = result::Result<T, Error>;
/// The error type for extension storage operations. Errors can be converted
/// into `nsresult` codes, and include more detailed messages that can be passed
/// to callbacks.
#[derive(Debug)]
pub enum Error {
    /// A raw XPCOM result code, passed through unchanged.
    Nsresult(nsresult),
    /// An error from the `webext_storage` component.
    WebextStorage(WebextStorageError),
    /// The Kinto-to-Rust data migration failed.
    MigrationFailed(WebextStorageError),
    /// An error from the Sync bridging layer.
    GoldenGate(GoldenGateError),
    /// Input wasn't valid UTF-8/UTF-16 or JSON.
    MalformedString(Box<dyn error::Error + Send + Sync + 'static>),
    /// `configure` was called more than once.
    AlreadyConfigured,
    /// A store method was called before `configure`.
    NotConfigured,
    /// A task ran more than once on the background queue (name included).
    AlreadyRan(&'static str),
    /// A task's result was taken before it ran (name included).
    DidNotRun(&'static str),
    /// The storage area was already torn down.
    AlreadyTornDown,
    /// The operation isn't implemented.
    NotImplemented,
}
impl error::Error for Error {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match self {
Error::MalformedString(error) => Some(error.as_ref()),
_ => None,
}
}
}
// Conversions into `Error`, so `?` works on each underlying error type.
impl From<nsresult> for Error {
    fn from(result: nsresult) -> Error {
        Error::Nsresult(result)
    }
}
impl From<WebextStorageError> for Error {
    fn from(error: WebextStorageError) -> Error {
        Error::WebextStorage(error)
    }
}
impl From<GoldenGateError> for Error {
    fn from(error: GoldenGateError) -> Error {
        Error::GoldenGate(error)
    }
}
// The three string-decoding failures below all collapse into
// `Error::MalformedString`, which maps to `NS_ERROR_INVALID_ARG`.
impl From<Utf8Error> for Error {
    fn from(error: Utf8Error) -> Error {
        Error::MalformedString(error.into())
    }
}
impl From<FromUtf16Error> for Error {
    fn from(error: FromUtf16Error) -> Error {
        Error::MalformedString(error.into())
    }
}
impl From<JsonError> for Error {
    fn from(error: JsonError) -> Error {
        Error::MalformedString(error.into())
    }
}
impl From<Error> for nsresult {
fn from(error: Error) -> nsresult {
match error {
Error::Nsresult(result) => result,
Error::WebextStorage(e) => match e {
WebextStorageError::QuotaError(_) => NS_ERROR_DOM_QUOTA_EXCEEDED_ERR,
_ => NS_ERROR_FAILURE,
},
Error::MigrationFailed(_) => NS_ERROR_CANNOT_CONVERT_DATA,
Error::GoldenGate(error) => error.into(),
Error::MalformedString(_) => NS_ERROR_INVALID_ARG,
Error::AlreadyConfigured => NS_ERROR_ALREADY_INITIALIZED,
Error::NotConfigured => NS_ERROR_NOT_INITIALIZED,
Error::AlreadyRan(_) => NS_ERROR_UNEXPECTED,
Error::DidNotRun(_) => NS_ERROR_UNEXPECTED,
Error::AlreadyTornDown => NS_ERROR_UNEXPECTED,
Error::NotImplemented => NS_ERROR_NOT_IMPLEMENTED,
}
}
}
impl fmt::Display for Error {
    /// Renders the human-readable message passed to `handleError` callbacks.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Error::Nsresult(result) => write!(f, "Operation failed with {result}"),
            // Wrapped errors render their own messages unchanged.
            Error::WebextStorage(error) => error.fmt(f),
            Error::MigrationFailed(error) => write!(f, "Migration failed with {error}"),
            Error::GoldenGate(error) => error.fmt(f),
            Error::MalformedString(error) => error.fmt(f),
            Error::AlreadyConfigured => write!(f, "The storage area is already configured"),
            Error::NotConfigured => write!(
                f,
                "The storage area must be configured by calling `configure` first"
            ),
            Error::AlreadyRan(what) => write!(f, "`{what}` already ran on the background thread"),
            Error::DidNotRun(what) => write!(f, "`{what}` didn't run on the background thread"),
            Error::AlreadyTornDown => {
                write!(f, "Can't use a storage area that's already torn down")
            }
            Error::NotImplemented => write!(f, "Operation not implemented"),
        }
    }
}

Просмотреть файл

@ -0,0 +1,65 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(non_snake_case)]
//! This crate bridges the WebExtension storage area interfaces in Firefox
//! Desktop to the extension storage Rust component in Application Services.
//!
//! ## How are the WebExtension storage APIs implemented in Firefox?
//!
//! There are three storage APIs available for WebExtensions:
//! `storage.local`, which is stored locally in an IndexedDB database and never
//! synced to other devices, `storage.sync`, which is stored in a local SQLite
//! database and synced to all devices signed in to the same Firefox Account,
//! and `storage.managed`, which is provisioned in a native manifest and
//! read-only.
//!
//! * `storage.local` is implemented in `ExtensionStorageIDB.sys.mjs`.
//! * `storage.sync` is implemented in a Rust component, `webext_storage`. This
//! Rust component is vendored in m-c, and exposed to JavaScript via an XPCOM
//! API in `webext_storage_bridge` (this crate). Eventually, we'll change
//! `ExtensionStorageSync.sys.mjs` to call the XPCOM API instead of using the
//! old Kinto storage adapter.
//! * `storage.managed` is implemented directly in `parent/ext-storage.js`.
//!
//! `webext_storage_bridge` implements the `mozIExtensionStorageArea`
//! (and, eventually, `mozIBridgedSyncEngine`) interface for `storage.sync`. The
//! implementation is in `area::StorageSyncArea`, and is backed by the
//! `webext_storage` component.
#[macro_use]
extern crate cstr;
#[macro_use]
extern crate xpcom;
mod area;
mod error;
mod punt;
mod store;
use nserror::{nsresult, NS_OK};
use xpcom::{interfaces::mozIExtensionStorageArea, RefPtr};
use crate::area::StorageSyncArea;
/// The constructor for a `storage.sync` area. This uses C linkage so that it
/// can be called from C++. See `ExtensionStorageComponents.h` for the C++
/// constructor that's passed to the component manager.
///
/// # Safety
///
/// This function is unsafe because it dereferences `result`. The caller must
/// pass a valid, writable out-pointer.
#[no_mangle]
pub unsafe extern "C" fn NS_NewExtensionStorageSyncArea(
    result: *mut *const mozIExtensionStorageArea,
) -> nsresult {
    match StorageSyncArea::new() {
        Ok(bridge) => {
            // Hand a new strong reference to the caller via the out-param.
            RefPtr::new(bridge.coerce::<mozIExtensionStorageArea>()).forget(&mut *result);
            NS_OK
        }
        // Any construction failure is converted to its nsresult code.
        Err(err) => err.into(),
    }
}

Просмотреть файл

@ -0,0 +1,321 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::{
borrow::Borrow,
fmt::Write,
mem, result, str,
sync::{Arc, Weak},
};
use atomic_refcell::AtomicRefCell;
use moz_task::{Task, ThreadPtrHandle, ThreadPtrHolder};
use nserror::nsresult;
use nsstring::nsCString;
use serde::Serialize;
use serde_json::Value as JsonValue;
use storage_variant::VariantType;
use xpcom::{
interfaces::{mozIExtensionStorageCallback, mozIExtensionStorageListener},
RefPtr, XpCom,
};
use crate::error::{Error, Result};
use crate::store::LazyStore;
/// A storage operation that's punted from the main thread to the background
/// task queue. Inputs are parsed and validated on the main thread before
/// being captured here.
pub enum Punt {
    /// Get the values of the keys for an extension.
    Get { ext_id: String, keys: JsonValue },
    /// Set a key-value pair for an extension.
    Set { ext_id: String, value: JsonValue },
    /// Remove one or more keys for an extension.
    Remove { ext_id: String, keys: JsonValue },
    /// Clear all keys and values for an extension.
    Clear { ext_id: String },
    /// Returns the bytes in use for the specified, or all, keys.
    GetBytesInUse { ext_id: String, keys: JsonValue },
    /// Fetches all pending Sync change notifications to pass to
    /// `storage.onChanged` listeners.
    FetchPendingSyncChanges,
    /// Fetch-and-delete (e.g. `take`) information about the migration from the
    /// kinto-based extension-storage to the rust-based storage.
    ///
    /// This data is stored in the database instead of just being returned by
    /// the call to `migrate`, as we may migrate prior to telemetry being ready.
    TakeMigrationInfo,
}
impl Punt {
/// Returns the operation name, used to label the task runnable and report
/// errors.
pub fn name(&self) -> &'static str {
match self {
Punt::Get { .. } => "webext_storage::get",
Punt::Set { .. } => "webext_storage::set",
Punt::Remove { .. } => "webext_storage::remove",
Punt::Clear { .. } => "webext_storage::clear",
Punt::GetBytesInUse { .. } => "webext_storage::get_bytes_in_use",
Punt::FetchPendingSyncChanges => "webext_storage::fetch_pending_sync_changes",
Punt::TakeMigrationInfo => "webext_storage::take_migration_info",
}
}
}
/// A storage operation result, punted from the background queue back to the
/// main thread.
#[derive(Default)]
struct PuntResult {
    // Change payloads to forward to `mozIExtensionStorageListener::onChanged`.
    changes: Vec<Change>,
    // JSON-serialized return value for `handleSuccess`, if the operation
    // produces one; `None` yields a null variant.
    value: Option<String>,
}
/// A change record for an extension.
struct Change {
    // The affected extension's ID.
    ext_id: String,
    // A JSON array of listener infos for `storage.onChanged`.
    json: String,
}
impl PuntResult {
    /// Builds a result carrying a single change payload for `ext_id` to pass
    /// to `onChanged`, and no return value for `handleSuccess`. The `Borrow`
    /// bound lets callers pass either a borrowed reference or an owned value.
    fn with_change<T: Borrow<S>, S: Serialize>(ext_id: &str, changes: T) -> Result<Self> {
        let change = Change {
            ext_id: ext_id.into(),
            json: serde_json::to_string(changes.borrow())?,
        };
        Ok(Self {
            changes: vec![change],
            value: None,
        })
    }
    /// Builds a result carrying change payloads for multiple extensions, and
    /// no return value for `handleSuccess`.
    fn with_changes(changes: Vec<Change>) -> Self {
        Self {
            changes,
            value: None,
        }
    }
    /// Builds a result with no change payloads, and a JSON-serialized return
    /// value for `handleSuccess`.
    fn with_value<T: Borrow<S>, S: Serialize>(value: T) -> Result<Self> {
        let json = serde_json::to_string(value.borrow())?;
        Ok(Self {
            changes: Vec::new(),
            value: Some(json),
        })
    }
}
/// A generic task used for all storage operations. Punts the operation to the
/// background task queue, receives a result back on the main thread, and calls
/// the callback with it.
pub struct PuntTask {
    // The operation name, used for error reporting.
    name: &'static str,
    /// Storage tasks hold weak references to the store, which they upgrade
    /// to strong references when running on the background queue. This
    /// ensures that pending storage tasks don't block teardown (for example,
    /// if a consumer calls `get` and then `teardown`, without waiting for
    /// `get` to finish).
    store: Weak<LazyStore>,
    // The pending operation; taken (left `None`) when the task runs.
    punt: AtomicRefCell<Option<Punt>>,
    // Main-thread-only handle to the caller's callback.
    callback: ThreadPtrHandle<mozIExtensionStorageCallback>,
    // The operation's outcome, written by `run` and consumed by `done`.
    result: AtomicRefCell<Result<PuntResult>>,
}
impl PuntTask {
    /// Creates a storage task that punts an operation to the background queue.
    /// Returns an error if the task couldn't be created because the thread
    /// manager is shutting down.
    pub fn new(
        store: Weak<LazyStore>,
        punt: Punt,
        callback: &mozIExtensionStorageCallback,
    ) -> Result<Self> {
        let name = punt.name();
        Ok(Self {
            name,
            store,
            punt: AtomicRefCell::new(Some(punt)),
            callback: ThreadPtrHolder::new(
                cstr!("mozIExtensionStorageCallback"),
                RefPtr::new(callback),
            )?,
            // Seed the result with `DidNotRun`, so `done` reports a sensible
            // error if `run` never executed.
            result: AtomicRefCell::new(Err(Error::DidNotRun(name))),
        })
    }
    /// Upgrades the task's weak `LazyStore` reference to a strong one. Returns
    /// an error if the store has been torn down.
    ///
    /// It's important that this is called on the background queue, after the
    /// task has been dispatched. Storage tasks shouldn't hold strong references
    /// to the store on the main thread, because then they might block teardown.
    fn store(&self) -> Result<Arc<LazyStore>> {
        match self.store.upgrade() {
            Some(store) => Ok(store),
            None => Err(Error::AlreadyTornDown),
        }
    }
    /// Runs this task's storage operation on the background queue, mapping
    /// each `Punt` to the corresponding store call and wrapping the outcome
    /// in a `PuntResult`.
    fn inner_run(&self, punt: Punt) -> Result<PuntResult> {
        Ok(match punt {
            Punt::Set { ext_id, value } => {
                PuntResult::with_change(&ext_id, self.store()?.get()?.set(&ext_id, value)?)?
            }
            Punt::Get { ext_id, keys } => {
                PuntResult::with_value(self.store()?.get()?.get(&ext_id, keys)?)?
            }
            Punt::Remove { ext_id, keys } => {
                PuntResult::with_change(&ext_id, self.store()?.get()?.remove(&ext_id, keys)?)?
            }
            Punt::Clear { ext_id } => {
                PuntResult::with_change(&ext_id, self.store()?.get()?.clear(&ext_id)?)?
            }
            Punt::GetBytesInUse { ext_id, keys } => {
                PuntResult::with_value(self.store()?.get()?.get_bytes_in_use(&ext_id, keys)?)?
            }
            Punt::FetchPendingSyncChanges => PuntResult::with_changes(
                self.store()?
                    .get()?
                    .get_synced_changes()?
                    .into_iter()
                    .map(|info| Change {
                        ext_id: info.ext_id,
                        json: info.changes,
                    })
                    .collect(),
            ),
            Punt::TakeMigrationInfo => {
                PuntResult::with_value(self.store()?.get()?.take_migration_info()?)?
            }
        })
    }
}
impl Task for PuntTask {
    /// Runs on the background queue: takes the pending punt and stores its
    /// outcome for `done` to consume on the main thread.
    fn run(&self) {
        *self.result.borrow_mut() = match self.punt.borrow_mut().take() {
            Some(punt) => self.inner_run(punt),
            // A task should never run on the background queue twice, but we
            // return an error just in case.
            None => Err(Error::AlreadyRan(self.name)),
        };
    }
    /// Runs on the main thread after `run`: forwards change notifications,
    /// then invokes exactly one of `handleSuccess` / `handleError`.
    fn done(&self) -> result::Result<(), nsresult> {
        let callback = self.callback.get().unwrap();
        // As above, `done` should never be called multiple times, but we handle
        // that by returning an error.
        match mem::replace(
            &mut *self.result.borrow_mut(),
            Err(Error::AlreadyRan(self.name)),
        ) {
            Ok(PuntResult { changes, value }) => {
                // If we have change data, and the callback implements the
                // listener interface, notify about it first.
                if let Some(listener) = callback.query_interface::<mozIExtensionStorageListener>() {
                    for Change { ext_id, json } in changes {
                        // Ignore errors.
                        let _ = unsafe {
                            listener.OnChanged(&*nsCString::from(ext_id), &*nsCString::from(json))
                        };
                    }
                }
                // A `None` value becomes a null variant for `handleSuccess`.
                let result = value.map(nsCString::from).into_variant();
                unsafe { callback.HandleSuccess(result.coerce()) }
            }
            Err(err) => {
                let mut message = nsCString::new();
                write!(message, "{err}").unwrap();
                unsafe { callback.HandleError(err.into(), &*message) }
            }
        }
        .to_result()
    }
}
/// A task to tear down the store on the background task queue.
pub struct TeardownTask {
    /// Unlike storage tasks, the teardown task holds a strong reference to
    /// the store, which it drops on the background queue. This is the only
    /// task that should do that.
    store: AtomicRefCell<Option<Arc<LazyStore>>>,
    // Main-thread-only handle to the caller's callback.
    callback: ThreadPtrHandle<mozIExtensionStorageCallback>,
    // The teardown outcome, written by `run` and consumed by `done`.
    result: AtomicRefCell<Result<()>>,
}
impl TeardownTask {
    /// Creates a teardown task. This should only be created and dispatched
    /// once, to clean up the store at shutdown. Returns an error if the task
    /// couldn't be created because the thread manager is shutting down.
    pub fn new(store: Arc<LazyStore>, callback: &mozIExtensionStorageCallback) -> Result<Self> {
        Ok(Self {
            store: AtomicRefCell::new(Some(store)),
            callback: ThreadPtrHolder::new(
                cstr!("mozIExtensionStorageCallback"),
                RefPtr::new(callback),
            )?,
            // Seed with `DidNotRun` so `done` reports an error if `run`
            // never executed.
            result: AtomicRefCell::new(Err(Error::DidNotRun(Self::name()))),
        })
    }
    /// Returns the task name, used to label its runnable and report errors.
    pub fn name() -> &'static str {
        "webext_storage::teardown"
    }
    /// Tears down and drops the store on the background queue.
    fn inner_run(&self, store: Arc<LazyStore>) -> Result<()> {
        // At this point, we should be holding the only strong reference
        // to the store, since 1) `StorageSyncArea` gave its one strong
        // reference to our task, and 2) we're running on a background
        // task queue, which runs all tasks sequentially...so no other
        // `PuntTask`s should be running and trying to upgrade their
        // weak references. So we can unwrap the `Arc` and take ownership
        // of the store.
        match Arc::try_unwrap(store) {
            Ok(store) => store.teardown(),
            Err(_) => {
                // If unwrapping the `Arc` fails, someone else must have
                // a strong reference to the store. We could sleep and
                // try again, but this is so unexpected that it's easier
                // to just leak the store, and return an error to the
                // callback. Except in tests, we only call `teardown` at
                // shutdown, so the resources will get reclaimed soon,
                // anyway.
                Err(Error::DidNotRun(Self::name()))
            }
        }
    }
}
impl Task for TeardownTask {
    /// Runs on the background queue: takes the store and tears it down,
    /// recording the outcome for `done`.
    fn run(&self) {
        *self.result.borrow_mut() = match self.store.borrow_mut().take() {
            Some(store) => self.inner_run(store),
            // `run` should only execute once; report an error otherwise.
            None => Err(Error::AlreadyRan(Self::name())),
        };
    }
    /// Runs on the main thread after `run`: reports the outcome to the
    /// callback, passing a null variant on success.
    fn done(&self) -> result::Result<(), nsresult> {
        let callback = self.callback.get().unwrap();
        match mem::replace(
            &mut *self.result.borrow_mut(),
            Err(Error::AlreadyRan(Self::name())),
        ) {
            Ok(()) => unsafe { callback.HandleSuccess(().into_variant().coerce()) },
            Err(err) => {
                let mut message = nsCString::new();
                write!(message, "{err}").unwrap();
                unsafe { callback.HandleError(err.into(), &*message) }
            }
        }
        .to_result()
    }
}

Просмотреть файл

@ -0,0 +1,136 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::{fs::remove_file, path::PathBuf, sync::Arc};
use interrupt_support::SqlInterruptHandle;
use once_cell::sync::OnceCell;
use webext_storage::store::WebExtStorageStore as Store;
use crate::error::{self, Error};
/// Options for an extension storage area. Built on the main thread in
/// `StorageSyncArea::configure` and consumed when the store is first opened.
pub struct LazyStoreConfig {
    /// The path to the database file for this storage area.
    pub path: PathBuf,
    /// The path to the old kinto database. If it exists, we should attempt to
    /// migrate from this database as soon as we open our DB. It's not Option<>
    /// because the caller will not have checked whether it exists or not, so
    /// will assume it might.
    pub kinto_path: PathBuf,
}
/// A lazy store is automatically initialized on a background thread with its
/// configuration the first time it's used.
#[derive(Default)]
pub struct LazyStore {
    // The wrapped store plus its interrupt handle; set on first `get()`.
    store: OnceCell<InterruptStore>,
    // Set once via `configure()`; consumed when the store is initialized.
    config: OnceCell<LazyStoreConfig>,
}
/// An `InterruptStore` wraps an inner extension store, and its interrupt
/// handle.
struct InterruptStore {
    // The real store; all storage operations are delegated to it.
    inner: Store,
    // Handle used to interrupt in-flight statements from another thread.
    handle: Arc<SqlInterruptHandle>,
}
impl LazyStore {
/// Configures the lazy store. Returns an error if the store has already
/// been configured. This method should be called from the main thread.
pub fn configure(&self, config: LazyStoreConfig) -> error::Result<()> {
self.config
.set(config)
.map_err(|_| Error::AlreadyConfigured)
}
/// Interrupts all pending operations on the store. If a database statement
/// is currently running, this will interrupt that statement. If the
/// statement is a write inside an active transaction, the entire
/// transaction will be rolled back. This method should be called from the
/// main thread.
pub fn interrupt(&self) {
if let Some(outer) = self.store.get() {
outer.handle.interrupt();
}
}
/// Returns the underlying store, initializing it if needed. This method
/// should only be called from a background thread or task queue, since
/// opening the database does I/O.
pub fn get(&self) -> error::Result<&Store> {
Ok(&self
.store
.get_or_try_init(|| match self.config.get() {
Some(config) => {
let store = init_store(config)?;
let handle = store.interrupt_handle();
Ok(InterruptStore {
inner: store,
handle,
})
}
None => Err(Error::NotConfigured),
})?
.inner)
}
/// Tears down the store. If the store wasn't initialized, this is a no-op.
/// This should only be called from a background thread or task queue,
/// because closing the database also does I/O.
pub fn teardown(self) -> error::Result<()> {
if let Some(store) = self.store.into_inner() {
store.inner.close()?;
}
Ok(())
}
}
// Initialize the store, performing a migration if necessary.
// The requirements for migration are, roughly:
// * If kinto_path doesn't exist, we don't try to migrate.
// * If our DB path exists, we assume we've already migrated and don't try again
// * If the migration fails, we close our store and delete the DB, then return
//   a special error code which tells our caller about the failure. It's then
//   expected to fallback to the "old" kinto store and we'll try next time.
// Note that the migrate() method on the store is written such that it should
// ignore all "read" errors from the source, but propagate "write" errors on our
// DB - the intention is that things like corrupted source databases never fail,
// but disk-space failures on our database do.
fn init_store(config: &LazyStoreConfig) -> error::Result<Store> {
    // Only migrate when the old kinto DB exists and ours doesn't yet; an
    // existing DB at `config.path` means a previous migration already ran.
    let should_migrate = config.kinto_path.exists() && !config.path.exists();
    let store = Store::new(&config.path)?;
    if should_migrate {
        match store.migrate(&config.kinto_path) {
            // It's likely to be too early for us to stick the MigrationInfo
            // into the sync telemetry, a separate call to `take_migration_info`
            // must be made to the store (this is done by telemetry after it's
            // ready to submit the data).
            Ok(()) => {
                // need logging, but for now let's print to stdout.
                println!("extension-storage: migration complete");
                Ok(store)
            }
            Err(e) => {
                println!("extension-storage: migration failure: {e}");
                // Close before deleting, so sqlite releases the file handle;
                // deletion happens regardless of whether close succeeds.
                if let Err(e) = store.close() {
                    // welp, this probably isn't going to end well...
                    println!(
                        "extension-storage: failed to close the store after migration failure: {e}"
                    );
                }
                if let Err(e) = remove_file(&config.path) {
                    // this is bad - if it happens regularly it will defeat
                    // our entire migration strategy - we'll assume it
                    // worked.
                    // So it's desirable to make noise if this happens.
                    println!("Failed to remove file after failed migration: {e}");
                }
                Err(Error::MigrationFailed(e))
            }
        }
    } else {
        Ok(store)
    }
}

Просмотреть файл

@ -4,6 +4,8 @@
/* import-globals-from head.js */
const STORAGE_SYNC_PREF = "webextensions.storage.sync.enabled";
// Test implementations and utility functions that are used against multiple
// storage areas (eg, a test which is run against browser.storage.local and
// browser.storage.sync, or a test against browser.storage.sync but needs to
@ -74,6 +76,49 @@ async function checkGetImpl(areaName, prop, value) {
);
}
// Verifies that every storage.sync entry point rejects with a "set the pref"
// error while `webextensions.storage.sync.enabled` is false.
function test_config_flag_needed() {
  async function testFn() {
    // Runs inside the generated test extension.
    function background() {
      let promises = [];
      let apiTests = [
        { method: "get", args: ["foo"] },
        { method: "set", args: [{ foo: "bar" }] },
        { method: "remove", args: ["foo"] },
        { method: "clear", args: [] },
      ];
      apiTests.forEach(testDef => {
        promises.push(
          browser.test.assertRejects(
            browser.storage.sync[testDef.method](...testDef.args),
            "Please set webextensions.storage.sync.enabled to true in about:config",
            `storage.sync.${testDef.method} is behind a flag`
          )
        );
      });
      Promise.all(promises).then(() => browser.test.notifyPass("flag needed"));
    }
    ok(
      !Services.prefs.getBoolPref(STORAGE_SYNC_PREF, false),
      // Fixed: this must be a template literal so the pref name actually
      // interpolates; the previous double-quoted string printed the literal
      // text "${STORAGE_SYNC_PREF}".
      `The \`${STORAGE_SYNC_PREF}\` should be set to false`
    );
    let extension = ExtensionTestUtils.loadExtension({
      manifest: {
        permissions: ["storage"],
      },
      background,
    });
    await extension.startup();
    await extension.awaitFinish("flag needed");
    await extension.unload();
  }
  return runWithPrefs([[STORAGE_SYNC_PREF, false]], testFn);
}
async function test_storage_after_reload(areaName, { expectPersistency }) {
// Just some random extension ID that we can re-use
const extensionId = "my-extension-id@1";
@ -124,8 +169,15 @@ async function test_storage_after_reload(areaName, { expectPersistency }) {
await extension2.unload();
}
async function test_sync_reloading_extensions_works() {
await test_storage_after_reload("sync", { expectPersistency: true });
function test_sync_reloading_extensions_works() {
return runWithPrefs([[STORAGE_SYNC_PREF, true]], async () => {
ok(
Services.prefs.getBoolPref(STORAGE_SYNC_PREF, false),
"The `${STORAGE_SYNC_PREF}` should be set to true"
);
await test_storage_after_reload("sync", { expectPersistency: true });
});
}
async function test_background_page_storage(testAreaName) {
@ -649,7 +701,7 @@ async function test_background_page_storage(testAreaName) {
await extension.unload();
}
async function test_storage_sync_requires_real_id() {
function test_storage_sync_requires_real_id() {
async function testFn() {
async function background() {
const EXCEPTION_MESSAGE =
@ -680,7 +732,7 @@ async function test_storage_sync_requires_real_id() {
await extension.unload();
}
return await testFn();
return runWithPrefs([[STORAGE_SYNC_PREF, true]], testFn);
}
// Test for storage areas which don't support getBytesInUse() nor QUOTA

Просмотреть файл

@ -4,6 +4,8 @@
"use strict";
/* exported withSyncContext */
const { ExtensionCommon } = ChromeUtils.importESModule(
"resource://gre/modules/ExtensionCommon.sys.mjs"
);
@ -42,3 +44,23 @@ async function withContext(f) {
await context.unload();
}
}
/**
 * Runs `f` exactly like withContext(), but with the "storage.sync" pref
 * forced on for the duration of the call.
 * Calls to this function can be replaced with calls to withContext
 * once the pref becomes on by default.
 *
 * @param {Function} f the function to call
 */
async function withSyncContext(f) {
  const PREF_NAME = "webextensions.storage.sync.enabled";
  try {
    Services.prefs.setBoolPref(PREF_NAME, true);
    await withContext(f);
  } finally {
    // Restore the default even if `f` (or the pref set itself) throws.
    Services.prefs.clearUserPref(PREF_NAME);
  }
}

Просмотреть файл

@ -0,0 +1,86 @@
/* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */
/* vim: set sts=2 sw=2 et tw=80: */
"use strict";
// Import the rust-based and kinto-based implementations
const { extensionStorageSync: rustImpl } = ChromeUtils.importESModule(
"resource://gre/modules/ExtensionStorageSync.sys.mjs"
);
const { extensionStorageSyncKinto: kintoImpl } = ChromeUtils.importESModule(
"resource://gre/modules/ExtensionStorageSyncKinto.sys.mjs"
);
Services.prefs.setBoolPref("webextensions.storage.sync.kinto", false);
add_task(async function test_sync_migration() {
  // There's no good reason to perform this test via test extensions - we just
  // call the underlying APIs directly.

  // Seed two extensions' worth of data in the old kinto-based backend.
  const ext1 = { id: "test@mozilla.com" };
  const ctx1 = { extension: ext1, callOnClose() {} };
  await kintoImpl.set(ext1, { foo: "bar" }, ctx1);

  const ext2 = { id: "test-2@mozilla.com" };
  const ctx2 = { extension: ext2, callOnClose() {} };
  await kintoImpl.set(ext2, { second: "2nd" }, ctx2);

  const ext3 = { id: "test-3@mozilla.com" };
  const ctx3 = { extension: ext3, callOnClose() {} };

  // The first use of the rust-based backend should migrate all the kinto
  // data over automatically.
  Assert.deepEqual(await rustImpl.get(ext1, "foo", ctx1), { foo: "bar" });
  Assert.deepEqual(await rustImpl.get(ext2, null, ctx2), { second: "2nd" });

  // Sanity check we really are doing what we think we are - a value written
  // via the new backend must not be reflected by kinto.
  await rustImpl.set(ext3, { third: "3rd" }, ctx3);
  Assert.deepEqual(await rustImpl.get(ext3, null, ctx3), { third: "3rd" });
  Assert.deepEqual(await kintoImpl.get(ext3, null, ctx3), {});

  // Clean up both backends so later tests start from a blank slate.
  const all = [
    [ext1, ctx1],
    [ext2, ctx2],
    [ext3, ctx3],
  ];
  for (const [ext, ctx] of all) {
    await kintoImpl.clear(ext, ctx);
  }
  for (const [ext, ctx] of all) {
    await rustImpl.clear(ext, ctx);
  }
});
// It would be great to have failure tests, but that seems impossible to have
// in automated tests given the conditions under which we migrate - it would
// basically require us to arrange for zero free disk space or to somehow
// arrange for sqlite to see an io error. Specially crafted "corrupt"
// sqlite files doesn't help because that file must not exist for us to even
// attempt migration.
//
// But - what we can test is that if .migratedOk on the new impl ever goes to
// false we delegate correctly.
add_task(async function test_sync_migration_delgates() {
  let e1 = { id: "test@mozilla.com" };
  let c1 = { extension: e1, callOnClose() {} };
  await kintoImpl.set(e1, { foo: "bar" }, c1);
  // We think migration went OK - `get` shouldn't see kinto.
  // Fixed: `get` returns a promise, so it must be awaited; without the
  // `await` this vacuously compared a pending Promise against `{}`.
  Assert.deepEqual(await rustImpl.get(e1, null, c1), {});
  info(
    "Setting migration failure flag to ensure we delegate to kinto implementation"
  );
  rustImpl.migrationOk = false;
  // get should now be seeing kinto.
  Assert.deepEqual(await rustImpl.get(e1, null, c1), { foo: "bar" });
  // check everything else delegates.
  await rustImpl.set(e1, { foo: "foo" }, c1);
  Assert.deepEqual(await kintoImpl.get(e1, null, c1), { foo: "foo" });
  Assert.equal(await rustImpl.getBytesInUse(e1, null, c1), 8);
  await rustImpl.remove(e1, "foo", c1);
  Assert.deepEqual(await kintoImpl.get(e1, null, c1), {});
  await rustImpl.set(e1, { foo: "foo" }, c1);
  Assert.deepEqual(await kintoImpl.get(e1, null, c1), { foo: "foo" });
  await rustImpl.clear(e1, c1);
  Assert.deepEqual(await kintoImpl.get(e1, null, c1), {});
});

Просмотреть файл

@ -3,15 +3,40 @@
"use strict";
ChromeUtils.defineESModuleGetters(this, {
extensionStorageSync: "resource://gre/modules/ExtensionStorageSync.sys.mjs",
Service: "resource://services-sync/service.sys.mjs",
});
const NS_ERROR_DOM_QUOTA_EXCEEDED_ERR = 0x80530016;
const { ExtensionStorageEngineBridge } = ChromeUtils.importESModule(
"resource://services-sync/engines/extension-storage.sys.mjs"
XPCOMUtils.defineLazyServiceGetter(
this,
"StorageSyncService",
"@mozilla.org/extensions/storage/sync;1",
"nsIInterfaceRequestor"
);
const SYNC_QUOTA_BYTES = 102400;
/**
 * Wraps a callback-style storage/engine method in a Promise.
 *
 * Appends a listener/callback object to `params`, records every `onChanged`
 * notification, and resolves with `{ changes, value }` on success (JSON
 * string values are parsed first). Rejects with a Components.Exception
 * carrying the error code on failure.
 *
 * @param {Function} func the method to call
 * @param {...*} params the leading arguments to pass to `func`
 * @returns {Promise<{changes: Array, value: *}>}
 */
function promisify(func, ...params) {
  return new Promise((resolve, reject) => {
    const changes = [];
    const callback = {
      QueryInterface: ChromeUtils.generateQI([
        "mozIExtensionStorageListener",
        "mozIExtensionStorageCallback",
        "mozIBridgedSyncEngineCallback",
        "mozIBridgedSyncEngineApplyCallback",
      ]),
      onChanged(extId, json) {
        changes.push({ extId, changes: JSON.parse(json) });
      },
      handleSuccess(value) {
        const parsed = typeof value == "string" ? JSON.parse(value) : value;
        resolve({ changes, value: parsed });
      },
      handleError(code, message) {
        reject(Components.Exception(message, code));
      },
    };
    func(...params, callback);
  });
}
add_task(async function setup_storage_sync() {
// So that we can write to the profile directory.
@ -19,95 +44,92 @@ add_task(async function setup_storage_sync() {
});
add_task(async function test_storage_sync_service() {
const service = extensionStorageSync;
const service = StorageSyncService.getInterface(Ci.mozIExtensionStorageArea);
{
// mocking notifyListeners so we have access to the return value of `service.set`
service.notifyListeners = (extId, changeSet) => {
equal(extId, "ext-1");
let expected = {
hi: {
newValue: "hello! 💖",
let { changes, value } = await promisify(
service.set,
"ext-1",
JSON.stringify({
hi: "hello! 💖",
bye: "adiós",
})
);
deepEqual(
changes,
[
{
extId: "ext-1",
changes: {
hi: {
newValue: "hello! 💖",
},
bye: {
newValue: "adiós",
},
},
},
bye: {
newValue: "adiós",
},
};
deepEqual(
[changeSet],
[expected],
"`set` should notify listeners about changes"
);
};
let newValue = {
hi: "hello! 💖",
bye: "adiós",
};
// finalling calling `service.set` which asserts the deepEqual in the above mocked `notifyListeners`
await service.set({ id: "ext-1" }, newValue);
],
"`set` should notify listeners about changes"
);
ok(!value, "`set` should not return a value");
}
{
service.notifyListeners = (_extId, _changeSet) => {
console.log(`NOTIFY LISTENERS`);
};
let { changes, value } = await promisify(
service.get,
"ext-1",
JSON.stringify(["hi"])
);
deepEqual(changes, [], "`get` should not notify listeners");
deepEqual(
value,
{
hi: "hello! 💖",
},
"`get` with key should return value"
);
let expected = {
hi: "hello! 💖",
};
let value = await service.get({ id: "ext-1" }, ["hi"]);
deepEqual(value, expected, "`get` with key should return value");
let expected2 = {
hi: "hello! 💖",
bye: "adiós",
};
let allValues = await service.get({ id: "ext-1" }, null);
let { value: allValues } = await promisify(service.get, "ext-1", "null");
deepEqual(
allValues,
expected2,
{
hi: "hello! 💖",
bye: "adiós",
},
"`get` without a key should return all values"
);
}
{
service.notifyListeners = (extId, changeSet) => {
console.log("notifyListeners", extId, changeSet);
};
let newValue = {
hi: "hola! 👋",
};
await service.set({ id: "ext-2" }, newValue);
await service.clear({ id: "ext-1" });
let allValues = await service.get({ id: "ext-1" }, null);
await promisify(
service.set,
"ext-2",
JSON.stringify({
hi: "hola! 👋",
})
);
await promisify(service.clear, "ext-1");
let { value: allValues } = await promisify(service.get, "ext-1", "null");
deepEqual(allValues, {}, "clear removed ext-1");
let allValues2 = await service.get({ id: "ext-2" }, null);
let expected = { hi: "hola! 👋" };
deepEqual(allValues2, expected, "clear didn't remove ext-2");
let { value: allValues2 } = await promisify(service.get, "ext-2", "null");
deepEqual(allValues2, { hi: "hola! 👋" }, "clear didn't remove ext-2");
// We need to clear data for ext-2 too, so later tests don't fail due to
// this data.
await service.clear({ id: "ext-2" });
await promisify(service.clear, "ext-2");
}
});
add_task(async function test_storage_sync_bridged_engine() {
let engine = new ExtensionStorageEngineBridge(Service);
await engine.initialize();
let area = engine._rustStore;
const area = StorageSyncService.getInterface(Ci.mozIExtensionStorageArea);
const engine = StorageSyncService.getInterface(Ci.mozIBridgedSyncEngine);
info("Add some local items");
await area.set("ext-1", JSON.stringify({ a: "abc" }));
await area.set("ext-2", JSON.stringify({ b: "xyz" }));
await promisify(area.set, "ext-1", JSON.stringify({ a: "abc" }));
await promisify(area.set, "ext-2", JSON.stringify({ b: "xyz" }));
info("Start a sync");
await engine._bridge.syncStarted();
await promisify(engine.syncStarted);
info("Store some incoming synced items");
let incomingEnvelopesAsJSON = [
@ -132,24 +154,20 @@ add_task(async function test_storage_sync_bridged_engine() {
}),
},
].map(e => JSON.stringify(e));
await engine._bridge.storeIncoming(incomingEnvelopesAsJSON);
await promisify(area.storeIncoming, incomingEnvelopesAsJSON);
info("Merge");
// Three levels of JSON wrapping: each outgoing envelope, the cleartext in
// each envelope, and the extension storage data in each cleartext payload.
let outgoingEnvelopesAsJSON = await engine._bridge.apply();
let { value: outgoingEnvelopesAsJSON } = await promisify(area.apply);
let outgoingEnvelopes = outgoingEnvelopesAsJSON.map(json => JSON.parse(json));
let parsedCleartexts = outgoingEnvelopes.map(e => JSON.parse(e.payload));
let parsedData = parsedCleartexts.map(c => JSON.parse(c.data));
let changes = (await area.getSyncedChanges()).map(change => {
return {
extId: change.extId,
changes: JSON.parse(change.changes),
};
});
let { changes } = await promisify(
area.QueryInterface(Ci.mozISyncedExtensionStorageArea)
.fetchPendingSyncChanges
);
deepEqual(
changes,
[
@ -198,17 +216,15 @@ add_task(async function test_storage_sync_bridged_engine() {
);
info("Mark all extensions as uploaded");
// await promisify(engine.setUploaded, 0, [ext1Guid, "guidAAA"]);
await engine._bridge.setUploaded(0, [ext1Guid, "guidAAA"]);
await promisify(engine.setUploaded, 0, [ext1Guid, "guidAAA"]);
info("Finish sync");
// await promisify(engine.syncFinished);
await engine._bridge.syncFinished();
await promisify(engine.syncFinished);
// Try fetching values for the remote-only extension we just synced.
let ext3Value = await area.get("ext-3", "null");
let { value: ext3Value } = await promisify(area.get, "ext-3", "null");
deepEqual(
JSON.parse(ext3Value),
ext3Value,
{
d: "new! ✨",
},
@ -216,52 +232,43 @@ add_task(async function test_storage_sync_bridged_engine() {
);
info("Try applying a second time");
let secondApply = await engine._bridge.apply();
deepEqual(secondApply, {}, "Shouldn't merge anything on second apply");
let secondApply = await promisify(area.apply);
deepEqual(secondApply.value, {}, "Shouldn't merge anything on second apply");
info("Wipe all items");
await engine._bridge.wipe();
await promisify(engine.wipe);
for (let extId of ["ext-1", "ext-2", "ext-3"]) {
// `get` always returns an object, even if there are no keys for the
// extension ID.
let value = await area.get(extId, "null");
deepEqual(
JSON.parse(value),
{},
`Wipe should remove all values for ${extId}`
);
let { value } = await promisify(area.get, extId, "null");
deepEqual(value, {}, `Wipe should remove all values for ${extId}`);
}
});
add_task(async function test_storage_sync_quota() {
let engine = new ExtensionStorageEngineBridge(Service);
await engine.initialize();
let service = engine._rustStore;
const service = StorageSyncService.getInterface(Ci.mozIExtensionStorageArea);
const engine = StorageSyncService.getInterface(Ci.mozIBridgedSyncEngine);
await promisify(engine.wipe);
await promisify(service.set, "ext-1", JSON.stringify({ x: "hi" }));
await promisify(service.set, "ext-1", JSON.stringify({ longer: "value" }));
await engine._bridge.wipe();
await service.set("ext-1", JSON.stringify({ x: "hi" }));
await service.set("ext-1", JSON.stringify({ longer: "value" }));
let v1 = await service.getBytesInUse("ext-1", '"x"');
let { value: v1 } = await promisify(service.getBytesInUse, "ext-1", '"x"');
Assert.equal(v1, 5); // key len without quotes, value len with quotes.
let v2 = await service.getBytesInUse("ext-1", "null");
let { value: v2 } = await promisify(service.getBytesInUse, "ext-1", "null");
// 5 from 'x', plus 'longer' (6 for key, 7 for value = 13) = 18.
Assert.equal(v2, 18);
// Now set something greater than our quota.
let expectedMsg = "QuotaError: Error";
let msg;
try {
await service.set(
await Assert.rejects(
promisify(
service.set,
"ext-1",
JSON.stringify({
big: "x".repeat(SYNC_QUOTA_BYTES),
big: "x".repeat(Ci.mozIExtensionStorageArea.SYNC_QUOTA_BYTES),
})
);
} catch (ex) {
msg = ex.toString();
} finally {
Assert.equal(expectedMsg, msg);
}
),
ex => ex.result == NS_ERROR_DOM_QUOTA_EXCEEDED_ERR,
"should reject with NS_ERROR_DOM_QUOTA_EXCEEDED_ERR"
);
});

Просмотреть файл

@ -19,9 +19,13 @@ add_task(async function setup() {
});
add_task(async function test_contentscript_storage_sync() {
await test_contentscript_storage("sync");
return runWithPrefs([[STORAGE_SYNC_PREF, true]], () =>
test_contentscript_storage("sync")
);
});
add_task(async function test_contentscript_bytes_in_use_sync() {
await test_contentscript_storage_area_with_bytes_in_use("sync", true);
return runWithPrefs([[STORAGE_SYNC_PREF, true]], () =>
test_contentscript_storage_area_with_bytes_in_use("sync", true)
);
});

Просмотреть файл

@ -19,9 +19,13 @@ add_task(async function setup() {
});
add_task(async function test_contentscript_storage_sync() {
await test_contentscript_storage("sync");
return runWithPrefs([[STORAGE_SYNC_PREF, true]], () =>
test_contentscript_storage("sync")
);
});
add_task(async function test_contentscript_storage_no_bytes_in_use() {
await test_contentscript_storage_area_with_bytes_in_use("sync", false);
return runWithPrefs([[STORAGE_SYNC_PREF, true]], () =>
test_contentscript_storage_area_with_bytes_in_use("sync", false)
);
});

Просмотреть файл

@ -10,18 +10,26 @@ add_task(async function setup() {
await ExtensionTestUtils.startAddonManager();
});
add_task(test_config_flag_needed);
add_task(test_sync_reloading_extensions_works);
add_task(async function test_storage_sync() {
await test_background_page_storage("sync");
add_task(function test_storage_sync() {
return runWithPrefs([[STORAGE_SYNC_PREF, true]], () =>
test_background_page_storage("sync")
);
});
add_task(test_storage_sync_requires_real_id);
add_task(async function test_bytes_in_use() {
await test_background_storage_area_with_bytes_in_use("sync", true);
add_task(function test_bytes_in_use() {
return runWithPrefs([[STORAGE_SYNC_PREF, true]], () =>
test_background_storage_area_with_bytes_in_use("sync", true)
);
});
add_task(async function test_storage_onChanged_event_page() {
await test_storage_change_event_page("sync");
add_task(function test_storage_onChanged_event_page() {
return runWithPrefs([[STORAGE_SYNC_PREF, true]], () =>
test_storage_change_event_page("sync")
);
});

Просмотреть файл

@ -483,7 +483,7 @@ async function withServer(f) {
// KintoServer. This is meant as a workaround for eslint's refusal to
// let me have 5 nested callbacks.
async function withContextAndServer(f) {
await withContext(async function (context) {
await withSyncContext(async function (context) {
await withServer(async function (server) {
await f(context, server);
});
@ -1396,7 +1396,7 @@ add_task(async function checkSyncKeyRing_overwrites_on_conflict() {
// overwrite it with our keys.
const extensionId = uuid();
let extensionKey;
await withContext(async function () {
await withSyncContext(async function () {
await withServer(async function (server) {
// The old device has this kbHash, which is very similar to the
// current kbHash but with the last character changed.
@ -1500,7 +1500,7 @@ add_task(async function checkSyncKeyRing_flushes_on_uuid_change() {
// keyring, so reset sync state and reupload everything.
const extensionId = uuid();
const extension = { id: extensionId };
await withContext(async function (context) {
await withSyncContext(async function (context) {
await withServer(async function (server) {
server.installCollection("storage-sync-crypto");
server.installDeleteBucket();
@ -2284,19 +2284,26 @@ add_task(async function test_storage_sync_pushes_deletes() {
});
// Some sync tests shared between implementations.
add_task(test_config_flag_needed);
add_task(test_sync_reloading_extensions_works);
add_task(async function test_storage_sync() {
await test_background_page_storage("sync");
add_task(function test_storage_sync() {
return runWithPrefs([[STORAGE_SYNC_PREF, true]], () =>
test_background_page_storage("sync")
);
});
add_task(test_storage_sync_requires_real_id);
add_task(async function test_storage_sync_with_bytes_in_use() {
await test_background_storage_area_with_bytes_in_use("sync", false);
add_task(function test_storage_sync_with_bytes_in_use() {
return runWithPrefs([[STORAGE_SYNC_PREF, true]], () =>
test_background_storage_area_with_bytes_in_use("sync", false)
);
});
add_task(async function test_storage_onChanged_event_page() {
await test_storage_change_event_page("sync");
add_task(function test_storage_onChanged_event_page() {
return runWithPrefs([[STORAGE_SYNC_PREF, true]], () =>
test_storage_change_event_page("sync")
);
});

Просмотреть файл

@ -43,6 +43,12 @@ run-if = ["os == 'android'"] # Android has no remote extensions, Bug 1535365
["test_ExtensionShortcutKeyMap.js"]
["test_ExtensionStorageSync_migration_kinto.js"]
skip-if = [
"os == 'android'", # Not shipped on Android
"condprof", # Bug 1769184 - by design for now
]
["test_MatchPattern.js"]
["test_OriginControls.js"]

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -10,7 +10,6 @@ EXTRA_JS_MODULES += [
"generated/RustSuggest.sys.mjs",
"generated/RustSync15.sys.mjs",
"generated/RustTabs.sys.mjs",
"generated/RustWebextstorage.sys.mjs",
]
with Files("**"):

Просмотреть файл

@ -47,10 +47,6 @@ main = [
"RelevancyStore.interrupt",
]
[webext-storage]
crate_name = "webext_storage"
udl_file = "third_party/rust/webext-storage/src/webext-storage.udl"
[remote_settings]
crate_name = "remote_settings"
udl_file = "third_party/rust/remote_settings/src/remote_settings.udl"

Просмотреть файл

@ -376,7 +376,7 @@ export function add(a,b) {
throw e;
}
return UniFFIScaffolding.callAsync(
82, // arithmetic:uniffi_arithmetical_fn_func_add
59, // arithmetic:uniffi_arithmetical_fn_func_add
FfiConverterU64.lower(a),
FfiConverterU64.lower(b),
)
@ -410,7 +410,7 @@ export function div(dividend,divisor) {
throw e;
}
return UniFFIScaffolding.callAsync(
83, // arithmetic:uniffi_arithmetical_fn_func_div
60, // arithmetic:uniffi_arithmetical_fn_func_div
FfiConverterU64.lower(dividend),
FfiConverterU64.lower(divisor),
)
@ -444,7 +444,7 @@ export function equal(a,b) {
throw e;
}
return UniFFIScaffolding.callAsync(
84, // arithmetic:uniffi_arithmetical_fn_func_equal
61, // arithmetic:uniffi_arithmetical_fn_func_equal
FfiConverterU64.lower(a),
FfiConverterU64.lower(b),
)
@ -478,7 +478,7 @@ export function sub(a,b) {
throw e;
}
return UniFFIScaffolding.callAsync(
85, // arithmetic:uniffi_arithmetical_fn_func_sub
62, // arithmetic:uniffi_arithmetical_fn_func_sub
FfiConverterU64.lower(a),
FfiConverterU64.lower(b),
)

Просмотреть файл

@ -454,7 +454,7 @@ export function getCustomTypesDemo(demo) {
throw e;
}
return UniFFIScaffolding.callAsync(
86, // custom_types:uniffi_uniffi_custom_types_fn_func_get_custom_types_demo
63, // custom_types:uniffi_uniffi_custom_types_fn_func_get_custom_types_demo
FfiConverterOptionalTypeCustomTypesDemo.lower(demo),
)
}

Просмотреть файл

@ -380,7 +380,7 @@ export function gradient(value) {
throw e;
}
return UniFFIScaffolding.callAsync(
87, // external_types:uniffi_uniffi_fixture_external_types_fn_func_gradient
64, // external_types:uniffi_uniffi_fixture_external_types_fn_func_gradient
FfiConverterOptionalTypeLine.lower(value),
)
}
@ -413,7 +413,7 @@ export function intersection(ln1,ln2) {
throw e;
}
return UniFFIScaffolding.callAsync(
88, // external_types:uniffi_uniffi_fixture_external_types_fn_func_intersection
65, // external_types:uniffi_uniffi_fixture_external_types_fn_func_intersection
FfiConverterTypeLine.lower(ln1),
FfiConverterTypeLine.lower(ln2),
)

Просмотреть файл

@ -473,7 +473,7 @@ export function gradient(ln) {
throw e;
}
return UniFFIScaffolding.callAsync(
93, // geometry:uniffi_uniffi_geometry_fn_func_gradient
70, // geometry:uniffi_uniffi_geometry_fn_func_gradient
FfiConverterTypeLine.lower(ln),
)
}
@ -506,7 +506,7 @@ export function intersection(ln1,ln2) {
throw e;
}
return UniFFIScaffolding.callAsync(
94, // geometry:uniffi_uniffi_geometry_fn_func_intersection
71, // geometry:uniffi_uniffi_geometry_fn_func_intersection
FfiConverterTypeLine.lower(ln1),
FfiConverterTypeLine.lower(ln2),
)

Просмотреть файл

@ -158,7 +158,7 @@ class ArrayBufferDataStream {
// UniFFI Pointers are **always** 8 bytes long. That is enforced
// by the C++ and Rust Scaffolding code.
readPointerSingletonObject() {
const pointerId = 9; // refcounts:SingletonObject
const pointerId = 7; // refcounts:SingletonObject
const res = UniFFIScaffolding.readPointer(pointerId, this.dataView.buffer, this.pos);
this.pos += 8;
return res;
@ -168,7 +168,7 @@ class ArrayBufferDataStream {
// UniFFI Pointers are **always** 8 bytes long. That is enforced
// by the C++ and Rust Scaffolding code.
writePointerSingletonObject(value) {
const pointerId = 9; // refcounts:SingletonObject
const pointerId = 7; // refcounts:SingletonObject
UniFFIScaffolding.writePointer(pointerId, value, this.dataView.buffer, this.pos);
this.pos += 8;
}
@ -320,7 +320,7 @@ export class SingletonObject {
const liftError = null;
const functionCall = () => {
return UniFFIScaffolding.callSync(
90, // refcounts:uniffi_uniffi_fixture_refcounts_fn_method_singletonobject_method
67, // refcounts:uniffi_uniffi_fixture_refcounts_fn_method_singletonobject_method
FfiConverterTypeSingletonObject.lower(this),
)
}
@ -368,7 +368,7 @@ export function getJsRefcount() {
const liftError = null;
const functionCall = () => {
return UniFFIScaffolding.callSync(
91, // refcounts:uniffi_uniffi_fixture_refcounts_fn_func_get_js_refcount
68, // refcounts:uniffi_uniffi_fixture_refcounts_fn_func_get_js_refcount
)
}
return handleRustResult(functionCall(), liftResult, liftError);
@ -380,7 +380,7 @@ export function getSingleton() {
const liftError = null;
const functionCall = () => {
return UniFFIScaffolding.callSync(
92, // refcounts:uniffi_uniffi_fixture_refcounts_fn_func_get_singleton
69, // refcounts:uniffi_uniffi_fixture_refcounts_fn_func_get_singleton
)
}
return handleRustResult(functionCall(), liftResult, liftError);

Просмотреть файл

@ -158,7 +158,7 @@ class ArrayBufferDataStream {
// UniFFI Pointers are **always** 8 bytes long. That is enforced
// by the C++ and Rust Scaffolding code.
readPointerOptionneur() {
const pointerId = 10; // rondpoint:Optionneur
const pointerId = 8; // rondpoint:Optionneur
const res = UniFFIScaffolding.readPointer(pointerId, this.dataView.buffer, this.pos);
this.pos += 8;
return res;
@ -168,7 +168,7 @@ class ArrayBufferDataStream {
// UniFFI Pointers are **always** 8 bytes long. That is enforced
// by the C++ and Rust Scaffolding code.
writePointerOptionneur(value) {
const pointerId = 10; // rondpoint:Optionneur
const pointerId = 8; // rondpoint:Optionneur
UniFFIScaffolding.writePointer(pointerId, value, this.dataView.buffer, this.pos);
this.pos += 8;
}
@ -178,7 +178,7 @@ class ArrayBufferDataStream {
// UniFFI Pointers are **always** 8 bytes long. That is enforced
// by the C++ and Rust Scaffolding code.
readPointerRetourneur() {
const pointerId = 11; // rondpoint:Retourneur
const pointerId = 9; // rondpoint:Retourneur
const res = UniFFIScaffolding.readPointer(pointerId, this.dataView.buffer, this.pos);
this.pos += 8;
return res;
@ -188,7 +188,7 @@ class ArrayBufferDataStream {
// UniFFI Pointers are **always** 8 bytes long. That is enforced
// by the C++ and Rust Scaffolding code.
writePointerRetourneur(value) {
const pointerId = 11; // rondpoint:Retourneur
const pointerId = 9; // rondpoint:Retourneur
UniFFIScaffolding.writePointer(pointerId, value, this.dataView.buffer, this.pos);
this.pos += 8;
}
@ -198,7 +198,7 @@ class ArrayBufferDataStream {
// UniFFI Pointers are **always** 8 bytes long. That is enforced
// by the C++ and Rust Scaffolding code.
readPointerStringifier() {
const pointerId = 12; // rondpoint:Stringifier
const pointerId = 10; // rondpoint:Stringifier
const res = UniFFIScaffolding.readPointer(pointerId, this.dataView.buffer, this.pos);
this.pos += 8;
return res;
@ -208,7 +208,7 @@ class ArrayBufferDataStream {
// UniFFI Pointers are **always** 8 bytes long. That is enforced
// by the C++ and Rust Scaffolding code.
writePointerStringifier(value) {
const pointerId = 12; // rondpoint:Stringifier
const pointerId = 10; // rondpoint:Stringifier
UniFFIScaffolding.writePointer(pointerId, value, this.dataView.buffer, this.pos);
this.pos += 8;
}
@ -619,7 +619,7 @@ export class Optionneur {
const liftError = null;
const functionCall = () => {
return UniFFIScaffolding.callAsync(
96, // rondpoint:uniffi_uniffi_rondpoint_fn_constructor_optionneur_new
73, // rondpoint:uniffi_uniffi_rondpoint_fn_constructor_optionneur_new
)
}
try {
@ -641,7 +641,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
97, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_boolean
74, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_boolean
FfiConverterTypeOptionneur.lower(this),
FfiConverterBool.lower(value),
)
@ -666,7 +666,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
98, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_enum
75, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_enum
FfiConverterTypeOptionneur.lower(this),
FfiConverterTypeEnumeration.lower(value),
)
@ -691,7 +691,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
99, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_f32
76, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_f32
FfiConverterTypeOptionneur.lower(this),
FfiConverterF32.lower(value),
)
@ -716,7 +716,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
100, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_f64
77, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_f64
FfiConverterTypeOptionneur.lower(this),
FfiConverterF64.lower(value),
)
@ -741,7 +741,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
101, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i16_dec
78, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i16_dec
FfiConverterTypeOptionneur.lower(this),
FfiConverterI16.lower(value),
)
@ -766,7 +766,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
102, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i16_hex
79, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i16_hex
FfiConverterTypeOptionneur.lower(this),
FfiConverterI16.lower(value),
)
@ -791,7 +791,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
103, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i32_dec
80, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i32_dec
FfiConverterTypeOptionneur.lower(this),
FfiConverterI32.lower(value),
)
@ -816,7 +816,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
104, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i32_hex
81, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i32_hex
FfiConverterTypeOptionneur.lower(this),
FfiConverterI32.lower(value),
)
@ -841,7 +841,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
105, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i64_dec
82, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i64_dec
FfiConverterTypeOptionneur.lower(this),
FfiConverterI64.lower(value),
)
@ -866,7 +866,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
106, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i64_hex
83, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i64_hex
FfiConverterTypeOptionneur.lower(this),
FfiConverterI64.lower(value),
)
@ -891,7 +891,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
107, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i8_dec
84, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i8_dec
FfiConverterTypeOptionneur.lower(this),
FfiConverterI8.lower(value),
)
@ -916,7 +916,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
108, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i8_hex
85, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_i8_hex
FfiConverterTypeOptionneur.lower(this),
FfiConverterI8.lower(value),
)
@ -941,7 +941,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
109, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_null
86, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_null
FfiConverterTypeOptionneur.lower(this),
FfiConverterOptionalstring.lower(value),
)
@ -966,7 +966,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
110, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_sequence
87, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_sequence
FfiConverterTypeOptionneur.lower(this),
FfiConverterSequencestring.lower(value),
)
@ -991,7 +991,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
111, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_string
88, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_string
FfiConverterTypeOptionneur.lower(this),
FfiConverterString.lower(value),
)
@ -1016,7 +1016,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
112, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u16_dec
89, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u16_dec
FfiConverterTypeOptionneur.lower(this),
FfiConverterU16.lower(value),
)
@ -1041,7 +1041,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
113, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u16_hex
90, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u16_hex
FfiConverterTypeOptionneur.lower(this),
FfiConverterU16.lower(value),
)
@ -1066,7 +1066,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
114, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u32_dec
91, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u32_dec
FfiConverterTypeOptionneur.lower(this),
FfiConverterU32.lower(value),
)
@ -1091,7 +1091,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
115, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u32_hex
92, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u32_hex
FfiConverterTypeOptionneur.lower(this),
FfiConverterU32.lower(value),
)
@ -1116,7 +1116,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
116, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u32_oct
93, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u32_oct
FfiConverterTypeOptionneur.lower(this),
FfiConverterU32.lower(value),
)
@ -1141,7 +1141,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
117, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u64_dec
94, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u64_dec
FfiConverterTypeOptionneur.lower(this),
FfiConverterU64.lower(value),
)
@ -1166,7 +1166,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
118, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u64_hex
95, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u64_hex
FfiConverterTypeOptionneur.lower(this),
FfiConverterU64.lower(value),
)
@ -1191,7 +1191,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
119, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u8_dec
96, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u8_dec
FfiConverterTypeOptionneur.lower(this),
FfiConverterU8.lower(value),
)
@ -1216,7 +1216,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
120, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u8_hex
97, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_u8_hex
FfiConverterTypeOptionneur.lower(this),
FfiConverterU8.lower(value),
)
@ -1241,7 +1241,7 @@ export class Optionneur {
throw e;
}
return UniFFIScaffolding.callAsync(
121, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_zero
98, // rondpoint:uniffi_uniffi_rondpoint_fn_method_optionneur_sinon_zero
FfiConverterTypeOptionneur.lower(this),
FfiConverterOptionali32.lower(value),
)
@ -1308,7 +1308,7 @@ export class Retourneur {
const liftError = null;
const functionCall = () => {
return UniFFIScaffolding.callAsync(
123, // rondpoint:uniffi_uniffi_rondpoint_fn_constructor_retourneur_new
100, // rondpoint:uniffi_uniffi_rondpoint_fn_constructor_retourneur_new
)
}
try {
@ -1330,7 +1330,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
124, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_boolean
101, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_boolean
FfiConverterTypeRetourneur.lower(this),
FfiConverterBool.lower(value),
)
@ -1355,7 +1355,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
125, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_double
102, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_double
FfiConverterTypeRetourneur.lower(this),
FfiConverterF64.lower(value),
)
@ -1380,7 +1380,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
126, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_float
103, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_float
FfiConverterTypeRetourneur.lower(this),
FfiConverterF32.lower(value),
)
@ -1405,7 +1405,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
127, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_i16
104, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_i16
FfiConverterTypeRetourneur.lower(this),
FfiConverterI16.lower(value),
)
@ -1430,7 +1430,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
128, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_i32
105, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_i32
FfiConverterTypeRetourneur.lower(this),
FfiConverterI32.lower(value),
)
@ -1455,7 +1455,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
129, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_i64
106, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_i64
FfiConverterTypeRetourneur.lower(this),
FfiConverterI64.lower(value),
)
@ -1480,7 +1480,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
130, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_i8
107, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_i8
FfiConverterTypeRetourneur.lower(this),
FfiConverterI8.lower(value),
)
@ -1505,7 +1505,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
131, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_nombres
108, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_nombres
FfiConverterTypeRetourneur.lower(this),
FfiConverterTypeDictionnaireNombres.lower(value),
)
@ -1530,7 +1530,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
132, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_nombres_signes
109, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_nombres_signes
FfiConverterTypeRetourneur.lower(this),
FfiConverterTypeDictionnaireNombresSignes.lower(value),
)
@ -1555,7 +1555,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
133, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_optionneur_dictionnaire
110, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_optionneur_dictionnaire
FfiConverterTypeRetourneur.lower(this),
FfiConverterTypeOptionneurDictionnaire.lower(value),
)
@ -1580,7 +1580,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
134, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_string
111, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_string
FfiConverterTypeRetourneur.lower(this),
FfiConverterString.lower(value),
)
@ -1605,7 +1605,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
135, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_u16
112, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_u16
FfiConverterTypeRetourneur.lower(this),
FfiConverterU16.lower(value),
)
@ -1630,7 +1630,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
136, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_u32
113, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_u32
FfiConverterTypeRetourneur.lower(this),
FfiConverterU32.lower(value),
)
@ -1655,7 +1655,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
137, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_u64
114, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_u64
FfiConverterTypeRetourneur.lower(this),
FfiConverterU64.lower(value),
)
@ -1680,7 +1680,7 @@ export class Retourneur {
throw e;
}
return UniFFIScaffolding.callAsync(
138, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_u8
115, // rondpoint:uniffi_uniffi_rondpoint_fn_method_retourneur_identique_u8
FfiConverterTypeRetourneur.lower(this),
FfiConverterU8.lower(value),
)
@ -1747,7 +1747,7 @@ export class Stringifier {
const liftError = null;
const functionCall = () => {
return UniFFIScaffolding.callAsync(
140, // rondpoint:uniffi_uniffi_rondpoint_fn_constructor_stringifier_new
117, // rondpoint:uniffi_uniffi_rondpoint_fn_constructor_stringifier_new
)
}
try {
@ -1769,7 +1769,7 @@ export class Stringifier {
throw e;
}
return UniFFIScaffolding.callAsync(
141, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_boolean
118, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_boolean
FfiConverterTypeStringifier.lower(this),
FfiConverterBool.lower(value),
)
@ -1794,7 +1794,7 @@ export class Stringifier {
throw e;
}
return UniFFIScaffolding.callAsync(
142, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_double
119, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_double
FfiConverterTypeStringifier.lower(this),
FfiConverterF64.lower(value),
)
@ -1819,7 +1819,7 @@ export class Stringifier {
throw e;
}
return UniFFIScaffolding.callAsync(
143, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_float
120, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_float
FfiConverterTypeStringifier.lower(this),
FfiConverterF32.lower(value),
)
@ -1844,7 +1844,7 @@ export class Stringifier {
throw e;
}
return UniFFIScaffolding.callAsync(
144, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_i16
121, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_i16
FfiConverterTypeStringifier.lower(this),
FfiConverterI16.lower(value),
)
@ -1869,7 +1869,7 @@ export class Stringifier {
throw e;
}
return UniFFIScaffolding.callAsync(
145, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_i32
122, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_i32
FfiConverterTypeStringifier.lower(this),
FfiConverterI32.lower(value),
)
@ -1894,7 +1894,7 @@ export class Stringifier {
throw e;
}
return UniFFIScaffolding.callAsync(
146, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_i64
123, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_i64
FfiConverterTypeStringifier.lower(this),
FfiConverterI64.lower(value),
)
@ -1919,7 +1919,7 @@ export class Stringifier {
throw e;
}
return UniFFIScaffolding.callAsync(
147, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_i8
124, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_i8
FfiConverterTypeStringifier.lower(this),
FfiConverterI8.lower(value),
)
@ -1944,7 +1944,7 @@ export class Stringifier {
throw e;
}
return UniFFIScaffolding.callAsync(
148, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_u16
125, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_u16
FfiConverterTypeStringifier.lower(this),
FfiConverterU16.lower(value),
)
@ -1969,7 +1969,7 @@ export class Stringifier {
throw e;
}
return UniFFIScaffolding.callAsync(
149, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_u32
126, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_u32
FfiConverterTypeStringifier.lower(this),
FfiConverterU32.lower(value),
)
@ -1994,7 +1994,7 @@ export class Stringifier {
throw e;
}
return UniFFIScaffolding.callAsync(
150, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_u64
127, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_u64
FfiConverterTypeStringifier.lower(this),
FfiConverterU64.lower(value),
)
@ -2019,7 +2019,7 @@ export class Stringifier {
throw e;
}
return UniFFIScaffolding.callAsync(
151, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_u8
128, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_to_string_u8
FfiConverterTypeStringifier.lower(this),
FfiConverterU8.lower(value),
)
@ -2044,7 +2044,7 @@ export class Stringifier {
throw e;
}
return UniFFIScaffolding.callAsync(
152, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_well_known_string
129, // rondpoint:uniffi_uniffi_rondpoint_fn_method_stringifier_well_known_string
FfiConverterTypeStringifier.lower(this),
FfiConverterString.lower(value),
)
@ -3283,7 +3283,7 @@ export function copieCarte(c) {
throw e;
}
return UniFFIScaffolding.callAsync(
153, // rondpoint:uniffi_uniffi_rondpoint_fn_func_copie_carte
130, // rondpoint:uniffi_uniffi_rondpoint_fn_func_copie_carte
FfiConverterMapStringTypeEnumerationAvecDonnees.lower(c),
)
}
@ -3308,7 +3308,7 @@ export function copieDictionnaire(d) {
throw e;
}
return UniFFIScaffolding.callAsync(
154, // rondpoint:uniffi_uniffi_rondpoint_fn_func_copie_dictionnaire
131, // rondpoint:uniffi_uniffi_rondpoint_fn_func_copie_dictionnaire
FfiConverterTypeDictionnaire.lower(d),
)
}
@ -3333,7 +3333,7 @@ export function copieEnumeration(e) {
throw e;
}
return UniFFIScaffolding.callAsync(
155, // rondpoint:uniffi_uniffi_rondpoint_fn_func_copie_enumeration
132, // rondpoint:uniffi_uniffi_rondpoint_fn_func_copie_enumeration
FfiConverterTypeEnumeration.lower(e),
)
}
@ -3358,7 +3358,7 @@ export function copieEnumerations(e) {
throw e;
}
return UniFFIScaffolding.callAsync(
156, // rondpoint:uniffi_uniffi_rondpoint_fn_func_copie_enumerations
133, // rondpoint:uniffi_uniffi_rondpoint_fn_func_copie_enumerations
FfiConverterSequenceTypeEnumeration.lower(e),
)
}
@ -3383,7 +3383,7 @@ export function switcheroo(b) {
throw e;
}
return UniFFIScaffolding.callAsync(
157, // rondpoint:uniffi_uniffi_rondpoint_fn_func_switcheroo
134, // rondpoint:uniffi_uniffi_rondpoint_fn_func_switcheroo
FfiConverterBool.lower(b),
)
}

Просмотреть файл

@ -158,7 +158,7 @@ class ArrayBufferDataStream {
// UniFFI Pointers are **always** 8 bytes long. That is enforced
// by the C++ and Rust Scaffolding code.
readPointerSprite() {
const pointerId = 13; // sprites:Sprite
const pointerId = 11; // sprites:Sprite
const res = UniFFIScaffolding.readPointer(pointerId, this.dataView.buffer, this.pos);
this.pos += 8;
return res;
@ -168,7 +168,7 @@ class ArrayBufferDataStream {
// UniFFI Pointers are **always** 8 bytes long. That is enforced
// by the C++ and Rust Scaffolding code.
writePointerSprite(value) {
const pointerId = 13; // sprites:Sprite
const pointerId = 11; // sprites:Sprite
UniFFIScaffolding.writePointer(pointerId, value, this.dataView.buffer, this.pos);
this.pos += 8;
}
@ -324,7 +324,7 @@ export class Sprite {
throw e;
}
return UniFFIScaffolding.callAsync(
159, // sprites:uniffi_uniffi_sprites_fn_constructor_sprite_new
136, // sprites:uniffi_uniffi_sprites_fn_constructor_sprite_new
FfiConverterOptionalTypePoint.lower(initialPosition),
)
}
@ -360,7 +360,7 @@ export class Sprite {
throw e;
}
return UniFFIScaffolding.callAsync(
160, // sprites:uniffi_uniffi_sprites_fn_constructor_sprite_new_relative_to
137, // sprites:uniffi_uniffi_sprites_fn_constructor_sprite_new_relative_to
FfiConverterTypePoint.lower(reference),
FfiConverterTypeVector.lower(direction),
)
@ -376,7 +376,7 @@ export class Sprite {
const liftError = null;
const functionCall = () => {
return UniFFIScaffolding.callAsync(
161, // sprites:uniffi_uniffi_sprites_fn_method_sprite_get_position
138, // sprites:uniffi_uniffi_sprites_fn_method_sprite_get_position
FfiConverterTypeSprite.lower(this),
)
}
@ -400,7 +400,7 @@ export class Sprite {
throw e;
}
return UniFFIScaffolding.callAsync(
162, // sprites:uniffi_uniffi_sprites_fn_method_sprite_move_by
139, // sprites:uniffi_uniffi_sprites_fn_method_sprite_move_by
FfiConverterTypeSprite.lower(this),
FfiConverterTypeVector.lower(direction),
)
@ -425,7 +425,7 @@ export class Sprite {
throw e;
}
return UniFFIScaffolding.callAsync(
163, // sprites:uniffi_uniffi_sprites_fn_method_sprite_move_to
140, // sprites:uniffi_uniffi_sprites_fn_method_sprite_move_to
FfiConverterTypeSprite.lower(this),
FfiConverterTypePoint.lower(position),
)
@ -677,7 +677,7 @@ export function translate(position,direction) {
throw e;
}
return UniFFIScaffolding.callAsync(
164, // sprites:uniffi_uniffi_sprites_fn_func_translate
141, // sprites:uniffi_uniffi_sprites_fn_func_translate
FfiConverterTypePoint.lower(position),
FfiConverterTypeVector.lower(direction),
)

Просмотреть файл

@ -158,7 +158,7 @@ class ArrayBufferDataStream {
// UniFFI Pointers are **always** 8 bytes long. That is enforced
// by the C++ and Rust Scaffolding code.
readPointerTodoList() {
const pointerId = 14; // todolist:TodoList
const pointerId = 12; // todolist:TodoList
const res = UniFFIScaffolding.readPointer(pointerId, this.dataView.buffer, this.pos);
this.pos += 8;
return res;
@ -168,7 +168,7 @@ class ArrayBufferDataStream {
// UniFFI Pointers are **always** 8 bytes long. That is enforced
// by the C++ and Rust Scaffolding code.
writePointerTodoList(value) {
const pointerId = 14; // todolist:TodoList
const pointerId = 12; // todolist:TodoList
UniFFIScaffolding.writePointer(pointerId, value, this.dataView.buffer, this.pos);
this.pos += 8;
}
@ -297,7 +297,7 @@ export class TodoList {
const liftError = null;
const functionCall = () => {
return UniFFIScaffolding.callAsync(
166, // todolist:uniffi_uniffi_todolist_fn_constructor_todolist_new
143, // todolist:uniffi_uniffi_todolist_fn_constructor_todolist_new
)
}
try {
@ -319,7 +319,7 @@ export class TodoList {
throw e;
}
return UniFFIScaffolding.callAsync(
167, // todolist:uniffi_uniffi_todolist_fn_method_todolist_add_entries
144, // todolist:uniffi_uniffi_todolist_fn_method_todolist_add_entries
FfiConverterTypeTodoList.lower(this),
FfiConverterSequenceTypeTodoEntry.lower(entries),
)
@ -344,7 +344,7 @@ export class TodoList {
throw e;
}
return UniFFIScaffolding.callAsync(
168, // todolist:uniffi_uniffi_todolist_fn_method_todolist_add_entry
145, // todolist:uniffi_uniffi_todolist_fn_method_todolist_add_entry
FfiConverterTypeTodoList.lower(this),
FfiConverterTypeTodoEntry.lower(entry),
)
@ -369,7 +369,7 @@ export class TodoList {
throw e;
}
return UniFFIScaffolding.callAsync(
169, // todolist:uniffi_uniffi_todolist_fn_method_todolist_add_item
146, // todolist:uniffi_uniffi_todolist_fn_method_todolist_add_item
FfiConverterTypeTodoList.lower(this),
FfiConverterString.lower(todo),
)
@ -394,7 +394,7 @@ export class TodoList {
throw e;
}
return UniFFIScaffolding.callAsync(
170, // todolist:uniffi_uniffi_todolist_fn_method_todolist_add_items
147, // todolist:uniffi_uniffi_todolist_fn_method_todolist_add_items
FfiConverterTypeTodoList.lower(this),
FfiConverterSequencestring.lower(items),
)
@ -419,7 +419,7 @@ export class TodoList {
throw e;
}
return UniFFIScaffolding.callAsync(
171, // todolist:uniffi_uniffi_todolist_fn_method_todolist_clear_item
148, // todolist:uniffi_uniffi_todolist_fn_method_todolist_clear_item
FfiConverterTypeTodoList.lower(this),
FfiConverterString.lower(todo),
)
@ -436,7 +436,7 @@ export class TodoList {
const liftError = null;
const functionCall = () => {
return UniFFIScaffolding.callAsync(
172, // todolist:uniffi_uniffi_todolist_fn_method_todolist_get_entries
149, // todolist:uniffi_uniffi_todolist_fn_method_todolist_get_entries
FfiConverterTypeTodoList.lower(this),
)
}
@ -452,7 +452,7 @@ export class TodoList {
const liftError = (data) => FfiConverterTypeTodoError.lift(data);
const functionCall = () => {
return UniFFIScaffolding.callAsync(
173, // todolist:uniffi_uniffi_todolist_fn_method_todolist_get_first
150, // todolist:uniffi_uniffi_todolist_fn_method_todolist_get_first
FfiConverterTypeTodoList.lower(this),
)
}
@ -468,7 +468,7 @@ export class TodoList {
const liftError = null;
const functionCall = () => {
return UniFFIScaffolding.callAsync(
174, // todolist:uniffi_uniffi_todolist_fn_method_todolist_get_items
151, // todolist:uniffi_uniffi_todolist_fn_method_todolist_get_items
FfiConverterTypeTodoList.lower(this),
)
}
@ -484,7 +484,7 @@ export class TodoList {
const liftError = (data) => FfiConverterTypeTodoError.lift(data);
const functionCall = () => {
return UniFFIScaffolding.callAsync(
175, // todolist:uniffi_uniffi_todolist_fn_method_todolist_get_last
152, // todolist:uniffi_uniffi_todolist_fn_method_todolist_get_last
FfiConverterTypeTodoList.lower(this),
)
}
@ -500,7 +500,7 @@ export class TodoList {
const liftError = (data) => FfiConverterTypeTodoError.lift(data);
const functionCall = () => {
return UniFFIScaffolding.callAsync(
176, // todolist:uniffi_uniffi_todolist_fn_method_todolist_get_last_entry
153, // todolist:uniffi_uniffi_todolist_fn_method_todolist_get_last_entry
FfiConverterTypeTodoList.lower(this),
)
}
@ -516,7 +516,7 @@ export class TodoList {
const liftError = null;
const functionCall = () => {
return UniFFIScaffolding.callAsync(
177, // todolist:uniffi_uniffi_todolist_fn_method_todolist_make_default
154, // todolist:uniffi_uniffi_todolist_fn_method_todolist_make_default
FfiConverterTypeTodoList.lower(this),
)
}
@ -879,7 +879,7 @@ export function createEntryWith(todo) {
throw e;
}
return UniFFIScaffolding.callAsync(
178, // todolist:uniffi_uniffi_todolist_fn_func_create_entry_with
155, // todolist:uniffi_uniffi_todolist_fn_func_create_entry_with
FfiConverterString.lower(todo),
)
}
@ -896,7 +896,7 @@ export function getDefaultList() {
const liftError = null;
const functionCall = () => {
return UniFFIScaffolding.callAsync(
179, // todolist:uniffi_uniffi_todolist_fn_func_get_default_list
156, // todolist:uniffi_uniffi_todolist_fn_func_get_default_list
)
}
try {
@ -920,7 +920,7 @@ export function setDefaultList(list) {
throw e;
}
return UniFFIScaffolding.callAsync(
180, // todolist:uniffi_uniffi_todolist_fn_func_set_default_list
157, // todolist:uniffi_uniffi_todolist_fn_func_set_default_list
FfiConverterTypeTodoList.lower(list),
)
}

Просмотреть файл

@ -4173,283 +4173,283 @@ public:
UniquePtr<UniffiHandlerBase> UniFFIFixturesGetHandler(uint64_t aId) {
switch (aId) {
case 82: {
case 59: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiArithmeticalFnFuncAdd>();
}
case 83: {
case 60: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiArithmeticalFnFuncDiv>();
}
case 84: {
case 61: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiArithmeticalFnFuncEqual>();
}
case 85: {
case 62: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiArithmeticalFnFuncSub>();
}
case 86: {
case 63: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiCustomTypesFnFuncGetCustomTypesDemo>();
}
case 87: {
case 64: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiFixtureExternalTypesFnFuncGradient>();
}
case 88: {
case 65: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiFixtureExternalTypesFnFuncIntersection>();
}
case 90: {
case 67: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiFixtureRefcountsFnMethodSingletonobjectMethod>();
}
case 91: {
case 68: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiFixtureRefcountsFnFuncGetJsRefcount>();
}
case 92: {
case 69: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiFixtureRefcountsFnFuncGetSingleton>();
}
case 93: {
case 70: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiGeometryFnFuncGradient>();
}
case 94: {
case 71: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiGeometryFnFuncIntersection>();
}
case 96: {
case 73: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnConstructorOptionneurNew>();
}
case 97: {
case 74: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonBoolean>();
}
case 98: {
case 75: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonEnum>();
}
case 99: {
case 76: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonF32>();
}
case 100: {
case 77: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonF64>();
}
case 101: {
case 78: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonI16Dec>();
}
case 102: {
case 79: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonI16Hex>();
}
case 103: {
case 80: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonI32Dec>();
}
case 104: {
case 81: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonI32Hex>();
}
case 105: {
case 82: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonI64Dec>();
}
case 106: {
case 83: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonI64Hex>();
}
case 107: {
case 84: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonI8Dec>();
}
case 108: {
case 85: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonI8Hex>();
}
case 109: {
case 86: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonNull>();
}
case 110: {
case 87: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonSequence>();
}
case 111: {
case 88: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonString>();
}
case 112: {
case 89: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonU16Dec>();
}
case 113: {
case 90: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonU16Hex>();
}
case 114: {
case 91: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonU32Dec>();
}
case 115: {
case 92: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonU32Hex>();
}
case 116: {
case 93: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonU32Oct>();
}
case 117: {
case 94: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonU64Dec>();
}
case 118: {
case 95: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonU64Hex>();
}
case 119: {
case 96: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonU8Dec>();
}
case 120: {
case 97: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonU8Hex>();
}
case 121: {
case 98: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodOptionneurSinonZero>();
}
case 123: {
case 100: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnConstructorRetourneurNew>();
}
case 124: {
case 101: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueBoolean>();
}
case 125: {
case 102: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueDouble>();
}
case 126: {
case 103: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueFloat>();
}
case 127: {
case 104: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueI16>();
}
case 128: {
case 105: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueI32>();
}
case 129: {
case 106: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueI64>();
}
case 130: {
case 107: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueI8>();
}
case 131: {
case 108: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueNombres>();
}
case 132: {
case 109: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueNombresSignes>();
}
case 133: {
case 110: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueOptionneurDictionnaire>();
}
case 134: {
case 111: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueString>();
}
case 135: {
case 112: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueU16>();
}
case 136: {
case 113: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueU32>();
}
case 137: {
case 114: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueU64>();
}
case 138: {
case 115: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodRetourneurIdentiqueU8>();
}
case 140: {
case 117: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnConstructorStringifierNew>();
}
case 141: {
case 118: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodStringifierToStringBoolean>();
}
case 142: {
case 119: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodStringifierToStringDouble>();
}
case 143: {
case 120: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodStringifierToStringFloat>();
}
case 144: {
case 121: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodStringifierToStringI16>();
}
case 145: {
case 122: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodStringifierToStringI32>();
}
case 146: {
case 123: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodStringifierToStringI64>();
}
case 147: {
case 124: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodStringifierToStringI8>();
}
case 148: {
case 125: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodStringifierToStringU16>();
}
case 149: {
case 126: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodStringifierToStringU32>();
}
case 150: {
case 127: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodStringifierToStringU64>();
}
case 151: {
case 128: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodStringifierToStringU8>();
}
case 152: {
case 129: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnMethodStringifierWellKnownString>();
}
case 153: {
case 130: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnFuncCopieCarte>();
}
case 154: {
case 131: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnFuncCopieDictionnaire>();
}
case 155: {
case 132: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnFuncCopieEnumeration>();
}
case 156: {
case 133: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnFuncCopieEnumerations>();
}
case 157: {
case 134: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiRondpointFnFuncSwitcheroo>();
}
case 159: {
case 136: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiSpritesFnConstructorSpriteNew>();
}
case 160: {
case 137: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiSpritesFnConstructorSpriteNewRelativeTo>();
}
case 161: {
case 138: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiSpritesFnMethodSpriteGetPosition>();
}
case 162: {
case 139: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiSpritesFnMethodSpriteMoveBy>();
}
case 163: {
case 140: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiSpritesFnMethodSpriteMoveTo>();
}
case 164: {
case 141: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiSpritesFnFuncTranslate>();
}
case 166: {
case 143: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnConstructorTodolistNew>();
}
case 167: {
case 144: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnMethodTodolistAddEntries>();
}
case 168: {
case 145: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnMethodTodolistAddEntry>();
}
case 169: {
case 146: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnMethodTodolistAddItem>();
}
case 170: {
case 147: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnMethodTodolistAddItems>();
}
case 171: {
case 148: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnMethodTodolistClearItem>();
}
case 172: {
case 149: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnMethodTodolistGetEntries>();
}
case 173: {
case 150: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnMethodTodolistGetFirst>();
}
case 174: {
case 151: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnMethodTodolistGetItems>();
}
case 175: {
case 152: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnMethodTodolistGetLast>();
}
case 176: {
case 153: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnMethodTodolistGetLastEntry>();
}
case 177: {
case 154: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnMethodTodolistMakeDefault>();
}
case 178: {
case 155: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnFuncCreateEntryWith>();
}
case 179: {
case 156: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnFuncGetDefaultList>();
}
case 180: {
case 157: {
return MakeUnique<ScaffoldingCallHandlerUniFFIFixturesUniffiUniffiTodolistFnFuncSetDefaultList>();
}
@ -4461,27 +4461,27 @@ UniquePtr<UniffiHandlerBase> UniFFIFixturesGetHandler(uint64_t aId) {
Maybe<already_AddRefed<UniFFIPointer>> UniFFIFixturesReadPointer(const GlobalObject& aGlobal, uint64_t aId, const ArrayBuffer& aArrayBuff, long aPosition, ErrorResult& aError) {
const UniFFIPointerType* type;
switch (aId) {
case 9: { // refcounts:SingletonObject
case 7: { // refcounts:SingletonObject
type = &kRefcountsSingletonObjectPointerType;
break;
}
case 10: { // rondpoint:Optionneur
case 8: { // rondpoint:Optionneur
type = &kRondpointOptionneurPointerType;
break;
}
case 11: { // rondpoint:Retourneur
case 9: { // rondpoint:Retourneur
type = &kRondpointRetourneurPointerType;
break;
}
case 12: { // rondpoint:Stringifier
case 10: { // rondpoint:Stringifier
type = &kRondpointStringifierPointerType;
break;
}
case 13: { // sprites:Sprite
case 11: { // sprites:Sprite
type = &kSpritesSpritePointerType;
break;
}
case 14: { // todolist:TodoList
case 12: { // todolist:TodoList
type = &kTodolistTodoListPointerType;
break;
}
@ -4494,27 +4494,27 @@ Maybe<already_AddRefed<UniFFIPointer>> UniFFIFixturesReadPointer(const GlobalObj
bool UniFFIFixturesWritePointer(const GlobalObject& aGlobal, uint64_t aId, const UniFFIPointer& aPtr, const ArrayBuffer& aArrayBuff, long aPosition, ErrorResult& aError) {
const UniFFIPointerType* type;
switch (aId) {
case 9: { // refcounts:SingletonObject
case 7: { // refcounts:SingletonObject
type = &kRefcountsSingletonObjectPointerType;
break;
}
case 10: { // rondpoint:Optionneur
case 8: { // rondpoint:Optionneur
type = &kRondpointOptionneurPointerType;
break;
}
case 11: { // rondpoint:Retourneur
case 9: { // rondpoint:Retourneur
type = &kRondpointRetourneurPointerType;
break;
}
case 12: { // rondpoint:Stringifier
case 10: { // rondpoint:Stringifier
type = &kRondpointStringifierPointerType;
break;
}
case 13: { // sprites:Sprite
case 11: { // sprites:Sprite
type = &kSpritesSpritePointerType;
break;
}
case 14: { // todolist:TodoList
case 12: { // todolist:TodoList
type = &kTodolistTodoListPointerType;
break;
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -117,7 +117,7 @@ libz-rs-sys = { git = "https://github.com/memorysafety/zlib-rs", rev = "4aa430cc
[target.'cfg(not(target_os = "android"))'.dependencies]
viaduct = "0.1"
webext-storage = { version = "0.1" }
webext_storage_bridge = { path = "../../../components/extensions/storage/webext_storage_bridge" }
tabs = { version = "0.1" }
suggest = { version = "0.1" }
relevancy = { version = "0.1" }

Просмотреть файл

@ -54,7 +54,7 @@ extern crate xpcom;
extern crate audio_thread_priority;
#[cfg(not(target_os = "android"))]
extern crate webext_storage;
extern crate webext_storage_bridge;
#[cfg(not(target_os = "android"))]
extern crate tabs;
@ -64,7 +64,6 @@ mod reexport_appservices_uniffi_scaffolding {
tabs::uniffi_reexport_scaffolding!();
relevancy::uniffi_reexport_scaffolding!();
suggest::uniffi_reexport_scaffolding!();
webext_storage::uniffi_reexport_scaffolding!();
}
#[cfg(not(target_os = "android"))]