Removed obsolete iOS sync logic
Parent: 10b032aab0
Commit: cc29c6925f
@@ -1,5 +1,11 @@
# v117.0 (In progress)

## General

### 🦊 What's Changed 🦊

- Removed obsolete sync functions that were exposed for Firefox iOS prior to the sync manager component integration ([#5725](https://github.com/mozilla/application-services/pull/5725)).
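
A minimal, illustrative sketch of the pattern consumers are expected to follow after this removal (the store instances below are assumed to be already-constructed wrappers; only the `registerWithSyncManager()` calls appear in this change): instead of driving syncs through the removed store-level `sync(unlockInfo:)` wrappers, each store is offered to the sync manager component, which drives syncing for every registered engine.

```swift
// Illustrative sketch (not part of this change): `loginsStorage`, `placesApi`
// and `tabsStorage` are assumed to be already-constructed instances of the
// Swift wrappers touched by this commit. Rather than calling the removed
// per-store sync(unlockInfo:) methods, each store is offered to the sync
// manager component, which then schedules syncs for all registered engines.
loginsStorage.registerWithSyncManager()
placesApi.registerWithSyncManager()
tabsStorage.registerWithSyncManager()
```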

## Nimbus SDK ⛅️🔬🔭

### ✨ What's New ✨

@@ -1226,6 +1226,7 @@ dependencies = [
 "sync-guid",
 "sync15",
 "tempfile",
 "url",
 "viaduct-reqwest",
]

@@ -1237,10 +1238,13 @@ dependencies = [
 "base64 0.13.0",
 "chrono",
 "cli-support",
 "interrupt-support",
 "log",
 "serde_json",
 "structopt",
 "sync15",
 "tabs",
 "url",
 "viaduct-reqwest",
]

@@ -21,21 +21,6 @@ open class LoginsStorage {
        store = try LoginStore(path: databasePath)
    }

    /// Delete all locally stored login sync metadata. It's unclear if
    /// there's ever a reason for users to call this
    open func reset() throws {
        try queue.sync {
            try self.store.reset()
        }
    }

    /// Delete all locally stored login data.
    open func wipe() throws {
        try queue.sync {
            try self.store.wipe()
        }
    }

    open func wipeLocal() throws {
        try queue.sync {
            try self.store.wipeLocal()

@@ -103,19 +88,6 @@ open class LoginsStorage {
            self.store.registerWithSyncManager()
        }
    }

    open func sync(unlockInfo: SyncUnlockInfo) throws -> String {
        return try queue.sync {
            try self.store
                .sync(
                    keyId: unlockInfo.kid,
                    accessToken: unlockInfo.fxaAccessToken,
                    syncKey: unlockInfo.syncKey,
                    tokenserverUrl: unlockInfo.tokenserverURL,
                    localEncryptionKey: unlockInfo.loginEncryptionKey
                )
        }
    }
}

public func migrateLoginsFromSqlcipher(

@@ -158,7 +158,4 @@ interface LoginStore {

    [Self=ByArc]
    void register_with_sync_manager();

    [Throws=LoginsApiError, Self=ByArc]
    string sync(string key_id, string access_token, string sync_key, string tokenserver_url, string local_encryption_key);
};

@@ -9,7 +9,6 @@ use crate::LoginsSyncEngine;
use parking_lot::Mutex;
use std::path::Path;
use std::sync::{Arc, Weak};
use sync15::client::{sync_multiple, MemoryCachedState, Sync15StorageClientInit};
use sync15::engine::{EngineSyncAssociation, SyncEngine, SyncEngineId};

// Our "sync manager" will use whatever is stashed here.

@@ -150,63 +149,6 @@ impl LoginStore {
        self.db.lock().add_or_update(entry, &encdec)
    }

    /// A convenience wrapper around sync_multiple.
    // Unfortunately, iOS still uses this until they use the sync manager
    // This can almost die later - consumers should never call it (they should
    // use the sync manager) and any of our examples probably can too!
    // Once this dies, `mem_cached_state` can die too.
    #[handle_error(Error)]
    pub fn sync(
        self: Arc<Self>,
        key_id: String,
        access_token: String,
        sync_key: String,
        tokenserver_url: String,
        local_encryption_key: String,
    ) -> ApiResult<String> {
        let mut engine = LoginsSyncEngine::new(Arc::clone(&self))?;
        engine
            .set_local_encryption_key(&local_encryption_key)
            .unwrap();

        // This is a bit hacky but iOS still uses sync() and we can only pass strings over ffi
        // Below was ported from the "C" ffi code that does essentially the same thing
        let storage_init = &Sync15StorageClientInit {
            key_id,
            access_token,
            tokenserver_url: url::Url::parse(tokenserver_url.as_str())?,
        };
        let root_sync_key = &sync15::KeyBundle::from_ksync_base64(sync_key.as_str())?;

        let mut disk_cached_state = engine.get_global_state()?;
        let mut mem_cached_state = MemoryCachedState::default();

        let mut result = sync_multiple(
            &[&engine],
            &mut disk_cached_state,
            &mut mem_cached_state,
            storage_init,
            root_sync_key,
            &engine.scope,
            None,
        );
        // We always update the state - sync_multiple does the right thing
        // if it needs to be dropped (ie, they will be None or contain Nones etc)
        engine.set_global_state(&disk_cached_state)?;

        // for b/w compat reasons, we do some dances with the result.
        // XXX - note that this means telemetry isn't going to be reported back
        // to the app - we need to check with lockwise about whether they really
        // need these failures to be reported or whether we can loosen this.
        if let Err(e) = result.result {
            return Err(e.into());
        }
        match result.engine_results.remove("passwords") {
            None | Some(Ok(())) => Ok(serde_json::to_string(&result.telemetry).unwrap()),
            Some(Err(e)) => Err(e.into()),
        }
    }

    // This allows the embedding app to say "make this instance available to
    // the sync manager". The implementation is more like "offer to sync mgr"
    // (thereby avoiding us needing to link with the sync manager) but

@@ -70,105 +70,6 @@ public class PlacesAPI {
        }
    }

    /**
     * Sync the bookmarks collection.
     *
     * - Returns: A JSON string representing a telemetry ping for this sync. The
     *            string contains the ping payload, and should be sent to the
     *            telemetry submission endpoint.
     *
     * - Throws:
     *     - `PlacesApiError.databaseInterrupted`: If a call is made to `interrupt()` on this
     *       object from another thread.
     *     - `PlacesApiError.unexpected`: When an error that has not specifically been exposed
     *       to Swift is encountered (for example IO errors from
     *       the database code, etc).
     *     - `PlacesApiError.panic`: If the rust code panics while completing this
     *       operation. (If this occurs, please let us know).
     */
    open func syncBookmarks(unlockInfo: SyncUnlockInfo) throws -> String {
        return try queue.sync {
            try self.api.bookmarksSync(
                keyId: unlockInfo.kid,
                accessToken: unlockInfo.fxaAccessToken,
                syncKey: unlockInfo.syncKey,
                tokenserverUrl: unlockInfo.tokenserverURL
            )
        }
    }

    /**
     * Sync the History collection.
     *
     * - Returns: A JSON string representing a telemetry ping for this sync. The
     *            string contains the ping payload, and should be sent to the
     *            telemetry submission endpoint.
     *
     * - Throws:
     *     - `PlacesApiError.databaseInterrupted`: If a call is made to `interrupt()` on this
     *       object from another thread.
     *     - `PlacesApiError.unexpected`: When an error that has not specifically been exposed
     *       to Swift is encountered (for example IO errors from
     *       the database code, etc).
     *     - `PlacesApiError.panic`: If the rust code panics while completing this
     *       operation. (If this occurs, please let us know).
     */
    open func syncHistory(unlockInfo: SyncUnlockInfo) throws -> String {
        return try queue.sync {
            try self.api.historySync(
                keyId: unlockInfo.kid,
                accessToken: unlockInfo.fxaAccessToken,
                syncKey: unlockInfo.syncKey,
                tokenserverUrl: unlockInfo.tokenserverURL
            )
        }
    }

    /**
     * Resets all sync metadata for history, including change flags,
     * sync statuses, and last sync time. The next sync after reset
     * will behave the same way as a first sync when connecting a new
     * device.
     *
     * This method only needs to be called when the user disconnects
     * from Sync. There are other times when Places resets sync metadata,
     * but those are handled internally in the Rust code.
     *
     * - Throws:
     *     - `PlacesApiError.databaseInterrupted`: If a call is made to `interrupt()` on this
     *       object from another thread.
     *     - `PlacesApiError.unexpected`: When an error that has not specifically been exposed
     *       to Swift is encountered (for example IO errors from
     *       the database code, etc).
     *     - `PlacesApiError.panic`: If the rust code panics while completing this
     *       operation. (If this occurs, please let us know).
     */
    open func resetHistorySyncMetadata() throws {
        return try queue.sync {
            try self.api.resetHistory()
        }
    }

    /**
     * Resets all sync metadata for bookmarks, including change flags, sync statuses, and
     * last sync time. The next sync after reset will behave the same way as a first sync
     * when connecting a new device.
     *
     * - Throws:
     *     - `PlacesApiError.databaseInterrupted`: If a call is made to `interrupt()` on this
     *       object from another thread.
     *     - `PlacesApiError.unexpected`: When an error that has not specifically been exposed
     *       to Swift is encountered (for example IO errors from
     *       the database code, etc).
     *     - `PlacesApiError.panic`: If the rust code panics while completing this
     *       operation. (If this occurs, please let us know).
     */
    open func resetBookmarkSyncMetadata() throws {
        return try queue.sync {
            try self.api.bookmarksReset()
        }
    }

    open func registerWithSyncManager() {
        queue.sync {
            self.api.registerWithSyncManager()

@@ -877,13 +778,6 @@ public class PlacesWriteConnection: PlacesReadConnection {
        }
    }

    open func wipeLocalHistory() throws {
        try queue.sync {
            try self.checkApi()
            try self.conn.wipeLocalHistory()
        }
    }

    open func deleteEverythingHistory() throws {
        try queue.sync {
            try self.checkApi()

@@ -11,7 +11,7 @@ autofill = { path = "../autofill" }
sync15 = { path = "../sync15", features = ["sync-client"] }
places = { path = "../places" }
logins = { path = "../logins" }
tabs = { path = "../tabs", features = ["full-sync"] }
tabs = { path = "../tabs" }
thiserror = "1.0"
anyhow = "1.0"
lazy_static = "1.4"

@@ -6,19 +6,6 @@ authors = ["application-services@mozilla.com"]
license = "MPL-2.0"
exclude = ["/android", "/ios"]

[features]
# When used in desktop we *do not* want the full-sync implementation so desktop
# doesn't get our crypto etc.
# When used on mobile, for simplicity we *do* still expose the unused "bridged engine"
# because none of the engine implementations need the crypto.
default = []

# TODO: we've enabled the "standalone-sync" feature - see the description
# of this feature in sync15's Cargo.toml for what we should do instead.
# (The short version here is that once tabs doesn't need to expose a `sync()`
# method for iOS, we can kill the `full-sync` feature entirely)
full-sync = ["sync15/standalone-sync"]

[dependencies]
anyhow = "1.0"
error-support = { path = "../support/error" }

@@ -26,32 +26,9 @@ open class TabsStorage {
        }
    }

    open func reset() throws {
        try queue.sync {
            try self.store.reset()
        }
    }

    open func registerWithSyncManager() {
        queue.sync {
            self.store.registerWithSyncManager()
        }
    }

    open func sync(unlockInfo: SyncUnlockInfo) throws -> String {
        guard let tabsLocalId = unlockInfo.tabsLocalId else {
            throw TabsApiError.UnexpectedTabsError(reason: "tabs local ID was not provided")
        }

        return try queue.sync {
            try self.store
                .sync(
                    keyId: unlockInfo.kid,
                    accessToken: unlockInfo.fxaAccessToken,
                    syncKey: unlockInfo.syncKey,
                    tokenserverUrl: unlockInfo.tokenserverURL,
                    localId: tabsLocalId
                )
        }
    }
}

@@ -108,7 +108,8 @@ impl RemoteTab {
// (We hope to get these 2 engines even closer in the future, but for now, we suck this up)
pub struct TabsEngine {
    pub(super) store: Arc<TabsStore>,
    pub(super) local_id: RwLock<String>,
    // local_id is made public for use in examples/tabs-sync
    pub local_id: RwLock<String>,
}

impl TabsEngine {

@@ -1,70 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use crate::{ApiResult, TabsEngine, TabsStore};
use error_support::handle_error;
use interrupt_support::NeverInterrupts;
use std::sync::Arc;
use sync15::client::{sync_multiple, MemoryCachedState, Sync15StorageClientInit};
use sync15::engine::EngineSyncAssociation;
use sync15::KeyBundle;

impl TabsStore {
    pub fn reset(self: Arc<Self>) -> ApiResult<()> {
        use sync15::engine::SyncEngine;
        let engine = TabsEngine::new(Arc::clone(&self));
        engine.reset(&EngineSyncAssociation::Disconnected)?;
        Ok(())
    }

    /// A convenience wrapper around sync_multiple.
    #[handle_error(crate::Error)]
    pub fn sync(
        self: Arc<Self>,
        key_id: String,
        access_token: String,
        sync_key: String,
        tokenserver_url: String,
        local_id: String,
    ) -> ApiResult<String> {
        let mut mem_cached_state = MemoryCachedState::default();
        let engine = TabsEngine::new(Arc::clone(&self));

        // Since we are syncing without the sync manager, there's no
        // command processor, therefore no clients engine, and in
        // consequence `TabsStore::prepare_for_sync` is never called
        // which means our `local_id` will never be set.
        // Do it here.
        *engine.local_id.write().unwrap() = local_id;

        let storage_init = &Sync15StorageClientInit {
            key_id,
            access_token,
            tokenserver_url: url::Url::parse(tokenserver_url.as_str())?,
        };
        let root_sync_key = &KeyBundle::from_ksync_base64(sync_key.as_str())?;

        let mut result = sync_multiple(
            &[&engine],
            &mut None,
            &mut mem_cached_state,
            storage_init,
            root_sync_key,
            &NeverInterrupts,
            None,
        );

        // for b/w compat reasons, we do some dances with the result.
        // XXX - note that this means telemetry isn't going to be reported back
        // to the app - we need to check with lockwise about whether they really
        // need these failures to be reported or whether we can loosen this.
        if let Err(e) = result.result {
            return Err(e.into());
        }
        match result.engine_results.remove("tabs") {
            None | Some(Ok(())) => Ok(serde_json::to_string(&result.telemetry)?),
            Some(Err(e)) => Err(e.into()),
        }
    }
}

@@ -5,31 +5,3 @@
pub(crate) mod bridge;
pub(crate) mod engine;
pub(crate) mod record;

#[cfg(feature = "full-sync")]
pub mod full_sync;

// When full-sync isn't enabled we need stub versions for these UDL exposed functions.
#[cfg(not(feature = "full-sync"))]
impl crate::TabsStore {
    pub fn reset(self: std::sync::Arc<Self>) -> crate::error::ApiResult<()> {
        log::warn!("reset: feature not enabled");
        Err(crate::error::TabsApiError::SyncError {
            reason: "reset".to_string(),
        })
    }

    pub fn sync(
        self: std::sync::Arc<Self>,
        _key_id: String,
        _access_token: String,
        _sync_key: String,
        _tokenserver_url: String,
        _local_id: String,
    ) -> crate::error::ApiResult<String> {
        log::warn!("sync: feature not enabled");
        Err(crate::error::TabsApiError::SyncError {
            reason: "sync".to_string(),
        })
    }
}

@@ -26,12 +26,6 @@ interface TabsStore {
    [Self=ByArc]
    void register_with_sync_manager();

    [Throws=TabsApiError, Self=ByArc]
    void reset();

    [Throws=TabsApiError, Self=ByArc]
    string sync(string key_id, string access_token, string sync_key, string tokenserver_url, string local_id);

    [Self=ByArc]
    TabsBridgedEngine bridged_engine();

@@ -25,6 +25,7 @@ clap = "2.33"
cli-support = { path = "../cli-support" }
tempfile = "3"
serde_json = "1.0"
url = "2.2"
viaduct-reqwest = { path = "../../components/support/viaduct-reqwest" }

[dev-dependencies.rusqlite]

@@ -16,7 +16,10 @@ use logins::{
use prettytable::{row, Cell, Row, Table};
use rusqlite::OptionalExtension;
use std::sync::Arc;
use sync15::engine::{EngineSyncAssociation, SyncEngine};
use sync15::{
    client::{sync_multiple, MemoryCachedState, Sync15StorageClientInit},
    engine::{EngineSyncAssociation, SyncEngine},
};

// I'm completely punting on good error handling here.
use anyhow::{bail, Result};

@@ -365,6 +368,49 @@ fn set_encryption_key(store: &LoginStore, key: &str) -> rusqlite::Result<()> {
        .map(|_| ())
}

fn do_sync(
    store: Arc<LoginStore>,
    key_id: String,
    access_token: String,
    sync_key: String,
    tokenserver_url: url::Url,
    local_encryption_key: String,
) -> Result<String> {
    let mut engine = LoginsSyncEngine::new(Arc::clone(&store))?;
    engine
        .set_local_encryption_key(&local_encryption_key)
        .unwrap();

    let storage_init = &Sync15StorageClientInit {
        key_id,
        access_token,
        tokenserver_url,
    };
    let root_sync_key = &sync15::KeyBundle::from_ksync_base64(sync_key.as_str())?;

    let mut disk_cached_state = engine.get_global_state()?;
    let mut mem_cached_state = MemoryCachedState::default();

    let mut result = sync_multiple(
        &[&engine],
        &mut disk_cached_state,
        &mut mem_cached_state,
        storage_init,
        root_sync_key,
        &engine.scope,
        None,
    );
    engine.set_global_state(&disk_cached_state)?;

    if let Err(e) = result.result {
        return Err(e.into());
    }
    match result.engine_results.remove("passwords") {
        None | Some(Ok(())) => Ok(serde_json::to_string(&result.telemetry).unwrap()),
        Some(Err(e)) => Err(e.into()),
    }
}

#[allow(clippy::cognitive_complexity)] // FIXME
fn main() -> Result<()> {
    cli_support::init_trace_logging();

@@ -499,11 +545,12 @@ fn main() -> Result<()> {
                token_info.key.unwrap().key_bytes()?,
                base64::URL_SAFE_NO_PAD,
            );
            match Arc::clone(&store).sync(
            match do_sync(
                Arc::clone(&store),
                cli_fxa.client_init.key_id.clone(),
                cli_fxa.client_init.access_token.clone(),
                sync_key,
                cli_fxa.client_init.tokenserver_url.to_string(),
                cli_fxa.client_init.tokenserver_url.clone(),
                encryption_key.clone(),
            ) {
                Err(e) => {

@@ -12,11 +12,14 @@ path = "src/tabs-sync.rs"

[dev-dependencies]
base64 = "0.13"
tabs = { path = "../../components/tabs", features=["full-sync"] }
tabs = { path = "../../components/tabs" }
serde_json = "1"
log = "0.4"
anyhow = "1.0"
chrono = "0.4"
structopt = "0.3"
cli-support = { path = "../cli-support" }
url = "2.2"
interrupt-support = { path = "../../components/support/interrupt" }
sync15 = { path = "../../components/sync15", features = ["sync-engine"] }
viaduct-reqwest = { path = "../../components/support/viaduct-reqwest" }

@@ -6,10 +6,15 @@

use cli_support::fxa_creds::{get_account_and_token, get_cli_fxa, get_default_fxa_config};
use cli_support::prompt::{prompt_char, prompt_string};
use interrupt_support::NeverInterrupts;
use std::path::Path;
use std::sync::Arc;
use structopt::StructOpt;
use tabs::{RemoteTabRecord, TabsStore};
use sync15::{
    client::{sync_multiple, MemoryCachedState, Sync15StorageClientInit},
    KeyBundle,
};
use tabs::{RemoteTabRecord, TabsEngine, TabsStore};

use anyhow::Result;

@@ -45,6 +50,50 @@ fn ms_to_string(ms: i64) -> String {
    dtl.format("%F %r").to_string()
}

fn do_sync(
    store: Arc<TabsStore>,
    key_id: String,
    access_token: String,
    sync_key: String,
    tokenserver_url: url::Url,
    local_id: String,
) -> Result<String> {
    let mut mem_cached_state = MemoryCachedState::default();
    let engine = TabsEngine::new(Arc::clone(&store));

    // Since we are syncing without the sync manager, there's no
    // command processor, therefore no clients engine, and in
    // consequence `TabsStore::prepare_for_sync` is never called
    // which means our `local_id` will never be set.
    // Do it here.
    *engine.local_id.write().unwrap() = local_id;

    let storage_init = &Sync15StorageClientInit {
        key_id,
        access_token,
        tokenserver_url: url::Url::parse(tokenserver_url.as_str())?,
    };
    let root_sync_key = &KeyBundle::from_ksync_base64(sync_key.as_str())?;

    let mut result = sync_multiple(
        &[&engine],
        &mut None,
        &mut mem_cached_state,
        storage_init,
        root_sync_key,
        &NeverInterrupts,
        None,
    );

    if let Err(e) = result.result {
        return Err(e.into());
    }
    match result.engine_results.remove("tabs") {
        None | Some(Ok(())) => Ok(serde_json::to_string(&result.telemetry)?),
        Some(Err(e)) => Err(e.into()),
    }
}

fn main() -> Result<()> {
    viaduct_reqwest::use_reqwest_backend();
    cli_support::init_logging();

@@ -121,11 +170,12 @@ fn main() -> Result<()> {
            }
            'S' | 's' => {
                log::info!("Syncing!");
                match Arc::clone(&store).sync(
                match do_sync(
                    Arc::clone(&store),
                    cli_fxa.client_init.clone().key_id,
                    cli_fxa.client_init.clone().access_token,
                    sync_key.clone(),
                    cli_fxa.client_init.tokenserver_url.to_string(),
                    cli_fxa.client_init.tokenserver_url.clone(),
                    device_id.clone(),
                ) {
                    Err(e) => {

@@ -11,7 +11,7 @@ crate-type = ["cdylib"]
[dependencies]
fxa-client = { path = "../../components/fxa-client" }
logins = { path = "../../components/logins" }
tabs = { path = "../../components/tabs/", features = ["full-sync"] }
tabs = { path = "../../components/tabs/" }
sync_manager = { path = "../../components/sync_manager/" }
places = { path = "../../components/places" }
push = { path = "../../components/push" }

@@ -19,7 +19,7 @@ fxa-client = { path = "../../components/fxa-client" }
logins = { path = "../../components/logins" }
autofill = { path = "../../components/autofill" }
push = { path = "../../components/push" }
tabs = { path = "../../components/tabs", features = ["full-sync"] }
tabs = { path = "../../components/tabs" }
places = {path = "../../components/places" }
remote_settings = { path = "../../components/remote_settings" }
sync15 = {path = "../../components/sync15"}

@@ -13,7 +13,7 @@ logins = { path = "../../components/logins" }
sync15 = { path = "../../components/sync15" }
sync_manager = { path = "../../components/sync_manager" }
restmail-client = { path = "../../components/support/restmail-client" }
tabs = { path = "../../components/tabs", features = ["full-sync"] }
tabs = { path = "../../components/tabs" }
fxa-client = { path = "../../components/fxa-client" }
sync-guid = { path = "../../components/support/guid", features = ["rusqlite_support", "random"]}
interrupt-support = { path = "../../components/support/interrupt" }