Backed out changeset 09705f26786b (bug 1635348) for test_ExtensionStorageSync_migration_kinto.js failures CLOSED TREE

Bogdan Tara 2020-06-01 07:47:14 +03:00
Parent d6040f0d20
Commit 32212687cd
8 changed files with 57 additions and 244 deletions

View File

@@ -36,17 +36,6 @@ XPCOMUtils.defineLazyGetter(this, "storageSvc", () =>
.getInterface(Ci.mozIExtensionStorageArea)
);
// We might end up falling back to kinto...
XPCOMUtils.defineLazyGetter(
this,
"extensionStorageSyncKinto",
() =>
ChromeUtils.import(
"resource://gre/modules/ExtensionStorageSyncKinto.jsm",
{}
).extensionStorageSync
);
// The interfaces which define the callbacks used by the bridge. There's a
// callback for success, failure, and to record data changes.
function ExtensionStorageApiCallback(resolve, reject, extId, changeCallback) {
@@ -70,7 +59,15 @@ ExtensionStorageApiCallback.prototype = {
let e = new Error(message);
e.code = code;
Cu.reportError(e);
this.reject(e);
// The only "public" exception here is for quota failure - all others are
// sanitized.
let sanitized =
code == NS_ERROR_DOM_QUOTA_EXCEEDED_ERR
? // The same message as the local IDB implementation
`QuotaExceededError: storage.sync API call exceeded its quota limitations.`
: // The standard, generic extension error.
"An unexpected error occurred";
this.reject(new ExtensionUtils.ExtensionError(sanitized));
},
onChanged(json) {
@@ -88,79 +85,52 @@ ExtensionStorageApiCallback.prototype = {
class ExtensionStorageSync {
constructor() {
this.listeners = new Map();
// We are optimistic :) If we ever see the special nsresult which indicates
// migration failure, it will become false. In practice, this will only ever
// happen on the first operation.
this.migrationOk = true;
}
// The main entry-point to our bridge. It performs some important roles:
// * Ensures the API is allowed to be used.
// * Works out what "extension id" to use.
// * Turns the callback API into a promise API.
async _promisify(fnName, extension, context, ...args) {
async _promisify(fn, extension, ...args) {
let extId = extension.id;
if (prefPermitsStorageSync !== true) {
throw new ExtensionUtils.ExtensionError(
`Please set ${STORAGE_SYNC_ENABLED_PREF} to true in about:config`
);
}
if (this.migrationOk) {
// We can call ours.
try {
return await new Promise((resolve, reject) => {
let callback = new ExtensionStorageApiCallback(
resolve,
reject,
extId,
(extId, changes) => this.notifyListeners(extId, changes)
);
let sargs = args.map(JSON.stringify);
storageSvc[fnName](extId, ...sargs, callback);
});
} catch (ex) {
if (ex.code != Cr.NS_ERROR_CANNOT_CONVERT_DATA) {
// Some non-migration related error we want to sanitize and propagate.
// The only "public" exception here is for quota failure - all others
// are sanitized.
let sanitized =
ex.code == NS_ERROR_DOM_QUOTA_EXCEEDED_ERR
? // The same message as the local IDB implementation
`QuotaExceededError: storage.sync API call exceeded its quota limitations.`
: // The standard, generic extension error.
"An unexpected error occurred";
throw new ExtensionUtils.ExtensionError(sanitized);
}
// This means "migrate failed" so we must fall back to kinto.
Cu.reportError(
"migration of extension-storage failed - will fall back to kinto"
);
this.migrationOk = false;
}
}
// We've detected failure to migrate, so we want to use kinto.
return extensionStorageSyncKinto[fnName](extension, ...args, context);
return new Promise((resolve, reject) => {
let callback = new ExtensionStorageApiCallback(
resolve,
reject,
extId,
(extId, changes) => this.notifyListeners(extId, changes)
);
fn(extId, ...args, callback);
});
}
set(extension, items, context) {
return this._promisify("set", extension, context, items);
return this._promisify(storageSvc.set, extension, JSON.stringify(items));
}
remove(extension, keys, context) {
return this._promisify("remove", extension, context, keys);
return this._promisify(storageSvc.remove, extension, JSON.stringify(keys));
}
clear(extension, context) {
return this._promisify("clear", extension, context);
return this._promisify(storageSvc.clear, extension);
}
get(extension, spec, context) {
return this._promisify("get", extension, context, spec);
return this._promisify(storageSvc.get, extension, JSON.stringify(spec));
}
getBytesInUse(extension, keys, context) {
return this._promisify("getBytesInUse", extension, context, keys);
return this._promisify(
storageSvc.getBytesInUse,
extension,
JSON.stringify(keys)
);
}
addOnChangedListener(extension, listener, context) {
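With the backout applied, `_promisify` once again wraps the callback-based `storageSvc` methods directly in a promise, with no kinto fallback path. As a rough usage sketch (not part of the change), this is how a caller sees the bridged API; the extension id and context object are made up, modelled on the xpcshell test further down, and it assumes the storage.sync pref checked at the top of `_promisify` is enabled and that the code runs in a privileged (chrome) context:

const { extensionStorageSync } = ChromeUtils.import(
  "resource://gre/modules/ExtensionStorageSync.jsm"
);

async function exampleUsage() {
  // Hypothetical extension and context objects, shaped like the ones in the test below.
  let extension = { id: "example@tests.mozilla.org" };
  let context = { extension, callOnClose() {} };
  try {
    await extensionStorageSync.set(extension, { colour: "blue" }, context);
    let stored = await extensionStorageSync.get(extension, null, context);
    // stored is now { colour: "blue" }
  } catch (ex) {
    // Per onError() above, only quota failures keep a meaningful message;
    // every other failure surfaces as "An unexpected error occurred".
    Cu.reportError(ex);
  }
}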

View File

@@ -64,9 +64,8 @@ function StorageSyncService() {
return StorageSyncService._singleton;
}
let file = FileUtils.getFile("ProfD", ["storage-sync-v2.sqlite"]);
let kintoFile = FileUtils.getFile("ProfD", ["storage-sync.sqlite"]);
this._storageArea = new StorageSyncArea(file, kintoFile);
let file = FileUtils.getFile("ProfD", ["storage-sync2.sqlite"]);
this._storageArea = new StorageSyncArea(file);
// Register a blocker to close the storage connection on shutdown.
this._shutdownBound = () => this._shutdown();
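The `_shutdownBound` function created above is then handed to an AsyncShutdown barrier (the registration itself falls outside this hunk). A minimal sketch of that pattern, assuming - purely for illustration - the profile-change-teardown phase and a made-up blocker name:

const { AsyncShutdown } = ChromeUtils.import(
  "resource://gre/modules/AsyncShutdown.jsm"
);

// Inside the StorageSyncService constructor, after _shutdownBound is set up:
AsyncShutdown.profileChangeTeardown.addBlocker(
  "StorageSyncService: closing the extension-storage database",
  this._shutdownBound
);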

View File

@@ -69,10 +69,7 @@ interface mozIConfigurableExtensionStorageArea : mozIExtensionStorageArea {
// `configure` multiple times will throw. `configure` must also be called
// before any of the `mozIExtensionStorageArea` methods, or they'll fail
// with errors.
// The second param is the path to the kinto database file from which we
// should migrate. This should always be specified even when there's a
// chance the file doesn't exist.
void configure(in nsIFile databaseFile, in nsIFile kintoFile);
void configure(in nsIFile databaseFile);
// Tears down the storage area, closing the backing database connection.
// This is called automatically when Firefox shuts down. Once a storage area
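As a rough illustration of the contract above (after the backout, configure takes only the database file): `area` stands in for an instance of this interface obtained from the storage component, and the extension id, stored value, and callback are placeholders, not taken from the change.

// Hedged sketch; the NS_ERROR_ALREADY_INITIALIZED result for a second configure()
// call follows from the AlreadyConfigured mapping in error.rs below.
const { FileUtils } = ChromeUtils.import("resource://gre/modules/FileUtils.jsm");

let file = FileUtils.getFile("ProfD", ["storage-sync2.sqlite"]);
area.configure(file);      // must be called exactly once, before any storage method
// area.configure(file);   // a second call would throw NS_ERROR_ALREADY_INITIALIZED
area.set("ext@example.com", JSON.stringify({ key: "value" }), callback);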

View File

@@ -6,9 +6,7 @@ use std::{
cell::{Ref, RefCell},
convert::TryInto,
ffi::OsString,
mem,
path::PathBuf,
str,
mem, str,
sync::Arc,
};
@@ -30,29 +28,6 @@ use crate::error::{Error, Result};
use crate::punt::{Punt, PuntTask, TeardownTask};
use crate::store::{LazyStore, LazyStoreConfig};
fn path_from_nsifile(file: &nsIFile) -> Result<PathBuf> {
let mut raw_path = nsString::new();
// `nsIFile::GetPath` gives us a UTF-16-encoded version of its
// native path, which we must turn back into a platform-native
// string. We can't use `nsIFile::nativePath()` here because
// it's marked as `nostdcall`, which Rust doesn't support.
unsafe { file.GetPath(&mut *raw_path) }.to_result()?;
let native_path = {
// On Windows, we can create a native string directly from the
// encoded path.
#[cfg(windows)]
{
use std::os::windows::prelude::*;
OsString::from_wide(&*raw_path)
}
// On other platforms, we must first decode the raw path from
// UTF-16, and then create our native string.
#[cfg(not(windows))]
OsString::from(String::from_utf16(&*raw_path)?)
};
Ok(native_path.into())
}
/// An XPCOM component class for the Rust extension storage API. This class
/// implements the interfaces needed for syncing and storage.
///
@@ -112,15 +87,32 @@ impl StorageSyncArea {
xpcom_method!(
configure => Configure(
database_file: *const nsIFile,
kinto_file: *const nsIFile
database_file: *const nsIFile
)
);
/// Sets up the storage area.
fn configure(&self, database_file: &nsIFile, kinto_file: &nsIFile) -> Result<()> {
fn configure(&self, database_file: &nsIFile) -> Result<()> {
let mut raw_path = nsString::new();
// `nsIFile::GetPath` gives us a UTF-16-encoded version of its
// native path, which we must turn back into a platform-native
// string. We can't use `nsIFile::nativePath()` here because
// it's marked as `nostdcall`, which Rust doesn't support.
unsafe { database_file.GetPath(&mut *raw_path) }.to_result()?;
let native_path = {
// On Windows, we can create a native string directly from the
// encoded path.
#[cfg(windows)]
{
use std::os::windows::prelude::*;
OsString::from_wide(&*raw_path)
}
// On other platforms, we must first decode the raw path from
// UTF-16, and then create our native string.
#[cfg(not(windows))]
OsString::from(String::from_utf16(&*raw_path)?)
};
self.store()?.configure(LazyStoreConfig {
path: path_from_nsifile(database_file)?,
kinto_path: path_from_nsifile(kinto_file)?,
path: native_path.into(),
})?;
Ok(())
}

View File

@@ -6,9 +6,8 @@ use std::{error, fmt, result, str::Utf8Error, string::FromUtf16Error};
use golden_gate::Error as GoldenGateError;
use nserror::{
nsresult, NS_ERROR_ALREADY_INITIALIZED, NS_ERROR_CANNOT_CONVERT_DATA,
NS_ERROR_DOM_QUOTA_EXCEEDED_ERR, NS_ERROR_FAILURE, NS_ERROR_INVALID_ARG,
NS_ERROR_NOT_IMPLEMENTED, NS_ERROR_NOT_INITIALIZED, NS_ERROR_UNEXPECTED,
nsresult, NS_ERROR_ALREADY_INITIALIZED, NS_ERROR_DOM_QUOTA_EXCEEDED_ERR, NS_ERROR_FAILURE,
NS_ERROR_INVALID_ARG, NS_ERROR_NOT_IMPLEMENTED, NS_ERROR_NOT_INITIALIZED, NS_ERROR_UNEXPECTED,
};
use serde_json::error::Error as JsonError;
use webext_storage::error::Error as WebextStorageError;
@@ -24,7 +23,6 @@ pub type Result<T> = result::Result<T, Error>;
pub enum Error {
Nsresult(nsresult),
WebextStorage(WebextStorageError),
MigrationFailed(WebextStorageError),
GoldenGate(GoldenGateError),
MalformedString(Box<dyn error::Error + Send + Sync + 'static>),
AlreadyConfigured,
@@ -88,7 +86,6 @@ impl From<Error> for nsresult {
WebextStorageErrorKind::QuotaError(_) => NS_ERROR_DOM_QUOTA_EXCEEDED_ERR,
_ => NS_ERROR_FAILURE,
},
Error::MigrationFailed(_) => NS_ERROR_CANNOT_CONVERT_DATA,
Error::GoldenGate(error) => error.into(),
Error::MalformedString(_) => NS_ERROR_INVALID_ARG,
Error::AlreadyConfigured => NS_ERROR_ALREADY_INITIALIZED,
@@ -106,7 +103,6 @@ impl fmt::Display for Error {
match self {
Error::Nsresult(result) => write!(f, "Operation failed with {}", result),
Error::WebextStorage(error) => error.fmt(f),
Error::MigrationFailed(error) => write!(f, "Migration failed with {}", error),
Error::GoldenGate(error) => error.fmt(f),
Error::MalformedString(error) => error.fmt(f),
Error::AlreadyConfigured => write!(f, "The storage area is already configured"),

View File

@@ -3,7 +3,6 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::{
fs::remove_file,
mem,
path::PathBuf,
sync::{Mutex, MutexGuard},
@@ -20,11 +19,6 @@ use crate::error::{Error, Result};
pub struct LazyStoreConfig {
/// The path to the database file for this storage area.
pub path: PathBuf,
/// The path to the old kinto database. If it exists, we should attempt to
/// migrate from this database as soon as we open our DB. It's not Option<>
/// because the caller will not have checked whether it exists or not, so
/// will assume it might.
pub kinto_path: PathBuf,
}
/// A lazy store is automatically initialized on a background thread with its
@@ -74,7 +68,7 @@ impl LazyStore {
.store
.get_or_try_init(|| match self.config.get() {
Some(config) => {
let store = init_store(config)?;
let store = Store::new(&config.path)?;
let handle = store.interrupt_handle();
Ok(InterruptStore {
inner: Mutex::new(store),
@@ -195,51 +189,3 @@ impl BridgedEngine for LazyStore {
Ok(self.get()?.bridged_engine().wipe()?)
}
}
// Initialize the store, performing a migration if necessary.
// The requirements for migration are, roughly:
// * If kinto_path doesn't exist, we don't try to migrate.
// * If our DB path exists, we assume we've already migrated and don't try again
// * If the migration fails, we close our store and delete the DB, then return
// a special error code which tells our caller about the failure. It's then
// expected to fall back to the "old" kinto store and we'll try next time.
// Note that the migrate() method on the store is written such that it should
// ignore all "read" errors from the source, but propagate "write" errors on our
// DB - the intention is that things like corrupted source databases never cause
// the migration to fail, but disk-space failures on our database do.
fn init_store(config: &LazyStoreConfig) -> Result<Store> {
let should_migrate = config.kinto_path.exists() && !config.path.exists();
let store = Store::new(&config.path)?;
if should_migrate {
match store.migrate(&config.kinto_path) {
Ok(num) => {
// need logging, but for now let's print to stdout.
println!("extension-storage: migrated {} records", num);
Ok(store)
}
Err(e) => {
println!("extension-storage: migration failure: {}", e);
if let Err((store, e)) = store.close() {
// welp, this probably isn't going to end well...
println!(
"extension-storage: failed to close the store after migration failure: {}",
e
);
// I don't think we should hit this in this case - I guess we
// could sleep and retry if we thought we were likely to.
mem::drop(store);
}
if let Err(e) = remove_file(&config.path) {
// this is bad - if it happens regularly it will defeat
// our entire migration strategy - next time we'll assume it
// worked.
// So it's desirable to make noise if this happens.
println!("Failed to remove file after failed migration: {}", e);
}
Err(Error::MigrationFailed(e))
}
}
} else {
Ok(store)
}
}

View File

@@ -1,86 +0,0 @@
/* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */
/* vim: set sts=2 sw=2 et tw=80: */
"use strict";
// Import the rust-based and kinto-based implementations
const { extensionStorageSync: rustImpl } = ChromeUtils.import(
"resource://gre/modules/ExtensionStorageSync.jsm"
);
const { extensionStorageSync: kintoImpl } = ChromeUtils.import(
"resource://gre/modules/ExtensionStorageSyncKinto.jsm"
);
Services.prefs.setBoolPref("webextensions.storage.sync.kinto", false);
add_task(async function test_sync_migration() {
// There's no good reason to perform this test via test extensions - we just
// call the underlying APIs directly.
// Set some stuff using the kinto-based impl.
let e1 = { id: "test@mozilla.com" };
let c1 = { extension: e1, callOnClose() {} };
await kintoImpl.set(e1, { foo: "bar" }, c1);
let e2 = { id: "test-2@mozilla.com" };
let c2 = { extension: e2, callOnClose() {} };
await kintoImpl.set(e2, { second: "2nd" }, c2);
let e3 = { id: "test-3@mozilla.com" };
let c3 = { extension: e3, callOnClose() {} };
// And all the data should be magically migrated.
Assert.deepEqual(await rustImpl.get(e1, "foo", c1), { foo: "bar" });
Assert.deepEqual(await rustImpl.get(e2, null, c2), { second: "2nd" });
// Sanity check we really are doing what we think we are - set a value in our
// new one, it should not be reflected by kinto.
await rustImpl.set(e3, { third: "3rd" }, c3);
Assert.deepEqual(await rustImpl.get(e3, null, c3), { third: "3rd" });
Assert.deepEqual(await kintoImpl.get(e3, null, c3), {});
// cleanup.
await kintoImpl.clear(e1, c1);
await kintoImpl.clear(e2, c2);
await kintoImpl.clear(e3, c3);
await rustImpl.clear(e1, c1);
await rustImpl.clear(e2, c2);
await rustImpl.clear(e3, c3);
});
// It would be great to have failure tests, but that seems impossible to have
// in automated tests given the conditions under which we migrate - it would
// basically require us to arrange for zero free disk space or to somehow
// arrange for sqlite to see an io error. Specially crafted "corrupt"
// sqlite files don't help because that file must not exist for us to even
// attempt migration.
//
// But - what we can test is that if .migrationOk on the new impl ever goes to
// false we delegate correctly.
add_task(async function test_sync_migration_delgates() {
let e1 = { id: "test@mozilla.com" };
let c1 = { extension: e1, callOnClose() {} };
await kintoImpl.set(e1, { foo: "bar" }, c1);
// We think migration went OK - `get` shouldn't see kinto.
Assert.deepEqual(await rustImpl.get(e1, null, c1), {});
info(
"Setting migration failure flag to ensure we delegate to kinto implementation"
);
rustImpl.migrationOk = false;
// get should now be seeing kinto.
Assert.deepEqual(await rustImpl.get(e1, null, c1), { foo: "bar" });
// check everything else delegates.
await rustImpl.set(e1, { foo: "foo" }, c1);
Assert.deepEqual(await kintoImpl.get(e1, null, c1), { foo: "foo" });
Assert.equal(await rustImpl.getBytesInUse(e1, null, c1), 8);
await rustImpl.remove(e1, "foo", c1);
Assert.deepEqual(await kintoImpl.get(e1, null, c1), {});
await rustImpl.set(e1, { foo: "foo" }, c1);
Assert.deepEqual(await kintoImpl.get(e1, null, c1), { foo: "foo" });
await rustImpl.clear(e1, c1);
Assert.deepEqual(await kintoImpl.get(e1, null, c1), {});
});

View File

@@ -33,7 +33,6 @@ prefs =
# For tests which rely on content pages, and should only run with remote content
# but in-process extensions.
[test_ExtensionStorageSync_migration_kinto.js]
[test_MatchPattern.js]
[test_StorageSyncService.js]
skip-if = os == 'android' && processor == 'x86_64'