Backed out changeset 3b7ccffc0546 (bug 1598924) for permafailing bug 1621759 on a CLOSED TREE

Andreea Pavel 2020-03-23 21:37:55 +02:00
Parent ec49a68223
Commit 3550cada92
3 changed files: 36 additions and 165 deletions

View file

@@ -5,12 +5,10 @@
const { XPCOMUtils } = ChromeUtils.import(
"resource://gre/modules/XPCOMUtils.jsm"
);
const { Services } = ChromeUtils.import("resource://gre/modules/Services.jsm");
XPCOMUtils.defineLazyGlobalGetters(this, ["indexedDB"]);
XPCOMUtils.defineLazyModuleGetters(this, {
AsyncShutdown: "resource://gre/modules/AsyncShutdown.jsm",
CommonUtils: "resource://services-common/utils.js",
});
@@ -31,18 +29,12 @@ class IndexedDBError extends Error {
}
}
class ShutdownError extends IndexedDBError {
constructor(error, method) {
super(error, method);
}
}
/**
* InternalDatabase is a tiny wrapper with the objective
* Database is a tiny wrapper with the objective
* of providing major kinto-offline-client collection API.
* (with the objective of getting rid of kinto-offline-client)
*/
class InternalDatabase {
class Database {
/* Expose the IDBError class publicly */
static get IDBError() {
return IndexedDBError;
@@ -413,112 +405,3 @@ function sortObjects(order, list) {
return a[field] > b[field] ? direction : -direction;
});
}
const gPendingOperations = new Set();
const kWrappedMethods = new Set([
"open",
"close",
"list",
"importBulk",
"deleteBulk",
"getLastModified",
"saveLastModified",
"getMetadata",
"saveMetadata",
"clear",
"create",
"update",
"delete",
]);
/**
* We need to block shutdown on any pending indexeddb operations, and also
* want to avoid starting new ones if we already know we're going to be
* shutting down. In order to do this, we wrap all the database objects
* we create in a proxy that lets us catch any pending operations, and remove
* them when finished.
*/
function Database(identifier) {
ensureShutdownBlocker();
let db = new InternalDatabase(identifier);
// This helper returns a function that gets executed when a consumer runs an
// async operation on the underlying database. It throws if run after
// shutdown has started. Otherwise, it wraps the result from the "real"
// method call and adds it as a pending operation, removing it when the
// operation is done.
function _getWrapper(target, method) {
return function(...args) {
// For everything other than `close`, check if we're shutting down:
if (method != "close" && Services.startup.shuttingDown) {
throw new ShutdownError(
"The application is shutting down, can't call " + method
);
}
// We're not shutting down yet. Kick off the actual method call,
// and stash the result.
let promise = target[method].apply(target, args);
// Now put a "safe" wrapper of the promise in our shutdown tracker,
let safePromise = promise.catch(ex => Cu.reportError(ex));
let operationInfo = {
promise: safePromise,
method,
identifier,
};
gPendingOperations.add(operationInfo);
// And ensure we remove it once done.
safePromise.then(() => gPendingOperations.delete(operationInfo));
// Now return the original.
return promise;
};
}
return new Proxy(db, {
get(target, property, receiver) {
// For tests so they can stub methods. Otherwise, we end up fetching
// wrappers from the proxy, and then assigning them onto the real
// object when the test is finished, and then callers to that method
// hit infinite recursion...
if (property == "_wrappedDBForTestOnly") {
return target;
}
if (kWrappedMethods.has(property)) {
return _getWrapper(target, property);
}
if (typeof target[property] == "function") {
// If we get here, someone's tried to use some other operation that we
// don't yet know how to proxy. Throw to ensure we fix that:
throw new Error(
`Don't know how to process access to 'database.${property}()'` +
" Add to kWrappedMethods or otherwise update the proxy to deal."
);
}
return target[property];
},
});
}
Database.IDBError = InternalDatabase.IDBError;
Database.ShutdownError = ShutdownError;
let gShutdownBlocker = false;
function ensureShutdownBlocker() {
if (gShutdownBlocker) {
return;
}
gShutdownBlocker = true;
AsyncShutdown.profileBeforeChange.addBlocker(
"RemoteSettingsClient - finish IDB access.",
() => {
return Promise.all(Array.from(gPendingOperations).map(op => op.promise));
},
{
fetchState() {
let info = [];
for (let op of gPendingOperations) {
info.push({ method: op.method, collectionName: op.collectionName });
}
return info;
},
}
);
}
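
The comment in the block removed above describes the pattern this backout undoes: wrap the async database methods in a Proxy, record every in-flight promise, and register an AsyncShutdown blocker that waits for them before profile teardown. Below is a minimal standalone sketch of that pattern; it is an illustration only, not code from this patch, and trackPending, pendingOperations and methods are hypothetical names.

const { AsyncShutdown } = ChromeUtils.import(
  "resource://gre/modules/AsyncShutdown.jsm"
);

const pendingOperations = new Set();

// Wrap the listed async methods of `target` so every promise they return is
// tracked until it settles. `methods` plays the role of kWrappedMethods.
function trackPending(target, methods) {
  return new Proxy(target, {
    get(obj, prop) {
      if (!methods.has(prop)) {
        return obj[prop];
      }
      return (...args) => {
        const promise = obj[prop](...args);
        // Store a rejection-safe copy so the shutdown blocker never rejects;
        // callers still observe the original promise.
        const safe = promise.catch(() => {});
        pendingOperations.add(safe);
        safe.then(() => pendingOperations.delete(safe));
        return promise;
      };
    },
  });
}

// Block profile-before-change shutdown until everything still in flight settles.
AsyncShutdown.profileBeforeChange.addBlocker(
  "Example blocker - finish pending async work.",
  () => Promise.all(Array.from(pendingOperations))
);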

View file

@@ -177,12 +177,8 @@ class AttachmentDownloader extends Downloader {
* current list of records.
*/
async deleteAll() {
let allRecords;
try {
allRecords = await this._client.db.list();
} finally {
await this._client.db.close();
}
const allRecords = await this._client.db.list();
await this._client.db.close();
return Promise.all(
allRecords.filter(r => !!r.attachment).map(r => this.delete(r))
);
@@ -236,7 +232,11 @@ class RemoteSettingsClient extends EventEmitter {
this.bucketNamePref
);
XPCOMUtils.defineLazyGetter(this, "db", () => Database(this.identifier));
XPCOMUtils.defineLazyGetter(
this,
"db",
() => new Database(this.identifier)
);
XPCOMUtils.defineLazyGetter(
this,
@@ -272,17 +272,12 @@ class RemoteSettingsClient extends EventEmitter {
let timestamp = -1;
try {
timestamp = await this.db.getLastModified();
await this.db.close();
} catch (err) {
console.warn(
`Error retrieving the getLastModified timestamp from ${this.identifier} RemoteSettingClient`,
err
);
} finally {
try {
await this.db.close();
} catch (err) {
console.warn(`Couldn't close db connection`);
}
}
return timestamp;
@@ -331,33 +326,26 @@ class RemoteSettingsClient extends EventEmitter {
}
// Read from the local DB.
let data, localRecords, timestamp, metadata;
try {
data = await this.db.list({ filters, order });
const data = await this.db.list({ filters, order });
if (verifySignature) {
console.debug(
`${this.identifier} verify signature of local data on read`
);
const allData = ObjectUtils.isEmpty(filters)
? data
: await this.db.list();
localRecords = allData.map(r => this._cleanLocalFields(r));
timestamp = await this.db.getLastModified();
metadata = await this.db.getMetadata();
if (syncIfEmpty && ObjectUtils.isEmpty(metadata)) {
// No sync occurred yet, may have records from dump but no metadata.
console.debug(
`Required metadata for ${this.identifier}, fetching from server.`
);
metadata = await this.httpClient().getData();
await this.db.saveMetadata(metadata);
}
}
} finally {
await this.db.close();
}
if (verifySignature) {
console.debug(
`${this.identifier} verify signature of local data on read`
);
const allData = ObjectUtils.isEmpty(filters)
? data
: await this.db.list();
const localRecords = allData.map(r => this._cleanLocalFields(r));
const timestamp = await this.db.getLastModified();
let metadata = await this.db.getMetadata();
if (syncIfEmpty && ObjectUtils.isEmpty(metadata)) {
// No sync occurred yet, may have records from dump but no metadata.
console.debug(
`Required metadata for ${this.identifier}, fetching from server.`
);
metadata = await this.httpClient().getData();
await this.db.saveMetadata(metadata);
}
await this._validateCollectionSignature(
localRecords,
timestamp,
@@ -365,6 +353,8 @@ class RemoteSettingsClient extends EventEmitter {
);
}
await this.db.close();
// Filter the records based on `this.filterFunc` results.
return this._filterEntries(data);
}
@@ -659,8 +649,6 @@ class RemoteSettingsClient extends EventEmitter {
} else if (e instanceof RemoteSettingsClient.MissingSignatureError) {
// Collection metadata has no signature info, no need to retry.
reportStatus = UptakeTelemetry.STATUS.SIGNATURE_ERROR;
} else if (e instanceof Database.ShutdownError) {
reportStatus = UptakeTelemetry.STATUS.SHUTDOWN_ERROR;
} else if (/unparseable/.test(e.message)) {
reportStatus = UptakeTelemetry.STATUS.PARSE_ERROR;
} else if (/NetworkError/.test(e.message)) {
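
For context on the earlier hunk in this file (@@ -236,7 +232,11 @@), which swaps the proxied Database(this.identifier) factory back to a plain new Database(this.identifier): XPCOMUtils.defineLazyGetter only runs its lambda on first access and then caches the result. A minimal sketch, assuming Database is imported from the module above and using a hypothetical "main/example" identifier (illustration only, not part of this patch):

const { XPCOMUtils } = ChromeUtils.import(
  "resource://gre/modules/XPCOMUtils.jsm"
);

const holder = {};

// Nothing is constructed here; defineLazyGetter installs a getter that calls
// the lambda on first access and replaces itself with the returned value.
XPCOMUtils.defineLazyGetter(holder, "db", () => new Database("main/example"));

// First access builds the Database; later accesses reuse the same instance.
const first = holder.db;
const second = holder.db;
// first === second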

View file

@@ -685,7 +685,7 @@ add_task(async function test_telemetry_reports_if_fetching_signature_fails() {
add_task(clear_state);
add_task(async function test_telemetry_reports_unknown_errors() {
const backup = client.db._wrappedDBForTestOnly.list;
const backup = client.db.list;
client.db.list = () => {
throw new Error("Internal");
};
@@ -695,7 +695,7 @@ add_task(async function test_telemetry_reports_unknown_errors() {
await client.maybeSync(2000);
} catch (e) {}
client.db._wrappedDBForTestOnly.list = backup;
client.db.list = backup;
const endHistogram = getUptakeTelemetrySnapshot(client.identifier);
const expectedIncrements = { [UptakeTelemetry.STATUS.UNKNOWN_ERROR]: 1 };
checkUptakeTelemetry(startHistogram, endHistogram, expectedIncrements);
@@ -703,7 +703,7 @@ add_task(async function test_telemetry_reports_unknown_errors() {
add_task(clear_state);
add_task(async function test_telemetry_reports_indexeddb_as_custom_1() {
const backup = client.db._wrappedDBForTestOnly.getLastModified;
const backup = client.db.getLastModified;
const msg =
"IndexedDB getLastModified() The operation failed for reasons unrelated to the database itself";
client.db.getLastModified = () => {
@@ -715,7 +715,7 @@ add_task(async function test_telemetry_reports_indexeddb_as_custom_1() {
await client.maybeSync(2000);
} catch (e) {}
client.db._wrappedDBForTestOnly.getLastModified = backup;
client.db.getLastModified = backup;
const endHistogram = getUptakeTelemetrySnapshot(client.identifier);
const expectedIncrements = { [UptakeTelemetry.STATUS.CUSTOM_1_ERROR]: 1 };
checkUptakeTelemetry(startHistogram, endHistogram, expectedIncrements);
@@ -723,7 +723,7 @@ add_task(async function test_telemetry_reports_indexeddb_as_custom_1() {
add_task(clear_state);
add_task(async function test_telemetry_reports_error_name_as_event_nightly() {
const backup = client.db._wrappedDBForTestOnly.list;
const backup = client.db.list;
client.db.list = () => {
const e = new Error("Some unknown error");
e.name = "ThrownError";
@@ -751,7 +751,7 @@ add_task(async function test_telemetry_reports_error_name_as_event_nightly() {
]);
});
client.db._wrappedDBForTestOnly.list = backup;
client.db.list = backup;
});
add_task(clear_state);