Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1620185 - Reuse code from signature failure retry r=glasserc
Differential Revision: https://phabricator.services.mozilla.com/D66412

--HG--
extra : moz-landing-system : lando
This commit is contained in:
Parent: 6d01f4e707
Commit: fbc12fc39d
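The diff below collapses two code paths in RemoteSettingsClient: the regular pull (the old `_importChanges`, which delegated to `kintoCollection.pullChanges()`) and the signature-failure retry (`_retrySyncFromScratch`) are merged into a single `_importChanges(kintoCollection, expectedTimestamp, { clear, localRecords, localTimestamp })`. The following is a heavily simplified sketch, assembled only from lines visible in this diff, of how the shared helper decides between an incremental fetch and a full refetch; signature validation, tombstone deletion, timing counters, and the sync-result bookkeeping of the real method are omitted here:

    // Simplified illustration (not the literal method body from the patch).
    async function importChangesSketch(client, kintoCollection, expectedTimestamp, options = {}) {
      const { clear = false, localTimestamp } = options;
      // Collection metadata (which carries the signature) is always refetched.
      const metadata = await client.getData({ query: { _expected: expectedTimestamp } });
      // Incremental fetch by default; full fetch when clearing or when no local timestamp exists.
      const { data: remoteRecords, last_modified: remoteTimestamp } = await client.listRecords({
        filters: { _expected: expectedTimestamp },
        since: clear || !localTimestamp ? undefined : `${localTimestamp}`,
      });
      if (clear) {
        // Retry path: wipe local data before applying the full server content.
        await kintoCollection.db.clear();
      }
      await kintoCollection.db.importBulk(
        remoteRecords.filter(r => !r.deleted).map(r => ({ ...r, _status: "synced" }))
      );
      await kintoCollection.db.saveLastModified(remoteTimestamp);
      await kintoCollection.db.saveMetadata(metadata);
      return { metadata, remoteTimestamp };
    }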
@@ -108,7 +108,7 @@ function bytesToUuid(buf, offset) {
var i = offset || 0;
var bth = byteToHex;
// join used to fix memory issue caused by concatenation: https://bugs.chromium.org/p/v8/issues/detail?id=3175#c4
- return ([bth[buf[i++]], bth[buf[i++]],
+ return ([bth[buf[i++]], bth[buf[i++]],
bth[buf[i++]], bth[buf[i++]], '-',
bth[buf[i++]], bth[buf[i++]], '-',
bth[buf[i++]], bth[buf[i++]], '-',
@@ -450,6 +450,10 @@ class RemoteSettingsClient extends EventEmitter {
// Synchronize remote data into a local DB using Kinto.
kintoCollection = await this.openCollection();
let collectionLastModified = await kintoCollection.db.getLastModified();
+ const { data: allData } = await kintoCollection.list({
+ order: "",
+ });
+ let localRecords = allData.map(r => kintoCollection.cleanLocalFields(r));

// If there is no data currently in the collection, attempt to import
// initial data from the application defaults.
@@ -468,36 +472,16 @@ class RemoteSettingsClient extends EventEmitter {
}));
}
collectionLastModified = await kintoCollection.db.getLastModified();
+ const { data: afterDump } = await kintoCollection.list({ order: "" });
+ localRecords = afterDump.map(r =>
+ kintoCollection.cleanLocalFields(r)
+ );
} catch (e) {
// Report but go-on.
Cu.reportError(e);
}
}

- // If signature verification is enabled, then add a synchronization hook
- // for incoming changes that validates the signature.
- if (this.verifySignature) {
- kintoCollection.hooks["incoming-changes"] = [
- async (payload, collection) => {
- const { changes: remoteRecords, lastModified: timestamp } = payload;
- const { data } = await collection.list({ order: "" }); // no need to sort.
- const metadata = await collection.metadata();
- // Local fields are stripped to compute the collection signature (server does not have them).
- const localRecords = data.map(r => collection.cleanLocalFields(r));
- await this._validateCollectionSignature(
- remoteRecords,
- timestamp,
- metadata,
- { localRecords }
- );
- // In case the signature is valid, apply the changes locally.
- return payload;
- },
- ];
- } else {
- console.warn(`Signature disabled on ${this.identifier}`);
- }

let syncResult;
try {
// Is local timestamp up to date with the server?
@@ -521,12 +505,6 @@ class RemoteSettingsClient extends EventEmitter {
console.debug(
`${this.identifier} verify signature of local data`
);
- const { data: allData } = await kintoCollection.list({
- order: "",
- });
- const localRecords = allData.map(r =>
- kintoCollection.cleanLocalFields(r)
- );
await this._validateCollectionSignature(
[],
collectionLastModified,
@@ -553,8 +531,8 @@ class RemoteSettingsClient extends EventEmitter {
const startSyncDB = Cu.now() * 1000;
syncResult = await this._importChanges(
kintoCollection,
- collectionLastModified,
- expectedTimestamp
+ expectedTimestamp,
+ { localTimestamp: collectionLastModified, localRecords }
);
if (gTimingEnabled) {
const endSyncDB = Cu.now() * 1000;
@@ -599,9 +577,14 @@ class RemoteSettingsClient extends EventEmitter {
console.warn(
`Signature verified failed for ${this.identifier}. Retry from scratch`
);
- syncResult = await this._retrySyncFromScratch(
+ syncResult = await this._importChanges(
kintoCollection,
- expectedTimestamp
+ expectedTimestamp,
+ {
+ clear: true,
+ localTimestamp: collectionLastModified,
+ localRecords,
+ }
);
} catch (e) {
// If the signature fails again, or if an error occured during wiping out the
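With the hunks above, both call sites go through the same helper; the two call shapes, as they appear in this patch, differ only in the options bag:

    // Regular sync: incremental import, diffed against the current local state.
    syncResult = await this._importChanges(kintoCollection, expectedTimestamp, {
      localTimestamp: collectionLastModified,
      localRecords,
    });

    // Signature-failure retry: clear local data and refetch everything from the server.
    syncResult = await this._importChanges(kintoCollection, expectedTimestamp, {
      clear: true,
      localTimestamp: collectionLastModified,
      localRecords,
    });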
@@ -732,46 +715,6 @@ class RemoteSettingsClient extends EventEmitter {
return reportStatus;
}

- async _importChanges(
- kintoCollection,
- collectionLastModified,
- expectedTimestamp,
- { headers } = {}
- ) {
- const syncResult = {
- add(type, entries) {
- // TODO: deduplicate
- this[type] = (this[type] || []).concat(entries);
- },
- ok: true,
- created: [],
- updated: [],
- deleted: [],
- };
- const httpClient = this.httpClient();
- // Fetch collection metadata.
- const query = expectedTimestamp
- ? { query: { _expected: expectedTimestamp } }
- : undefined;
- const metadata = await httpClient.getData({
- ...query,
- headers,
- });
- await kintoCollection.db.saveMetadata(metadata);

- const options = {
- lastModified: collectionLastModified,
- strategy: Kinto.syncStrategy.PULL_ONLY,
- expectedTimestamp,
- };

- // Fetch last changes from the server.
- await kintoCollection.pullChanges(httpClient, syncResult, options);
- const { lastModified } = syncResult;
- await kintoCollection.db.saveLastModified(lastModified);
- return syncResult;
- }

/**
* Import the JSON files from services/settings/dump into the local DB.
*/
@@ -865,67 +808,114 @@ class RemoteSettingsClient extends EventEmitter {
*
* @returns {Promise<Object>} the computed sync result.
*/
- async _retrySyncFromScratch(kintoCollection, expectedTimestamp) {
- // Fetch collection metadata.
+ async _importChanges(kintoCollection, expectedTimestamp, options = {}) {
+ const { clear = false, localRecords, localTimestamp } = options;
+
+ // Fetch collection metadata from server.
const client = this.httpClient();
const metadata = await client.getData({
query: { _expected: expectedTimestamp },
});
- // Fetch whole list of records.
+
+ // Fetch list of changes from server (or all records on clear)
const {
data: remoteRecords,
- last_modified: timestamp,
+ last_modified: remoteTimestamp,
} = await client.listRecords({
- sort: "id",
- filters: { _expected: expectedTimestamp },
+ filters: {
+ _expected: expectedTimestamp,
+ },
+ since: clear || !localTimestamp ? undefined : `${localTimestamp}`,
});
- // Verify signature of remote content, before importing it locally.
- await this._validateCollectionSignature(remoteRecords, timestamp, metadata);
-
- // The signature of this remote content is good (we haven't thrown).
- // Now we will store it locally. In order to replicate what `.sync()` returns
- // we will inspect what we had locally.
- const { data: oldData } = await kintoCollection.list({ order: "" }); // no need to sort.
-
- // We build a sync result as if a diff-based sync was performed.
- const syncResult = { created: [], updated: [], deleted: [] };
-
- // If the remote last_modified is newer than the local last_modified,
- // replace the local data
- const localLastModified = await kintoCollection.db.getLastModified();
- if (timestamp >= localLastModified) {
- console.debug(`Import raw data from server for ${this.identifier}`);
- const start = Cu.now() * 1000;
- await kintoCollection.clear();
- await kintoCollection.loadDump(remoteRecords);
- await kintoCollection.db.saveLastModified(timestamp);
- await kintoCollection.db.saveMetadata(metadata);
- if (gTimingEnabled) {
- const end = Cu.now() * 1000;
- PerformanceCounters.storeExecutionTime(
- `remotesettings/${this.identifier}`,
- "loadRawData",
- end - start,
- "duration"
- );
- }
-
- // Compare local and remote to populate the sync result
- const oldById = new Map(oldData.map(e => [e.id, e]));
- for (const r of remoteRecords) {
- const old = oldById.get(r.id);
- if (old) {
- if (old.last_modified != r.last_modified) {
- syncResult.updated.push({ old, new: r });
- }
- oldById.delete(r.id);
- } else {
- syncResult.created.push(r);
- }
- }
- // Records that remain in our map now are those missing from remote
- syncResult.deleted = Array.from(oldById.values());
- }
+ const syncResult = { ok: true, created: [], updated: [], deleted: [] };
+ // If data wasn't changed, return empty sync result.
+ // This can happen when we update the signature but not the data.
+ console.debug(
+ `${this.identifier} local timestamp: ${localTimestamp}, remote: ${remoteTimestamp}`
+ );
+ if (localTimestamp && remoteTimestamp < localTimestamp) {
+ return syncResult;
+ }
+
+ // Read current local data.
+ const oldRecords = localRecords; // oldLocal.map(r => kintoCollection.cleanLocalFields(r));
+
+ const toDelete = remoteRecords.filter(r => r.deleted);
+ const toInsert = remoteRecords
+ .filter(r => !r.deleted)
+ .map(r => ({ ...r, _status: "synced" }));
+
+ console.debug(
+ `${this.identifier} ${toDelete.length} to delete, ${toInsert.length} to insert`
+ );
+
+ const start = Cu.now() * 1000;
+ if (clear) {
+ // In the retry situation, we fetched all server data,
+ // and we clear all local data before applying updates.
+ console.debug(`${this.identifier} clear local data`);
+ await kintoCollection.db.clear();
+ await kintoCollection.db.saveLastModified(null);
+ await kintoCollection.db.saveMetadata(null);
+ } else {
+ // Otherwise delete local records for each tombstone.
+ await kintoCollection.db.execute(transaction => {
+ toDelete.forEach(r => {
+ transaction.delete(r.id);
+ });
+ });
+ }
+ // Overwrite all other data.
+ await kintoCollection.db.importBulk(toInsert);
+ await kintoCollection.db.saveLastModified(remoteTimestamp);
+ await kintoCollection.db.saveMetadata(metadata);
+ if (gTimingEnabled) {
+ const end = Cu.now() * 1000;
+ PerformanceCounters.storeExecutionTime(
+ `remotesettings/${this.identifier}`,
+ "loadRawData",
+ end - start,
+ "duration"
+ );
+ }
+
+ // Read new local data, after updating.
+ const { data: newLocal } = await kintoCollection.list({ order: "" }); // no need to sort.
+ let newRecords = newLocal.map(r => kintoCollection.cleanLocalFields(r));
+
+ // Verify this new local DB.
+ if (this.verifySignature) {
+ await this._validateCollectionSignature(
+ newRecords,
+ remoteTimestamp,
+ metadata
+ );
+ } else {
+ console.warn(`${this.identifier} has signature disabled`);
+ }
+
+ // Compute the changes, comparing records before and after.
+ const oldById = new Map(oldRecords.map(e => [e.id, e]));
+ for (const r of newRecords) {
+ const old = oldById.get(r.id);
+ if (old) {
+ oldById.delete(r.id);
+ if (r.last_modified != old.last_modified) {
+ syncResult.updated.push({ old, new: r });
+ }
+ } else {
+ syncResult.created.push(r);
+ }
+ }
+ syncResult.deleted = syncResult.deleted.concat(
+ Array.from(oldById.values())
+ );
+ console.debug(
+ `${this.identifier} ${syncResult.created.length} created. ${syncResult.updated.length} updated. ${syncResult.deleted.length} deleted.`
+ );

return syncResult;
}
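The new `_importChanges` reports created/updated/deleted entries by comparing the records present before and after the import, keyed by record id. A small self-contained sketch of that comparison follows (the helper name and sample data are illustrative, not part of the patch):

    function computeSyncResult(oldRecords, newRecords) {
      const result = { created: [], updated: [], deleted: [] };
      const oldById = new Map(oldRecords.map(r => [r.id, r]));
      for (const r of newRecords) {
        const old = oldById.get(r.id);
        if (old) {
          oldById.delete(r.id);
          if (r.last_modified != old.last_modified) {
            result.updated.push({ old, new: r });
          }
        } else {
          result.created.push(r);
        }
      }
      // Records present locally but absent from the new set were deleted.
      result.deleted = Array.from(oldById.values());
      return result;
    }

    // Example: "b" disappeared, "a" was bumped, "c" is new.
    computeSyncResult(
      [{ id: "a", last_modified: 1 }, { id: "b", last_modified: 2 }],
      [{ id: "a", last_modified: 3 }, { id: "c", last_modified: 4 }]
    );
    // -> { created: ["c"], updated: [{ old: "a"@1, new: "a"@3 }], deleted: ["b"] }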
@@ -129,7 +129,7 @@ add_task(
await clientWithDump.maybeSync(timestamp);

const list = await clientWithDump.get();
- ok(list.length > 20, "The dump was loaded");
+ ok(list.length > 20, `The dump was loaded (${list.length} records)`);
equal(received.created[0].id, "xx", "Record from the sync come first.");

const createdById = received.created.reduce((acc, r) => {
@@ -63,6 +63,8 @@ function run_test() {
// because these tests were originally written for OneCRL.
client = RemoteSettings("signed", { signerName: SIGNER_NAME });

+ Services.prefs.setCharPref("services.settings.loglevel", "debug");
+
// set the content signing root to our test root
setRoot();
@@ -294,24 +296,12 @@ add_task(async function test_check_synchronization_with_signatures() {
responseBody: JSON.stringify({ data: [] }),
};

// Valid signature for empty collection.
const RESPONSE_BODY_META_EMPTY_SIG = makeMetaResponseBody(
1000,
"vxuAg5rDCB-1pul4a91vqSBQRXJG_j7WOYUTswxRSMltdYmbhLRH8R8brQ9YKuNDF56F-w6pn4HWxb076qgKPwgcEBtUeZAO_RtaHXRkRUUgVzAr86yQL4-aJTbv3D6u"
);

- const RESPONSE_META_NO_SIG = {
- sampleHeaders: [
- "Content-Type: application/json; charset=UTF-8",
- `ETag: \"123456\"`,
- ],
- status: { status: 200, statusText: "OK" },
- responseBody: JSON.stringify({
- data: {
- last_modified: 123456,
- },
- }),
- };

// The collection metadata containing the signature for the empty
// collection.
const RESPONSE_META_EMPTY_SIG = makeMetaResponse(
@@ -334,6 +324,12 @@ add_task(async function test_check_synchronization_with_signatures() {
],
};

+ //
+ // 1.
+ // - collection: undefined -> []
+ // - timestamp: undefined -> 1000
+ //
+
// .. and use this map to register handlers for each path
registerHandlers(emptyCollectionResponses);
@@ -343,12 +339,19 @@ add_task(async function test_check_synchronization_with_signatures() {
// well and throw if something goes wrong.
await client.maybeSync(1000);

+ equal((await client.get()).length, 0);
+
let endHistogram = getUptakeTelemetrySnapshot(TELEMETRY_HISTOGRAM_KEY);

// ensure that a success histogram is tracked when a succesful sync occurs.
let expectedIncrements = { [UptakeTelemetry.STATUS.SUCCESS]: 1 };
checkUptakeTelemetry(startHistogram, endHistogram, expectedIncrements);

+ //
+ // 2.
+ // - collection: [] -> [RECORD2, RECORD1]
+ // - timestamp: 1000 -> 3000
+ //
// Check that some additions (2 records) to the collection have a valid
// signature.
@@ -386,6 +389,13 @@ add_task(async function test_check_synchronization_with_signatures() {
registerHandlers(twoItemsResponses);
await client.maybeSync(3000);

+ equal((await client.get()).length, 2);
+
+ //
+ // 3.
+ // - collection: [RECORD2, RECORD1] -> [RECORD2, RECORD3]
+ // - timestamp: 3000 -> 4000
+ //
// Check the collection with one addition and one removal has a valid
// signature
@@ -423,6 +433,13 @@ add_task(async function test_check_synchronization_with_signatures() {
registerHandlers(oneAddedOneRemovedResponses);
await client.maybeSync(4000);

+ equal((await client.get()).length, 2);
+
+ //
+ // 4.
+ // - collection: [RECORD2, RECORD3] -> [RECORD2, RECORD3]
+ // - timestamp: 4000 -> 4100
+ //
// Check the signature is still valid with no operation (no changes)

// Leave the collection unchanged
@@ -447,10 +464,20 @@ add_task(async function test_check_synchronization_with_signatures() {
registerHandlers(noOpResponses);
await client.maybeSync(4100);

- // Check the collection is reset when the signature is invalid
+ equal((await client.get()).length, 2);
+
+ //
+ // 5.
+ // - collection: [RECORD2, RECORD3] -> [RECORD2, RECORD3]
+ // - timestamp: 4000 -> 5000
+ //
+ // Check the collection is reset when the signature is invalid.
+ // Client will:
+ // - Fetch metadata (with bad signature)
+ // - Perform the sync (fetch empty changes)
+ // - Refetch the metadata and the whole collection
+ // - Validate signature successfully, but with no changes to emit.

// Prepare a (deliberately) bad signature to check the collection state is
// reset if something is inconsistent
const RESPONSE_COMPLETE_INITIAL = {
comment: "RESPONSE_COMPLETE_INITIAL ",
sampleHeaders: [
@@ -461,21 +488,12 @@ add_task(async function test_check_synchronization_with_signatures() {
responseBody: JSON.stringify({ data: [RECORD2, RECORD3] }),
};

- const RESPONSE_COMPLETE_INITIAL_SORTED_BY_ID = {
- comment: "RESPONSE_COMPLETE_INITIAL ",
- sampleHeaders: [
- "Content-Type: application/json; charset=UTF-8",
- 'ETag: "4000"',
- ],
- status: { status: 200, statusText: "OK" },
- responseBody: JSON.stringify({ data: [RECORD3, RECORD2] }),
- };

// Prepare a (deliberately) bad signature to check the collection state is
// reset if something is inconsistent
const RESPONSE_BODY_META_BAD_SIG = makeMetaResponseBody(
4000,
"aW52YWxpZCBzaWduYXR1cmUK"
);

const RESPONSE_META_BAD_SIG = makeMetaResponse(
4000,
RESPONSE_BODY_META_BAD_SIG,
@@ -496,16 +514,11 @@ add_task(async function test_check_synchronization_with_signatures() {
"GET:/v1/buckets/main/collections/signed/records?_expected=5000&_sort=-last_modified&_since=4000": [
RESPONSE_EMPTY_NO_UPDATE,
],
- // The next request is for the full collection. This will be checked
- // against the valid signature - so the sync should succeed.
- "GET:/v1/buckets/main/collections/signed/records?_sort=-last_modified": [
+ // The next request is for the full collection. This will be checked against the valid signature
+ // - so the sync should succeed.
+ "GET:/v1/buckets/main/collections/signed/records?_expected=5000&_sort=-last_modified": [
RESPONSE_COMPLETE_INITIAL,
],
- // The next request is for the full collection sorted by id. This will be
- // checked against the valid signature - so the sync should succeed.
- "GET:/v1/buckets/main/collections/signed/records?_expected=5000&_sort=id": [
- RESPONSE_COMPLETE_INITIAL_SORTED_BY_ID,
- ],
};

registerHandlers(badSigGoodSigResponses);
@@ -519,6 +532,8 @@ add_task(async function test_check_synchronization_with_signatures() {

await client.maybeSync(5000);

+ equal((await client.get()).length, 2);
+
endHistogram = getUptakeTelemetrySnapshot(TELEMETRY_HISTOGRAM_KEY);

// since we only fixed the signature, and no data was changed, the sync event
@@ -531,6 +546,18 @@ add_task(async function test_check_synchronization_with_signatures() {
expectedIncrements = { [UptakeTelemetry.STATUS.SIGNATURE_ERROR]: 1 };
checkUptakeTelemetry(startHistogram, endHistogram, expectedIncrements);

+ //
+ // 6.
+ // - collection: [RECORD2, RECORD3] -> [RECORD2, RECORD3]
+ // - timestamp: 4000 -> 5000
+ //
+ // Check the collection is reset when the signature is invalid.
+ // Client will:
+ // - Fetch metadata (with bad signature)
+ // - Perform the sync (fetch empty changes)
+ // - Refetch the whole collection and metadata
+ // - Sync will be no-op since local is equal to server, no changes to emit.
+
const badSigGoodOldResponses = {
// In this test, we deliberately serve a bad signature initially. The
// subsequent sitnature returned is a valid one for the three item
@@ -544,11 +571,11 @@ add_task(async function test_check_synchronization_with_signatures() {
"GET:/v1/buckets/main/collections/signed/records?_expected=5000&_sort=-last_modified&_since=4000": [
RESPONSE_EMPTY_NO_UPDATE,
],
- // The next request is for the full collection sorted by id. This will be
+ // The next request is for the full collection. This will be
// checked against the valid signature and last_modified times will be
- // compared. Sync should fail, even though the signature is good,
+ // compared. Sync should be a no-op, even though the signature is good,
// because the local collection is newer.
- "GET:/v1/buckets/main/collections/signed/records?_expected=5000&_sort=id": [
+ "GET:/v1/buckets/main/collections/signed/records?_expected=5000&_sort=-last_modified": [
RESPONSE_EMPTY_INITIAL,
],
};
@@ -569,6 +596,14 @@ add_task(async function test_check_synchronization_with_signatures() {
// thus the sync event is not sent.
equal(syncEventSent, false);

+ //
+ // 7.
+ // - collection: [RECORD2, RECORD3] -> [RECORD2, RECORD3]
+ // - timestamp: 4000 -> 5000
+ //
+ // Check that a tempered local DB will be overwritten and
+ // sync event contain the appropriate data.
+
const badLocalContentGoodSigResponses = {
// In this test, we deliberately serve a bad signature initially. The
// subsequent signature returned is a valid one for the three item
@@ -582,17 +617,13 @@ add_task(async function test_check_synchronization_with_signatures() {
"GET:/v1/buckets/main/collections/signed/records?_expected=5000&_sort=-last_modified": [
RESPONSE_COMPLETE_INITIAL,
],
- // The next request is for the full collection sorted by id. This will be
- // checked against the valid signature - so the sync should succeed.
- "GET:/v1/buckets/main/collections/signed/records?_expected=5000&_sort=id": [
- RESPONSE_COMPLETE_INITIAL_SORTED_BY_ID,
- ],
};

registerHandlers(badLocalContentGoodSigResponses);

// we create a local state manually here, in order to test that the sync event data
// properly contains created, updated, and deleted records.
// the local DB contains same id as RECORD2 and a fake record.
// the final server collection contains RECORD2 and RECORD3
const kintoCol = await client.openCollection();
await kintoCol.clear();
@@ -603,14 +634,16 @@ add_task(async function test_check_synchronization_with_signatures() {
const localId = "0602b1b2-12ab-4d3a-b6fb-593244e7b035";
await kintoCol.create({ id: localId }, { synced: true, useRecordId: true });

- let syncData;
+ let syncData = null;
client.on("sync", ({ data }) => {
syncData = data;
});

await client.maybeSync(5000);

- // Local data was unchanged, since it was never than the one returned by the server.
+ // Local data was replaced. But we use records IDs to determine
+ // what was created and deleted. So fake local data will appaer
+ // in the sync event.
equal(syncData.current.length, 2);
equal(syncData.created.length, 1);
equal(syncData.created[0].id, RECORD3.id);
@@ -620,6 +653,13 @@ add_task(async function test_check_synchronization_with_signatures() {
equal(syncData.deleted.length, 1);
equal(syncData.deleted[0].id, localId);

+ //
+ // 8.
+ // - collection: [RECORD2, RECORD3] -> [RECORD2, RECORD3]
+ // - timestamp: 4000 -> 5000
+ //
+ // Check that a failing signature throws after retry.
+
const allBadSigResponses = {
// In this test, we deliberately serve only a bad signature.
"GET:/v1/buckets/main/collections/signed?_expected=6000": [
@@ -633,8 +673,8 @@ add_task(async function test_check_synchronization_with_signatures() {
],
// The next request is for the full collection sorted by id. This will be
// checked against the valid signature - so the sync should succeed.
- "GET:/v1/buckets/main/collections/signed/records?_expected=6000&_sort=id": [
- RESPONSE_COMPLETE_INITIAL_SORTED_BY_ID,
+ "GET:/v1/buckets/main/collections/signed/records?_expected=6000&_sort=-last_modified": [
+ RESPONSE_COMPLETE_INITIAL,
],
};
@@ -652,6 +692,26 @@ add_task(async function test_check_synchronization_with_signatures() {
expectedIncrements = { [UptakeTelemetry.STATUS.SIGNATURE_RETRY_ERROR]: 1 };
checkUptakeTelemetry(startHistogram, endHistogram, expectedIncrements);

+ //
+ // 9.
+ // - collection: [RECORD2, RECORD3] -> [RECORD2, RECORD3]
+ // - timestamp: 5000 -> 6000
+ //
+ // Check that sync throws if metadata has no signature.
+
+ const RESPONSE_META_NO_SIG = {
+ sampleHeaders: [
+ "Content-Type: application/json; charset=UTF-8",
+ `ETag: \"123456\"`,
+ ],
+ status: { status: 200, statusText: "OK" },
+ responseBody: JSON.stringify({
+ data: {
+ last_modified: 123456,
+ },
+ }),
+ };
+
const missingSigResponses = {
// In this test, we deliberately serve metadata without the signature attribute.
// As if the collection was not signed.