Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1429250 - Avoid repeated traversals of toFetch in _processIncoming. r=eoger,kitcambridge
MozReview-Commit-ID: N1Zr8iT7da
--HG--
extra : rebase_source : ddaa51704c93985219d15b31aec7ff7e19b2832f
Parent: c6289726c7
Commit: 6321345295
@@ -100,8 +100,8 @@ RotaryTracker.prototype = {
 this.RotaryEngine = function RotaryEngine(service) {
   SyncEngine.call(this, "Rotary", service);
   // Ensure that the engine starts with a clean slate.
-  this.toFetch = [];
-  this.previousFailed = [];
+  this.toFetch = new SerializableSet();
+  this.previousFailed = new SerializableSet();
 };
 RotaryEngine.prototype = {
   __proto__: SyncEngine.prototype,

@@ -194,9 +194,11 @@ class BookmarkRepairRequestor extends CollectionRepairRequestor {
     for (let id of validationInfo.problems.serverMissing) {
       engine.addForWeakUpload(id);
     }
-    let toFetch = engine.toFetch.concat(validationInfo.problems.clientMissing,
-                                        validationInfo.problems.serverDeleted);
-    engine.toFetch = Array.from(new Set(toFetch));
+    engine.toFetch = Utils.setAddAll(
+      Utils.setAddAll(engine.toFetch,
+                      validationInfo.problems.clientMissing),
+      validationInfo.problems.serverDeleted
+    );
     return true;
   }
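
For context, a minimal sketch of the change this hunk makes; clientMissing and serverDeleted here stand in for the validationInfo.problems fields. The old code copied the whole backlog into a fresh array and re-deduplicated it through a temporary Set on every repair, while the new code folds the missing ids into the existing Set in place, one pass per input list:

// Old shape: allocates a concatenated array, a temporary Set, then another array.
let toFetch = engine.toFetch.concat(clientMissing, serverDeleted);
engine.toFetch = Array.from(new Set(toFetch));

// New shape: mutates the existing SerializableSet in place.
engine.toFetch = Utils.setAddAll(
  Utils.setAddAll(engine.toFetch, clientMissing),
  serverDeleted);
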
@@ -842,6 +842,9 @@ SyncEngine.prototype = {
     if (!json.ids) {
       json.ids = [];
     }
+    // The set serializes the same way as an array, but offers more efficient
+    // methods of manipulation.
+    json.ids = new SerializableSet(json.ids);
     return json;
   },
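
A small sketch of why the loader converts the persisted array, assuming the ids were saved to disk as a plain JSON array: membership tests on a Set are constant-time, whereas an Array lookup scans the whole backlog.

// Hypothetical persisted payload, as read back from the storage file.
let json = { ids: ["guid01", "guid02"] };
json.ids = new SerializableSet(json.ids);
json.ids.has("guid01"); // true; O(1) instead of an O(n) indexOf scan
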
@@ -911,6 +914,9 @@ SyncEngine.prototype = {
   },

   set toFetch(ids) {
+    if (ids.constructor.name != "SerializableSet") {
+      throw new Error("Bug: Attempted to set toFetch to something that isn't a SerializableSet");
+    }
     this._toFetchStorage.data = { ids };
     this._toFetchStorage.saveSoon();
   },
@@ -919,7 +925,12 @@ SyncEngine.prototype = {
     this._previousFailedStorage.ensureDataReady();
     return this._previousFailedStorage.data.ids;
   },

   set previousFailed(ids) {
+    if (ids.constructor.name != "SerializableSet") {
+      throw new Error(
+        "Bug: Attempted to set previousFailed to something that isn't a SerializableSet");
+    }
     this._previousFailedStorage.data = { ids };
     this._previousFailedStorage.saveSoon();
   },
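
A sketch of what these new setter guards catch. Note the guard compares ids.constructor.name rather than using instanceof; one plausible reason is to keep accepting a SerializableSet constructed in a different compartment, where instanceof would fail.

// Passing a plain array now fails fast instead of silently persisting bad data.
try {
  engine.previousFailed = ["guid01"]; // Array, not a SerializableSet
} catch (e) {
  // "Bug: Attempted to set previousFailed to something that isn't a SerializableSet"
}
engine.previousFailed = new SerializableSet(["guid01"]); // accepted
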
@@ -1100,7 +1111,7 @@ SyncEngine.prototype = {
     // reconciled => number of items that were reconciled.
     let count = {applied: 0, failed: 0, newFailed: 0, reconciled: 0};
     let recordsToApply = [];
-    let failedInCurrentSync = [];
+    let failedInCurrentSync = new SerializableSet();

     let oldestModified = this.lastModified;
     let downloadedIDs = new Set();
@@ -1124,7 +1135,7 @@ SyncEngine.prototype = {

       let { shouldApply, error } = await this._maybeReconcile(record);
       if (error) {
-        failedInCurrentSync.push(record.id);
+        failedInCurrentSync.add(record.id);
         count.failed++;
         continue;
       }
@@ -1136,7 +1147,7 @@ SyncEngine.prototype = {
     }

     let failedToApply = await this._applyRecords(recordsToApply);
-    failedInCurrentSync.push(...failedToApply);
+    Utils.setAddAll(failedInCurrentSync, failedToApply);

     // `applied` is a bit of a misnomer: it counts records that *should* be
     // applied, so it also includes records that we tried to apply and failed.
@@ -1165,7 +1176,7 @@ SyncEngine.prototype = {
       // that in case the Sync server doesn't support `older` (bug 1316110).
       let remainingIDs = guids.obj.filter(id => !downloadedIDs.has(id));
       if (remainingIDs.length > 0) {
-        this.toFetch = Utils.arrayUnion(this.toFetch, remainingIDs);
+        this.toFetch = Utils.setAddAll(this.toFetch, remainingIDs);
       }
     }

@@ -1180,8 +1191,9 @@ SyncEngine.prototype = {
     // download limit, to prevent a large backlog for one engine from blocking
     // the others. We'll keep processing the backlog on subsequent engine syncs.
     let failedInPreviousSync = this.previousFailed;
-    let idsToBackfill = Utils.arrayUnion(this.toFetch.slice(0, downloadLimit),
-                                         failedInPreviousSync);
+    let idsToBackfill = Array.from(
+      Utils.setAddAll(Utils.subsetOfSize(this.toFetch, downloadLimit),
+                      failedInPreviousSync));

     // Note that we intentionally overwrite the previously failed list here.
     // Records that fail to decrypt or apply in two consecutive syncs are likely
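
A worked sketch of the new backfill selection, using the Utils helpers added in the util.js hunk further down; the values are purely illustrative:

let toFetch = new SerializableSet(["a", "b", "c", "d"]);
let failedInPreviousSync = new SerializableSet(["c", "e"]);
let downloadLimit = 3;
let idsToBackfill = Array.from(
  Utils.setAddAll(Utils.subsetOfSize(toFetch, downloadLimit),
                  failedInPreviousSync));
// => ["a", "b", "c", "e"]; "d" stays in the backlog for a later sync
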
@@ -1230,20 +1242,21 @@ SyncEngine.prototype = {
       count.failed += failedToApply.length;
       count.applied += backfilledRecordsToApply.length;

-      this.toFetch = Utils.arraySub(this.toFetch, ids);
-      this.previousFailed = Utils.arrayUnion(this.previousFailed, failedInBackfill);
+      this.toFetch = Utils.setDeleteAll(this.toFetch, ids);
+      this.previousFailed = Utils.setAddAll(this.previousFailed, failedInBackfill);

       if (this.lastSync < this.lastModified) {
         this.lastSync = this.lastModified;
       }
     }

-    count.newFailed = this.previousFailed.reduce((count, engine) => {
-      if (failedInPreviousSync.indexOf(engine) == -1) {
-        count++;
+    count.newFailed = 0;
+    for (let item of this.previousFailed) {
+      if (!failedInPreviousSync.has(item)) {
+        ++count.newFailed;
       }
-      return count;
-    }, 0);
+    }

     count.succeeded = Math.max(0, count.applied - count.failed);
     this._log.info(["Records:",
                     count.applied, "applied,",
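
Since Set has no reduce, the count of newly failed ids becomes an explicit set-difference count. A standalone sketch of the same logic:

// Count the items of `current` that are absent from `previous` (|A \ B|).
function countNewFailures(current, previous) {
  let newFailed = 0;
  for (let item of current) {
    if (!previous.has(item)) {
      ++newFailed;
    }
  }
  return newFailed;
}
countNewFailures(new Set(["x", "y"]), new Set(["y"])); // 1
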
@@ -1811,8 +1824,8 @@ SyncEngine.prototype = {

   async _resetClient() {
     this.resetLastSync();
-    this.previousFailed = [];
-    this.toFetch = [];
+    this.previousFailed = new SerializableSet();
+    this.toFetch = new SerializableSet();
     this._needWeakUpload.clear();
   },

@@ -2,7 +2,7 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

-this.EXPORTED_SYMBOLS = ["Utils", "Svc"];
+this.EXPORTED_SYMBOLS = ["Utils", "Svc", "SerializableSet"];

 var {classes: Cc, interfaces: Ci, results: Cr, utils: Cu} = Components;

@@ -524,6 +524,47 @@ this.Utils = {
     return foo.concat(Utils.arraySub(bar, foo));
   },

+  /**
+   * Add all the items in `items` to the provided Set in-place.
+   *
+   * @return The provided set.
+   */
+  setAddAll(set, items) {
+    for (let item of items) {
+      set.add(item);
+    }
+    return set;
+  },
+
+  /**
+   * Delete every item in `items` from the provided Set in-place.
+   *
+   * @return The provided set.
+   */
+  setDeleteAll(set, items) {
+    for (let item of items) {
+      set.delete(item);
+    }
+    return set;
+  },
+
+  /**
+   * Take the first `size` items from the Set `items`.
+   *
+   * @return A Set of size at most `size`
+   */
+  subsetOfSize(items, size) {
+    let result = new Set();
+    let count = 0;
+    for (let item of items) {
+      if (count++ == size) {
+        return result;
+      }
+      result.add(item);
+    }
+    return result;
+  },
+
   bind2: function Async_bind2(object, method) {
     return function innerBind() { return method.apply(object, arguments); };
   },
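
A minimal usage sketch of the three new helpers (values illustrative). Note that setAddAll and setDeleteAll mutate and return the set they are given, while subsetOfSize allocates a fresh Set:

let s = new Set(["a", "b"]);
Utils.setAddAll(s, ["b", "c"]);          // s is now {"a", "b", "c"}
Utils.setDeleteAll(s, ["a", "z"]);       // s is now {"b", "c"}; absent items are ignored
let firstTwo = Utils.subsetOfSize(s, 2); // a new Set holding at most 2 items
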
@@ -703,6 +744,15 @@ this.Utils = {
   }
 };

+/**
+ * A subclass of Set that serializes as an Array when passed to JSON.stringify.
+ */
+class SerializableSet extends Set {
+  toJSON() {
+    return Array.from(this);
+  }
+}
+
 XPCOMUtils.defineLazyGetter(Utils, "_utf8Converter", function() {
   let converter = Cc["@mozilla.org/intl/scriptableunicodeconverter"]
                     .createInstance(Ci.nsIScriptableUnicodeConverter);
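
A round-trip sketch of why the subclass exists: a plain Set has no own enumerable properties, so JSON.stringify flattens it to "{}", while the toJSON hook makes the contents persist as an array that the load path can revive:

JSON.stringify(new Set(["a", "b"]));             // '{}' - contents silently lost
JSON.stringify(new SerializableSet(["a", "b"])); // '["a","b"]' via toJSON()
new SerializableSet(JSON.parse('["a","b"]'));    // revives the persisted ids
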
@@ -170,7 +170,7 @@ add_task(async function test_processIncoming_error_orderChildren() {
     // Make the record 10 minutes old so it will only be synced in the toFetch phase.
     bogus_record.modified = Date.now() / 1000 - 60 * 10;
     engine.lastSync = Date.now() / 1000 - 60;
-    engine.toFetch = [BOGUS_GUID];
+    engine.toFetch = new SerializableSet([BOGUS_GUID]);

     let error;
     try {

@@ -69,7 +69,7 @@ add_task(async function test_history_download_limit() {
   let ping = await sync_engine_and_validate_telem(engine, false);
   deepEqual(ping.engines[0].incoming, { applied: 5 });

-  let backlogAfterFirstSync = engine.toFetch.slice(0);
+  let backlogAfterFirstSync = Array.from(engine.toFetch).sort();
   deepEqual(backlogAfterFirstSync, ["place0000000", "place0000001",
     "place0000002", "place0000003", "place0000004", "place0000005",
     "place0000006", "place0000007", "place0000008", "place0000009"]);
@@ -84,7 +84,7 @@ add_task(async function test_history_download_limit() {
   // After the second sync, our backlog still contains the same GUIDs: we
   // weren't able to make progress on fetching them, since our
   // `guidFetchBatchSize` is 0.
-  let backlogAfterSecondSync = engine.toFetch.slice(0);
+  let backlogAfterSecondSync = Array.from(engine.toFetch).sort();
   deepEqual(backlogAfterFirstSync, backlogAfterSecondSync);

   // Now add a newer record to the server.
@@ -105,7 +105,7 @@ add_task(async function test_history_download_limit() {
   deepEqual(ping.engines[0].incoming, { applied: 1 });

   // Our backlog should remain the same.
-  let backlogAfterThirdSync = engine.toFetch.slice(0);
+  let backlogAfterThirdSync = Array.from(engine.toFetch).sort();
   deepEqual(backlogAfterSecondSync, backlogAfterThirdSync);

   equal(engine.lastSync, lastSync + 20);
@@ -118,15 +118,16 @@ add_task(async function test_history_download_limit() {
   ping = await sync_engine_and_validate_telem(engine, false);
   deepEqual(ping.engines[0].incoming, { applied: 5 });

-  deepEqual(engine.toFetch, ["place0000005", "place0000006", "place0000007",
-    "place0000008", "place0000009"]);
+  deepEqual(
+    Array.from(engine.toFetch).sort(),
+    ["place0000005", "place0000006", "place0000007", "place0000008", "place0000009"]);

   // Sync again to clear out the backlog.
   engine.lastModified = collection.modified;
   ping = await sync_engine_and_validate_telem(engine, false);
   deepEqual(ping.engines[0].incoming, { applied: 5 });

-  deepEqual(engine.toFetch, []);
+  deepEqual(Array.from(engine.toFetch), []);
   await PlacesTestUtils.clearHistory();
 });

@@ -12,6 +12,16 @@ async function makeSteamEngine() {
   return engine;
 }

+function guidSetOfSize(length) {
+  return new SerializableSet(
+    Array.from({ length }, () => Utils.makeGUID()));
+}
+
+function assertSetsEqual(a, b) {
+  // Assert.deepEqual doesn't understand Set.
+  Assert.deepEqual(Array.from(a).sort(), Array.from(b).sort());
+}
+
 async function testSteamEngineStorage(test) {
   try {
     let setupEngine = await makeSteamEngine();
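
A quick sketch of why the test helper sorts before comparing: Set iteration follows insertion order, and Assert.deepEqual has no Set-aware comparison, so two sets with the same members can otherwise look different.

// Same members, different insertion order - still expected to pass.
assertSetsEqual(new SerializableSet(["b", "a"]),
                new SerializableSet(["a", "b"]));
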
@@ -113,10 +123,10 @@ add_task(async function test_toFetch() {
   const filename = "weave/toFetch/steam.json";

   await testSteamEngineStorage({
-    toFetch: [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()],
+    toFetch: guidSetOfSize(3),
     setup(engine) {
       // Ensure pristine environment
-      Assert.equal(engine.toFetch.length, 0);
+      Assert.equal(engine.toFetch.size, 0);

       // Write file to disk
       engine.toFetch = this.toFetch;
@@ -124,13 +134,13 @@ add_task(async function test_toFetch() {
     },
     check(engine) {
       // toFetch is written asynchronously
-      Assert.deepEqual(engine.toFetch, this.toFetch);
+      assertSetsEqual(engine.toFetch, this.toFetch);
     },
   });

   await testSteamEngineStorage({
-    toFetch: [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()],
-    toFetch2: [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()],
+    toFetch: guidSetOfSize(4),
+    toFetch2: guidSetOfSize(5),
     setup(engine) {
       // Make sure it works for consecutive writes before the callback is executed.
       engine.toFetch = this.toFetch;
@@ -140,12 +150,12 @@ add_task(async function test_toFetch() {
       Assert.equal(engine.toFetch, this.toFetch2);
     },
     check(engine) {
-      Assert.deepEqual(engine.toFetch, this.toFetch2);
+      assertSetsEqual(engine.toFetch, this.toFetch2);
     },
   });

   await testSteamEngineStorage({
-    toFetch: [Utils.makeGUID(), Utils.makeGUID()],
+    toFetch: guidSetOfSize(2),
     async beforeCheck() {
       let toFetchPath = OS.Path.join(OS.Constants.Path.profileDir, filename);
       let bytes = new TextEncoder().encode(JSON.stringify(this.toFetch));
@@ -154,7 +164,7 @@ add_task(async function test_toFetch() {
     },
     check(engine) {
       // Read file from disk
-      Assert.deepEqual(engine.toFetch, this.toFetch);
+      assertSetsEqual(engine.toFetch, this.toFetch);
     },
   });
 });

@@ -165,10 +175,10 @@ add_task(async function test_previousFailed() {
   const filename = "weave/failed/steam.json";

   await testSteamEngineStorage({
-    previousFailed: [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()],
+    previousFailed: guidSetOfSize(3),
     setup(engine) {
       // Ensure pristine environment
-      Assert.equal(engine.previousFailed.length, 0);
+      Assert.equal(engine.previousFailed.size, 0);

       // Write file to disk
       engine.previousFailed = this.previousFailed;
@@ -176,13 +186,13 @@ add_task(async function test_previousFailed() {
     },
     check(engine) {
       // previousFailed is written asynchronously
-      Assert.deepEqual(engine.previousFailed, this.previousFailed);
+      assertSetsEqual(engine.previousFailed, this.previousFailed);
     },
   });

   await testSteamEngineStorage({
-    previousFailed: [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()],
-    previousFailed2: [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()],
+    previousFailed: guidSetOfSize(4),
+    previousFailed2: guidSetOfSize(5),
     setup(engine) {
       // Make sure it works for consecutive writes before the callback is executed.
       engine.previousFailed = this.previousFailed;
@@ -192,12 +202,12 @@ add_task(async function test_previousFailed() {
       Assert.equal(engine.previousFailed, this.previousFailed2);
     },
     check(engine) {
-      Assert.deepEqual(engine.previousFailed, this.previousFailed2);
+      assertSetsEqual(engine.previousFailed, this.previousFailed2);
     },
   });

   await testSteamEngineStorage({
-    previousFailed: [Utils.makeGUID(), Utils.makeGUID()],
+    previousFailed: guidSetOfSize(2),
     async beforeCheck() {
       let previousFailedPath = OS.Path.join(OS.Constants.Path.profileDir,
                                             filename);
@@ -207,7 +217,7 @@ add_task(async function test_previousFailed() {
     },
     check(engine) {
       // Read file from disk
-      Assert.deepEqual(engine.previousFailed, this.previousFailed);
+      assertSetsEqual(engine.previousFailed, this.previousFailed);
     },
   });
 });

@@ -220,18 +230,18 @@ add_task(async function test_resetClient() {
     // Ensure pristine environment
     Assert.equal(Svc.Prefs.get("steam.lastSync"), undefined);
     Assert.equal(Svc.Prefs.get("steam.lastSyncLocal"), undefined);
-    Assert.equal(engine.toFetch.length, 0);
+    Assert.equal(engine.toFetch.size, 0);

     engine.lastSync = 123.45;
     engine.lastSyncLocal = 67890;
-    engine.toFetch = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
-    engine.previousFailed = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
+    engine.toFetch = guidSetOfSize(4);
+    engine.previousFailed = guidSetOfSize(3);

     await engine.resetClient();
     Assert.equal(engine.lastSync, 0);
     Assert.equal(engine.lastSyncLocal, 0);
-    Assert.equal(engine.toFetch.length, 0);
-    Assert.equal(engine.previousFailed.length, 0);
+    Assert.equal(engine.toFetch.size, 0);
+    Assert.equal(engine.previousFailed.size, 0);
   } finally {
     Svc.Prefs.resetBranch("");
   }

@@ -252,13 +262,13 @@ add_task(async function test_wipeServer() {
   try {
     // Some data to reset.
     engine.lastSync = 123.45;
-    engine.toFetch = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
+    engine.toFetch = guidSetOfSize(3);

     _("Wipe server data and reset client.");
     await engine.wipeServer();
     Assert.equal(steamCollection.payload, undefined);
     Assert.equal(engine.lastSync, 0);
-    Assert.equal(engine.toFetch.length, 0);
+    Assert.equal(engine.toFetch.size, 0);

   } finally {
     steamServer.stop(do_test_finished);

@@ -627,8 +627,8 @@ add_task(async function test_processIncoming_resume_toFetch() {
     // Time travel 10 seconds into the future but still download the above WBOs.
     let engine = makeRotaryEngine();
     engine.lastSync = LASTSYNC;
-    engine.toFetch = ["flying", "scotsman"];
-    engine.previousFailed = ["failed0", "failed1", "failed2"];
+    engine.toFetch = new SerializableSet(["flying", "scotsman"]);
+    engine.previousFailed = new SerializableSet(["failed0", "failed1", "failed2"]);

     let server = sync_httpd_setup({
       "/1.1/foo/storage/rotary": collection.handler()
@@ -657,7 +657,7 @@ add_task(async function test_processIncoming_resume_toFetch() {
     Assert.equal(engine._store.items.failed0, "Record No. 0");
     Assert.equal(engine._store.items.failed1, "Record No. 1");
     Assert.equal(engine._store.items.failed2, "Record No. 2");
-    Assert.equal(engine.previousFailed.length, 0);
+    Assert.equal(engine.previousFailed.size, 0);
   } finally {
     await cleanAndGo(engine, server);
   }
@@ -703,8 +703,8 @@ add_task(async function test_processIncoming_notify_count() {
   try {
     // Confirm initial environment.
     Assert.equal(engine.lastSync, 0);
-    Assert.equal(engine.toFetch.length, 0);
-    Assert.equal(engine.previousFailed.length, 0);
+    Assert.equal(engine.toFetch.size, 0);
+    Assert.equal(engine.previousFailed.size, 0);
     do_check_empty(engine._store.items);

     let called = 0;
@@ -722,8 +722,8 @@ add_task(async function test_processIncoming_notify_count() {

     // Confirm failures.
     do_check_attribute_count(engine._store.items, 12);
-    Assert.deepEqual(engine.previousFailed, ["record-no-00", "record-no-05",
-      "record-no-10"]);
+    Assert.deepEqual(Array.from(engine.previousFailed).sort(),
+      ["record-no-00", "record-no-05", "record-no-10"].sort());

     // There are newly failed records and they are reported.
     Assert.equal(called, 1);
@@ -737,7 +737,7 @@ add_task(async function test_processIncoming_notify_count() {

     // Confirming removed failures.
     do_check_attribute_count(engine._store.items, 14);
-    Assert.deepEqual(engine.previousFailed, ["record-no-00"]);
+    Assert.deepEqual(Array.from(engine.previousFailed), ["record-no-00"]);

     Assert.equal(called, 2);
     Assert.equal(counts.failed, 1);
|
|||
try {
|
||||
// Confirm initial environment.
|
||||
Assert.equal(engine.lastSync, 0);
|
||||
Assert.equal(engine.toFetch.length, 0);
|
||||
Assert.equal(engine.previousFailed.length, 0);
|
||||
Assert.equal(engine.toFetch.size, 0);
|
||||
Assert.equal(engine.previousFailed.size, 0);
|
||||
do_check_empty(engine._store.items);
|
||||
|
||||
// Initial failed items in previousFailed to be reset.
|
||||
let previousFailed = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
|
||||
let previousFailed = new SerializableSet([Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()]);
|
||||
engine.previousFailed = previousFailed;
|
||||
Assert.equal(engine.previousFailed, previousFailed);
|
||||
|
||||
|
@@ -807,9 +807,10 @@ add_task(async function test_processIncoming_previousFailed() {

     // Expected result: 4 sync batches with 2 failures each => 8 failures
     do_check_attribute_count(engine._store.items, 6);
-    Assert.deepEqual(engine.previousFailed, ["record-no-00", "record-no-01",
-      "record-no-04", "record-no-05", "record-no-08", "record-no-09",
-      "record-no-12", "record-no-13"]);
+    Assert.deepEqual(
+      Array.from(engine.previousFailed).sort(),
+      ["record-no-00", "record-no-01", "record-no-04", "record-no-05",
+       "record-no-08", "record-no-09", "record-no-12", "record-no-13"].sort());

     // Sync again with the same failed items (records 0, 1, 8, 9).
     await engine._processIncoming();
@@ -817,8 +818,9 @@ add_task(async function test_processIncoming_previousFailed() {
     // A second sync with the same failed items should not add the same items again.
     // Items that did not fail a second time should no longer be in previousFailed.
     do_check_attribute_count(engine._store.items, 10);
-    Assert.deepEqual(engine.previousFailed, ["record-no-00", "record-no-01",
-      "record-no-08", "record-no-09"]);
+    Assert.deepEqual(
+      Array.from(engine.previousFailed).sort(),
+      ["record-no-00", "record-no-01", "record-no-08", "record-no-09"].sort());

     // Refetched items that didn't fail the second time are in engine._store.items.
     Assert.equal(engine._store.items["record-no-04"], "Record No. 4");
@@ -900,8 +902,8 @@ add_task(async function test_processIncoming_failed_records() {

     // Confirm initial environment
     Assert.equal(engine.lastSync, 0);
-    Assert.equal(engine.toFetch.length, 0);
-    Assert.equal(engine.previousFailed.length, 0);
+    Assert.equal(engine.toFetch.size, 0);
+    Assert.equal(engine.previousFailed.size, 0);
     do_check_empty(engine._store.items);

     let observerSubject;
@@ -920,8 +922,8 @@ add_task(async function test_processIncoming_failed_records() {
                  NUMBER_OF_RECORDS - BOGUS_RECORDS.length);

     // Ensure that the bogus records will be fetched again on the next sync.
-    Assert.equal(engine.previousFailed.length, BOGUS_RECORDS.length);
-    Assert.deepEqual(engine.previousFailed.sort(), BOGUS_RECORDS.sort());
+    Assert.equal(engine.previousFailed.size, BOGUS_RECORDS.length);
+    Assert.deepEqual(Array.from(engine.previousFailed).sort(), BOGUS_RECORDS.sort());

     // Ensure the observer was notified
     Assert.equal(observerData, engine.name);
@@ -999,8 +1001,8 @@ add_task(async function test_processIncoming_decrypt_failed() {
   try {

     // Confirm initial state
-    Assert.equal(engine.toFetch.length, 0);
-    Assert.equal(engine.previousFailed.length, 0);
+    Assert.equal(engine.toFetch.size, 0);
+    Assert.equal(engine.previousFailed.size, 0);

     let observerSubject;
     let observerData;
@@ -1016,11 +1018,11 @@ add_task(async function test_processIncoming_decrypt_failed() {
     Assert.equal(ping.engines[0].incoming.failed, 4);
     Assert.equal(ping.engines[0].incoming.newFailed, 4);

-    Assert.equal(engine.previousFailed.length, 4);
-    Assert.equal(engine.previousFailed[0], "nojson");
-    Assert.equal(engine.previousFailed[1], "nojson2");
-    Assert.equal(engine.previousFailed[2], "nodecrypt");
-    Assert.equal(engine.previousFailed[3], "nodecrypt2");
+    Assert.equal(engine.previousFailed.size, 4);
+    Assert.ok(engine.previousFailed.has("nojson"));
+    Assert.ok(engine.previousFailed.has("nojson2"));
+    Assert.ok(engine.previousFailed.has("nodecrypt"));
+    Assert.ok(engine.previousFailed.has("nodecrypt2"));

     // Ensure the observer was notified
     Assert.equal(observerData, engine.name);

@@ -122,7 +122,7 @@ add_task(async function test_processIncoming_error() {
     // Make the record 10 minutes old so it will only be synced in the toFetch phase.
     bogus_record.modified = Date.now() / 1000 - 60 * 10;
     engine.lastSync = Date.now() / 1000 - 60;
-    engine.toFetch = [BOGUS_GUID];
+    engine.toFetch = new SerializableSet([BOGUS_GUID]);

     let error, pingPayload, fullPing;
     try {

@@ -208,7 +208,7 @@
   "Traversal.jsm": ["TraversalRules", "TraversalHelper"],
   "UpdateTelemetry.jsm": ["AUSTLMY"],
   "UpdateTopLevelContentWindowIDHelper.jsm": ["trackBrowserWindow"],
-  "util.js": ["getChromeWindow", "Utils", "Svc"],
+  "util.js": ["getChromeWindow", "Utils", "Svc", "SerializableSet"],
   "utils.js": ["btoa", "encryptPayload", "makeIdentityConfig", "makeFxAccountsInternalMock", "configureFxAccountIdentity", "configureIdentity", "SyncTestingInfrastructure", "waitForZeroTimer", "Promise", "MockFxaStorageManager", "AccountState", "sumHistogram", "CommonUtils", "CryptoUtils", "TestingUtils", "promiseZeroTimer", "promiseNamedTimer", "getLoginTelemetryScalar", "syncTestLogging"],
   "Utils.jsm": ["Utils", "Logger", "PivotContext", "PrefCache"],
   "VariablesView.jsm": ["VariablesView", "escapeHTML"],