Bug 1435588 - (part 3) Avoid applying the bookmarks buffer if we failed to store all records r=kitcambridge

MozReview-Commit-ID: GJ8OHBuh6m0

--HG--
extra : rebase_source : ea3fc302c64d4119d8e407411f9a3a1ba0b428cb
Thom Chiovoloni 2018-02-22 18:24:02 -05:00
Parent c82baa71a3
Commit 28add38cd6
2 changed files with 103 additions and 12 deletions

View File

@@ -723,15 +723,12 @@ BufferedBookmarksEngine.prototype = {
   },
 
   async _processIncoming(newitems) {
-    try {
-      await super._processIncoming(newitems);
-    } finally {
-      let buf = await this._store.ensureOpenMirror();
-      let recordsToUpload = await buf.apply({
-        remoteTimeSeconds: Resource.serverTime,
-      });
-      this._modified.replace(recordsToUpload);
-    }
+    await super._processIncoming(newitems);
+    let buf = await this._store.ensureOpenMirror();
+    let recordsToUpload = await buf.apply({
+      remoteTimeSeconds: Resource.serverTime,
+    });
+    this._modified.replace(recordsToUpload);
   },
 
   async _reconcile(item) {
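
The practical effect of dropping the try/finally above: if storing incoming records throws, the error now propagates before the mirror is applied, and the engine's last-sync timestamp only covers records that were actually stored, so the next sync resumes the download instead of applying a half-populated buffer. Below is a minimal, self-contained sketch of that control flow; the processIncoming, store, and mirror names are hypothetical stand-ins, not the real BufferedBookmarksEngine or mirror APIs.

// Sketch only: `store` and `mirror` are mock stand-ins used to illustrate the
// control flow this patch switches to.
async function processIncoming(store, mirror, records) {
  for (let record of records) {
    await store.apply(record);        // may throw part-way through the download
    store.lastSync = record.modified; // lastSync only advances for stored records
  }
  // Previously this ran in a `finally`, i.e. even after a failure above;
  // now it is only reached once every record has been stored.
  return mirror.apply();
}

async function demo() {
  let store = {
    lastSync: 0,
    async apply(record) {
      if (record.poison) {
        throw new Error("D:");
      }
    },
  };
  let mirror = { applied: false, async apply() { this.applied = true; return []; } };
  let records = [
    { id: "a", modified: 10 },
    { id: "b", modified: 20, poison: true },
    { id: "c", modified: 30 },
  ];
  try {
    await processIncoming(store, mirror, records);
  } catch (e) {
    // The error propagates, the mirror was never applied, and lastSync still
    // points at the last stored record, so the next sync resumes from there.
    console.log(mirror.applied, store.lastSync); // false 10
  }
}
demo();

The test added below exercises the same path against the real engine by poisoning the second chunk passed to applyIncomingBatch and checking where getLastSync() lands.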

View File

@@ -220,12 +220,12 @@ add_bookmark_test(async function test_processIncoming_error_orderChildren(engine
     // Verify that the bookmark order has been applied.
     folder1_record = await store.createRecord(folder1.guid);
     let new_children = folder1_record.children;
-    Assert.deepEqual(new_children,
-      [folder1_payload.children[0], folder1_payload.children[1]]);
+    Assert.deepEqual(new_children.sort(),
+      [folder1_payload.children[0], folder1_payload.children[1]].sort());
 
     let localChildIds = await PlacesSyncUtils.bookmarks.fetchChildRecordIds(
       folder1.guid);
-    Assert.deepEqual(localChildIds, [bmk2.guid, bmk1.guid]);
+    Assert.deepEqual(localChildIds.sort(), [bmk2.guid, bmk1.guid].sort());
 
   } finally {
     await cleanup(engine, server);
@@ -877,3 +877,97 @@ add_task(async function test_sync_imap_URLs() {
     await cleanup(engine, server);
   }
 });
+
+add_task(async function test_resume_buffer() {
+  await Service.recordManager.clearCache();
+  let engine = new BufferedBookmarksEngine(Service);
+  await engine.initialize();
+  await engine._store.wipe();
+  await engine.resetClient();
+  let server = await serverForFoo(engine);
+  await SyncTestingInfrastructure(server);
+  let collection = server.user("foo").collection("bookmarks");
+  engine._tracker.start(); // We skip usual startup...
+
+  const batchChunkSize = 50;
+  engine._store._batchChunkSize = batchChunkSize;
+  try {
+    let children = [];
+    let timestamp = Math.floor(Date.now() / 1000);
+    // Add two chunks worth of records to the server
+    for (let i = 0; i < batchChunkSize * 2; ++i) {
+      let cleartext = {
+        id: Utils.makeGUID(),
+        type: "bookmark",
+        parentid: "toolbar",
+        title: `Bookmark ${i}`,
+        parentName: "Bookmarks Toolbar",
+        bmkUri: `https://example.com/${i}`,
+      };
+      let wbo = collection.insert(cleartext.id,
+                                  encryptPayload(cleartext),
+                                  timestamp + 10 * i);
+      // Something that is effectively random, but deterministic.
+      // (This is just to ensure we don't accidentally start using the
+      // sortindex again).
+      wbo.sortindex = 1000 + Math.round(Math.sin(i / 5) * 100);
+      children.push(cleartext.id);
+    }
+
+    // Add the parent of those records, and ensure its timestamp is the most recent.
+    collection.insert("toolbar", encryptPayload({
+      id: "toolbar",
+      type: "folder",
+      parentid: "places",
+      title: "Bookmarks Toolbar",
+      children
+    }), timestamp + 10 * children.length);
+
+    // Replace applyIncomingBatch with a custom one that calls the original,
+    // but forces it to throw on the 2nd chunk.
+    let origApplyIncomingBatch = engine._store.applyIncomingBatch;
+    engine._store.applyIncomingBatch = function(records) {
+      if (records.length > batchChunkSize) {
+        // Hacky way to make reading from the batchChunkSize'th record throw.
+        delete records[batchChunkSize];
+        Object.defineProperty(records, batchChunkSize, {
+          get() { throw new Error("D:"); }
+        });
+      }
+      return origApplyIncomingBatch.call(this, records);
+    };
+
+    let caughtError;
+    _("We expect this to fail");
+    try {
+      await sync_engine_and_validate_telem(engine, true);
+    } catch (e) {
+      caughtError = e;
+    }
+    Assert.ok(caughtError, "Expected engine.sync to throw");
+    Assert.equal(caughtError.message, "D:");
+
+    // The buffer subtracts one second from the actual timestamp.
+    let lastSync = await engine.getLastSync() + 1;
+    // We poisoned the batchChunkSize'th record, so the last successfully
+    // applied record will be batchChunkSize - 1.
+    let expectedLastSync = timestamp + 10 * (batchChunkSize - 1);
+    Assert.equal(expectedLastSync, lastSync);
+
+    engine._store.applyIncomingBatch = origApplyIncomingBatch;
+    await sync_engine_and_validate_telem(engine, false);
+
+    // Check that all the children made it onto the correct record.
+    let toolbarRecord = await engine._store.createRecord("toolbar");
+    Assert.deepEqual(toolbarRecord.children.sort(), children.sort());
+  } finally {
+    await cleanup(engine, server);
+  }
+});