Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1199077 - Convert `lastSync` and `syncID` into async accessors. r=eoger

MozReview-Commit-ID: Ltkmo8qAgjK

--HG--
extra : rebase_source : e33d3b873fe9638520605ed33fe2359519ae1760
Parent: 03ddb93873
Commit: c108e1ce76
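At a glance, the patch makes the engine's sync state reachable only through async accessors. A minimal sketch of the resulting shape, taken from the diff below (the surrounding engine object and the `Svc.Prefs` helper are as used throughout the patch; the call-site lines are illustrative):

    // After this patch, callers no longer touch the synchronous
    // `engine.lastSync` property; they go through async accessors that
    // still persist to the same preference.
    async getLastSync() {
      return this._lastSync; // lazy pref getter for services.sync.<name>.lastSync
    },
    async setLastSync(lastSync) {
      // Store the value as a string to keep floating point precision.
      Svc.Prefs.set(this.name + ".lastSync", lastSync.toString());
    },

    // Typical call site conversion:
    //   before: engine.lastSync = engine.lastSync - 5;
    //   after:  let lastSync = await engine.getLastSync();
    //           await engine.setLastSync(lastSync - 5);

`syncID` gets the same treatment via `getSyncID()`, `ensureCurrentSyncID()`, and `resetLocalSyncID()`.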
@@ -657,21 +657,19 @@ function SyncEngine(name, service) {
     dataPostProcessor: json => this._metadataPostProcessor(json),
     beforeSave: () => this._beforeSaveMetadata(),
   });
-  Utils.defineLazyIDProperty(this, "syncID", `services.sync.${this.name}.syncID`);

   XPCOMUtils.defineLazyPreferenceGetter(this, "_enabled",
     `services.sync.engine.${this.prefName}`, false,
     (data, previous, latest) =>
       // We do not await on the promise onEngineEnabledChanged returns.
       this._tracker.onEngineEnabledChanged(latest));
+  XPCOMUtils.defineLazyPreferenceGetter(this, "_syncID",
+                                        `services.sync.${this.name}.syncID`,
+                                        "");
   XPCOMUtils.defineLazyPreferenceGetter(this, "_lastSync",
                                         `services.sync.${this.name}.lastSync`,
                                         "0", null,
                                         v => parseFloat(v));
-  XPCOMUtils.defineLazyPreferenceGetter(this, "_lastSyncLocal",
-                                        `services.sync.${this.name}.lastSyncLocal`,
-                                        "0", null,
-                                        v => parseInt(v, 10));
   // Async initializations can be made in the initialize() method.

   // The map of ids => metadata for records needing a weak upload.
@@ -846,26 +844,86 @@ SyncEngine.prototype = {
     return new Changeset();
   },

+  /**
+   * Returns the local sync ID for this engine, or `""` if the engine hasn't
+   * synced for the first time. This is exposed for tests.
+   *
+   * @return the current sync ID.
+   */
+  async getSyncID() {
+    return this._syncID;
+  },
+
+  /**
+   * Ensures that the local sync ID for the engine matches the sync ID for the
+   * collection on the server. A mismatch indicates that another client wiped
+   * the collection; we're syncing after a node reassignment, and another
+   * client synced before us; or the store was replaced since the last sync.
+   * In case of a mismatch, we need to reset all local Sync state and start
+   * over as a first sync.
+   *
+   * In most cases, this method should return the new sync ID as-is. However, an
+   * engine may ignore the given ID and assign a different one, if it determines
+   * that the sync ID on the server is out of date. The bookmarks engine uses
+   * this to wipe the server and other clients on the first sync after the user
+   * restores from a backup.
+   *
+   * @param newSyncID
+   *        The new sync ID for the collection from `meta/global`.
+   * @return The assigned sync ID. If this doesn't match `newSyncID`, we'll
+   *         replace the sync ID in `meta/global` with the assigned ID.
+   */
+  async ensureCurrentSyncID(newSyncID) {
+    let existingSyncID = this._syncID;
+    if (existingSyncID == newSyncID) {
+      return existingSyncID;
+    }
+    this._log.debug("Engine syncIDs: " + [newSyncID, existingSyncID]);
+    this.setSyncIDPref(newSyncID);
+    return newSyncID;
+  },
+
+  /**
+   * Resets the local sync ID for the engine, wipes the server, and resets all
+   * local Sync state to start over as a first sync.
+   *
+   * @return the new sync ID.
+   */
+  async resetSyncID() {
+    let newSyncID = await this.resetLocalSyncID();
+    await this.wipeServer();
+    return newSyncID;
+  },
+
+  /**
+   * Resets the local sync ID for the engine, signaling that we're starting over
+   * as a first sync.
+   *
+   * @return the new sync ID.
+   */
+  async resetLocalSyncID() {
+    return this.setSyncIDPref(Utils.makeGUID());
+  },
+
+  setSyncIDPref(syncID) {
+    Svc.Prefs.set(this.name + ".syncID", syncID);
+    Svc.Prefs.set(this.name + ".lastSync", "0");
+    return syncID;
+  },
+
   /*
    * lastSync is a timestamp in server time.
    */
   async getLastSync() {
-    return this.lastSync;
-  },
-  async setLastSync(lastSync) {
-    this.lastSync = lastSync;
-  },
-  get lastSync() {
     return this._lastSync;
   },
-  set lastSync(value) {
+  async setLastSync(lastSync) {
     // Store the value as a string to keep floating point precision
-    Svc.Prefs.set(this.name + ".lastSync", value.toString());
+    Svc.Prefs.set(this.name + ".lastSync", lastSync.toString());
  },
-  resetLastSync() {
+  async resetLastSync() {
     this._log.debug("Resetting " + this.name + " last sync time");
-    Svc.Prefs.set(this.name + ".lastSync", "0");
-    this.lastSyncLocal = 0;
+    await this.setLastSync(0);
   },

   get toFetch() {
@@ -895,17 +953,6 @@ SyncEngine.prototype = {
     this._previousFailedStorage.saveSoon();
   },

-  /*
-   * lastSyncLocal is a timestamp in local time.
-   */
-  get lastSyncLocal() {
-    return this._lastSyncLocal;
-  },
-  set lastSyncLocal(value) {
-    // Store as a string because pref can only store C longs as numbers.
-    Svc.Prefs.set(this.name + ".lastSyncLocal", value.toString());
-  },
-
   /*
    * Returns a changeset for this sync. Engine implementations can override this
    * method to bypass the tracker for certain or all changed items.
@@ -942,19 +989,17 @@ SyncEngine.prototype = {
     let engines = metaGlobal.payload.engines || {};
     let engineData = engines[this.name] || {};

-    let needsWipe = false;
-
     // Assume missing versions are 0 and wipe the server
     if ((engineData.version || 0) < this.version) {
       this._log.debug("Old engine data: " + [engineData.version, this.version]);

-      // Prepare to clear the server and upload everything
-      needsWipe = true;
-      this.syncID = "";
+      // Clear the server and reupload everything on bad version or missing
+      // meta. Note that we don't regenerate per-collection keys here.
+      let newSyncID = await this.resetSyncID();

       // Set the newer version and newly generated syncID
       engineData.version = this.version;
-      engineData.syncID = this.syncID;
+      engineData.syncID = newSyncID;

       // Put the new data back into meta/global and mark for upload
       engines[this.name] = engineData;
@@ -968,26 +1013,20 @@ SyncEngine.prototype = {
       let error = new String("New data: " + [engineData.version, this.version]);
       error.failureCode = VERSION_OUT_OF_DATE;
       throw error;
-    } else if (engineData.syncID != this.syncID) {
-      // Changes to syncID mean we'll need to upload everything
-      this._log.debug("Engine syncIDs: " + [engineData.syncID, this.syncID]);
-      this.syncID = engineData.syncID;
-      await this._resetClient();
+    } else {
+      let assignedSyncID = await this.ensureCurrentSyncID(engineData.syncID);
+      if (assignedSyncID != engineData.syncID) {
+        engineData.syncID = assignedSyncID;
+        metaGlobal.changed = true;
+      }
     }

-    // Delete any existing data and reupload on bad version or missing meta.
-    // No crypto component here...? We could regenerate per-collection keys...
-    if (needsWipe) {
-      await this.wipeServer();
-    }
-
-    // Save objects that need to be uploaded in this._modified. We also save
-    // the timestamp of this fetch in this.lastSyncLocal. As we successfully
-    // upload objects we remove them from this._modified. If an error occurs
-    // or any objects fail to upload, they will remain in this._modified. At
-    // the end of a sync, or after an error, we add all objects remaining in
-    // this._modified to the tracker.
-    this.lastSyncLocal = Date.now();
+    // Save objects that need to be uploaded in this._modified. As we
+    // successfully upload objects we remove them from this._modified. If an
+    // error occurs or any objects fail to upload, they will remain in
+    // this._modified. At the end of a sync, or after an error, we add all
+    // objects remaining in this._modified to the tracker.
    let initialChanges = await this.pullChanges();
    this._modified.replace(initialChanges);
    // Clear the tracker now. If the sync fails we'll add the ones we failed
@@ -1792,19 +1831,27 @@ SyncEngine.prototype = {
     return canDecrypt;
   },

-  async _resetClient() {
-    this.resetLastSync();
-    this.previousFailed = new SerializableSet();
-    this.toFetch = new SerializableSet();
-    this._needWeakUpload.clear();
+  /**
+   * Deletes the collection for this engine on the server, and removes all local
+   * Sync metadata for this engine. This does *not* remove any existing data on
+   * other clients. This is called when we reset the sync ID.
+   */
+  async wipeServer() {
+    await this._deleteServerCollection();
+    await this._resetClient();
   },

-  async wipeServer() {
+  /**
+   * Deletes the collection for this engine on the server, without removing
+   * any local Sync metadata or user data. Deleting the collection will not
+   * remove any user data on other clients, but will force other clients to
+   * start over as a first sync.
+   */
+  async _deleteServerCollection() {
     let response = await this.service.resource(this.engineURL).delete();
     if (response.status != 200 && response.status != 404) {
       throw response;
     }
-    await this._resetClient();
   },

   async removeClientData() {
@@ -1878,16 +1925,23 @@ SyncEngine.prototype = {
   },

   /**
-   * Get rid of any local meta-data.
+   * Removes all local Sync metadata for this engine, but keeps all existing
+   * local user data.
    */
   async resetClient() {
     if (!this._resetClient) {
       throw new Error("engine does not implement _resetClient method");
     }

     return this._notify("reset-client", this.name, this._resetClient)();
   },

+  async _resetClient() {
+    await this.resetLastSync();
+    this.previousFailed = new SerializableSet();
+    this.toFetch = new SerializableSet();
+    this._needWeakUpload.clear();
+  },
+
   /**
    * Removes all local Sync metadata and user data for this engine.
    */
   async wipeClient() {
     return this._notify("wipe-client", this.name, this._wipeClient)();
   },
@@ -162,7 +162,8 @@ AddonsEngine.prototype = {
       changes[id] = modified;
     }

-    let lastSyncDate = new Date(this.lastSync * 1000);
+    let lastSync = await this.getLastSync();
+    let lastSyncDate = new Date(lastSync * 1000);

    // The reconciler should have been refreshed at the beginning of a sync and
    // we assume this function is only called from within a sync.
@@ -221,7 +222,8 @@ AddonsEngine.prototype = {
   * highly unlikely to occur. Still, we exercise defense just in case.
   */
  async _syncCleanup() {
-    let ms = 1000 * this.lastSync - PRUNE_ADDON_CHANGES_THRESHOLD;
+    let lastSync = await this.getLastSync();
+    let ms = 1000 * lastSync - PRUNE_ADDON_CHANGES_THRESHOLD;
    this._reconciler.pruneChangesBeforeDate(new Date(ms));
    return SyncEngine.prototype._syncCleanup.call(this);
  },
@@ -523,7 +523,8 @@ BookmarksEngine.prototype = {

     try {
       // For first-syncs, make a backup for the user to restore
-      if (this.lastSync == 0) {
+      let lastSync = await this.getLastSync();
+      if (!lastSync) {
        this._log.debug("Bookmarks backup starting.");
        await PlacesBackups.create(null, true);
        this._log.debug("Bookmarks backup done.");
@@ -707,7 +708,7 @@ BufferedBookmarksEngine.prototype = {
     await mirror.setCollectionLastModified(lastSync);
     // Update the pref so that reverting to the original bookmarks engine
     // doesn't download records we've already applied.
-    super.lastSync = lastSync;
+    await super.setLastSync(lastSync);
   },

   get lastSync() {
@@ -85,8 +85,6 @@ Utils.deferGetSet(ClientsRec,
 function ClientEngine(service) {
   SyncEngine.call(this, "Clients", service);

-  // Reset the last sync timestamp on every startup so that we fetch all clients
-  this.resetLastSync();
   this.fxAccounts = fxAccounts;
   this.addClientCommandQueue = Promise.resolve();
   Utils.defineLazyIDProperty(this, "localID", "services.sync.client.GUID");
@@ -100,6 +98,11 @@ ClientEngine.prototype = {
   _knownStaleFxADeviceIds: null,
   _lastDeviceCounts: null,

+  async initialize() {
+    // Reset the last sync timestamp on every startup so that we fetch all clients
+    await this.resetLastSync();
+  },
+
   // These two properties allow us to avoid replaying the same commands
   // continuously if we cannot manage to upload our own record.
   _localClientLastModified: 0,
@@ -371,7 +374,7 @@ ClientEngine.prototype = {

   async _processIncoming() {
     // Fetch all records from the server.
-    this.lastSync = 0;
+    await this.setLastSync(0);
     this._incomingClients = {};
     try {
       await SyncEngine.prototype._processIncoming.call(this);
@@ -448,9 +451,10 @@ ClientEngine.prototype = {
     let updatedIDs = this._modified.ids();
     await SyncEngine.prototype._uploadOutgoing.call(this);
     // Record the response time as the server time for each item we uploaded.
+    let lastSync = await this.getLastSync();
     for (let id of updatedIDs) {
       if (id != this.localID) {
-        this._store._remoteClients[id].serverLastModified = this.lastSync;
+        this._store._remoteClients[id].serverLastModified = lastSync;
       }
     }
   },
@@ -26,13 +26,17 @@ const EHTestsCommon = {
   },

   async sync_httpd_setup() {
+    let clientsEngine = Service.clientsEngine;
+    let clientsSyncID = await clientsEngine.resetLocalSyncID();
+    let catapultEngine = Service.engineManager.get("catapult");
+    let catapultSyncID = await catapultEngine.resetLocalSyncID();
     let global = new ServerWBO("global", {
       syncID: Service.syncID,
       storageVersion: STORAGE_VERSION,
-      engines: {clients: {version: Service.clientsEngine.version,
-                          syncID: Service.clientsEngine.syncID},
-                catapult: {version: Service.engineManager.get("catapult").version,
-                           syncID: Service.engineManager.get("catapult").syncID}}
+      engines: {clients: {version: clientsEngine.version,
+                          syncID: clientsSyncID},
+                catapult: {version: catapultEngine.version,
+                           syncID: catapultSyncID}}
     });
     let clientsColl = new ServerCollection({}, true);
@@ -505,9 +505,10 @@ async function registerRotaryEngine() {

   await Service.engineManager.register(RotaryEngine);
   let engine = Service.engineManager.get("rotary");
+  let syncID = await engine.resetLocalSyncID();
   engine.enabled = true;

-  return { engine, tracker: engine._tracker };
+  return { engine, syncID, tracker: engine._tracker };
 }

 // Set the validation prefs to attempt validation every time to avoid non-determinism.
@@ -527,16 +528,13 @@ async function serverForEnginesWithKeys(users, engines, callback) {

   let allEngines = [Service.clientsEngine].concat(engines);

-  let globalEngines = allEngines.reduce((entries, engine) => {
-    let { name, version, syncID } = engine;
-    entries[name] = { version, syncID };
-    return entries;
-  }, {});
+  let globalEngines = {};
+  for (let engine of allEngines) {
+    let syncID = await engine.resetLocalSyncID();
+    globalEngines[engine.name] = { version: engine.version, syncID };
+  }

-  let contents = allEngines.reduce((collections, engine) => {
-    collections[engine.name] = {};
-    return collections;
-  }, {
+  let contents = {
     meta: {
       global: {
         syncID: Service.syncID,
@@ -547,7 +545,10 @@ async function serverForEnginesWithKeys(users, engines, callback) {
     crypto: {
       keys: encryptPayload(wbo.cleartext),
     },
-  });
+  };
+  for (let engine of allEngines) {
+    contents[engine.name] = {};
+  }

   return serverForUsers(users, contents, callback);
 }
@@ -32,7 +32,8 @@ add_task(async function test_412_not_treated_as_failure() {
   let saw412 = false;
   let _uploadOutgoing = engine._uploadOutgoing;
   engine._uploadOutgoing = async () => {
-    engine.lastSync -= 2;
+    let lastSync = await engine.getLastSync();
+    await engine.setLastSync(lastSync - 2);
     try {
       await _uploadOutgoing.call(engine);
     } catch (ex) {
@@ -19,6 +19,7 @@ prefs.set("extensions.getAddons.get.url",
 prefs.set("extensions.install.requireSecureOrigin", false);

 let engine;
+let syncID;
 let reconciler;
 let tracker;
@@ -37,6 +38,7 @@ add_task(async function setup() {

   await Service.engineManager.register(AddonsEngine);
   engine = Service.engineManager.get("addons");
+  syncID = await engine.resetLocalSyncID();
   reconciler = engine._reconciler;
   tracker = engine._tracker;
@@ -177,8 +179,7 @@ add_task(async function test_disabled_install_semantics() {
   await generateNewKeys(Service.collectionKeys);

   let contents = {
-    meta: {global: {engines: {addons: {version: engine.version,
-                                       syncID: engine.syncID}}}},
+    meta: {global: {engines: {addons: {version: engine.version, syncID}}}},
     crypto: {},
     addons: {}
   };
@@ -159,7 +159,8 @@ add_task(async function test_dupe_bookmark() {
   PlacesUtils.bookmarks.addObserver(obs, false);

   _("Syncing so new dupe record is processed");
-  engine.lastSync = engine.lastSync - 5;
+  let lastSync = await engine.getLastSync();
+  await engine.setLastSync(lastSync - 5);
   await engine.sync();

   // We should have logically deleted the dupe record.
@@ -220,7 +221,8 @@ add_task(async function test_dupe_reparented_bookmark() {
   collection.insert(newGUID, encryptPayload(to_apply), Date.now() / 1000 + 500);

   _("Syncing so new dupe record is processed");
-  engine.lastSync = engine.lastSync - 5;
+  let lastSync = await engine.getLastSync();
+  await engine.setLastSync(lastSync - 5);
   await engine.sync();

   // We should have logically deleted the dupe record.
@@ -300,7 +302,7 @@ add_task(async function test_dupe_reparented_locally_changed_bookmark() {
   _("Syncing so new dupe record is processed");
   // We need to take care to only sync the one new record - if we also see
   // our local item as incoming the test fails - bug 1368608.
-  engine.lastSync = newWBO.modified - 0.000001;
+  await engine.setLastSync(newWBO.modified - 0.000001);
   engine.lastModified = null;
   await engine.sync();
@@ -392,7 +394,8 @@ add_task(async function test_dupe_reparented_to_earlier_appearing_parent_bookmark() {


   _("Syncing so new records are processed.");
-  engine.lastSync = engine.lastSync - 5;
+  let lastSync = await engine.getLastSync();
+  await engine.setLastSync(lastSync - 5);
   await engine.sync();

   // Everything should be parented correctly.
@@ -468,7 +471,8 @@ add_task(async function test_dupe_reparented_to_later_appearing_parent_bookmark() {
   }), Date.now() / 1000 + 500);

   _("Syncing so out-of-order records are processed.");
-  engine.lastSync = engine.lastSync - 5;
+  let lastSync = await engine.getLastSync();
+  await engine.setLastSync(lastSync - 5);
   await engine.sync();

   // The intended parent did end up existing, so it should be parented
@@ -524,7 +528,10 @@ add_task(async function test_dupe_reparented_to_future_arriving_parent_bookmark() {
   }), Date.now() / 1000 + 500);

   _("Syncing so new dupe record is processed");
-  engine.lastSync = engine.lastSync - 5;
+  {
+    let lastSync = await engine.getLastSync();
+    await engine.setLastSync(lastSync - 5);
+  }
   await engine.sync();

   // We should have logically deleted the dupe record.
@@ -582,7 +589,10 @@ add_task(async function test_dupe_reparented_to_future_arriving_parent_bookmark() {


   _("Syncing so missing parent appears");
-  engine.lastSync = engine.lastSync - 5;
+  {
+    let lastSync = await engine.getLastSync();
+    await engine.setLastSync(lastSync - 5);
+  }
   await engine.sync();

   // The intended parent now does exist, so it should have been reparented.
@@ -639,7 +649,8 @@ add_task(async function test_dupe_empty_folder() {
   }), Date.now() / 1000 + 500);

   _("Syncing so new dupe records are processed");
-  engine.lastSync = engine.lastSync - 5;
+  let lastSync = await engine.getLastSync();
+  await engine.setLastSync(lastSync - 5);
   await engine.sync();

   await validate(collection);
@@ -12,6 +12,8 @@ async function serverForFoo(engine) {
   await generateNewKeys(Service.collectionKeys);

   let clientsEngine = Service.clientsEngine;
+  let clientsSyncID = await clientsEngine.resetLocalSyncID();
+  let engineSyncID = await engine.resetLocalSyncID();
   return serverForUsers({"foo": "password"}, {
     meta: {
       global: {
@@ -20,11 +22,11 @@ async function serverForFoo(engine) {
         engines: {
           clients: {
             version: clientsEngine.version,
-            syncID: clientsEngine.syncID,
+            syncID: clientsSyncID,
           },
           [engine.name]: {
             version: engine.version,
-            syncID: engine.syncID,
+            syncID: engineSyncID,
           },
         },
       },
@@ -731,6 +733,7 @@ add_task(async function test_bookmark_order() {
     index: 4,
   }], "Move 20 back to front -> update 20, f30");

-  engine.resetClient();
+  await engine.wipeClient();
   await Service.startOver();
+  await engine.finalize();
 });
@@ -348,7 +348,7 @@ add_task(async function test_bookmark_repair_integration() {
     await cleanup(server);
     clientsEngine = Service.clientsEngine = new ClientEngine(Service);
     clientsEngine.ignoreLastModifiedOnProcessCommands = true;
-    clientsEngine.initialize();
+    await clientsEngine.initialize();
   }
 });
@@ -200,6 +200,7 @@ add_task(async function test_smart_bookmarks_duped() {

   _("Verify that queries with the same anno and different URL dupe");
   {
+    let lastSync = await engine.getLastSync();
     let info = await newSmartBookmark(PlacesUtils.bookmarks.menuGuid,
                                       "place:bar", -1, title,
                                       "MostVisited");
@@ -207,7 +208,7 @@ add_task(async function test_smart_bookmarks_duped() {

     let record = await store.createRecord(info.guid);
     record.id = Utils.makeGUID();
-    collection.insert(record.id, encryptPayload(record.cleartext), engine.lastSync + 1);
+    collection.insert(record.id, encryptPayload(record.cleartext), lastSync + 1);

     collection.insert("menu", encryptPayload({
       id: "menu",
@@ -215,7 +216,7 @@ add_task(async function test_smart_bookmarks_duped() {
       type: "folder",
       title: "Bookmarks Menu",
       children: [record.id],
-    }), engine.lastSync + 1);
+    }), lastSync + 1);

     engine.lastModified = collection.timestamp;
     await sync_engine_and_validate_telem(engine, false);
|
|||
|
||||
_("Verify that different annos don't dupe.");
|
||||
{
|
||||
let lastSync = await engine.getLastSync();
|
||||
let info = await newSmartBookmark(PlacesUtils.bookmarks.unfiledGuid,
|
||||
"place:foo", -1, title, "LeastVisited");
|
||||
let idForOldGUID = await PlacesUtils.promiseItemId(info.guid);
|
||||
|
@@ -233,7 +235,7 @@ add_task(async function test_smart_bookmarks_duped() {
     let other = await store.createRecord(info.guid);
     other.id = "abcdefabcdef";
     other.queryId = "MostVisited";
-    collection.insert(other.id, encryptPayload(other.cleartext), engine.lastSync + 1);
+    collection.insert(other.id, encryptPayload(other.cleartext), lastSync + 1);

     collection.insert("unfiled", encryptPayload({
       id: "unfiled",
@@ -241,7 +243,7 @@ add_task(async function test_smart_bookmarks_duped() {
       type: "folder",
       title: "Other Bookmarks",
       children: [other.id],
-    }), engine.lastSync + 1);
+    }), lastSync + 1);

     engine.lastModified = collection.timestamp;
     await sync_engine_and_validate_telem(engine, false);
@@ -252,6 +254,7 @@ add_task(async function test_smart_bookmarks_duped() {

   _("Handle records without a queryId entry.");
   {
+    let lastSync = await engine.getLastSync();
     let info = await newSmartBookmark(PlacesUtils.bookmarks.mobileGuid, url,
                                       -1, title, "MostVisited");
     let idForOldGUID = await PlacesUtils.promiseItemId(info.guid);
@@ -259,7 +262,7 @@ add_task(async function test_smart_bookmarks_duped() {
     let record = await store.createRecord(info.guid);
     record.id = Utils.makeGUID();
     delete record.queryId;
-    collection.insert(record.id, encryptPayload(record.cleartext), engine.lastSync + 1);
+    collection.insert(record.id, encryptPayload(record.cleartext), lastSync + 1);

     collection.insert("mobile", encryptPayload({
       id: "mobile",
@@ -267,7 +270,7 @@ add_task(async function test_smart_bookmarks_duped() {
       type: "folder",
       title: "Mobile Bookmarks",
       children: [record.id],
-    }), engine.lastSync + 1);
+    }), lastSync + 1);

     engine.lastModified = collection.timestamp;
     await sync_engine_and_validate_telem(engine, false);
@@ -64,7 +64,7 @@ function promiseSpinningly(promise) {


 async function cleanup() {
-  engine.lastSync = 0;
+  await engine.setLastSync(0);
   engine._needWeakUpload.clear();
   await store.wipe();
   await resetTracker();
@@ -796,7 +796,7 @@ add_task(async function test_command_sync() {
   notEqual(clientWBO(remoteId).payload, undefined);

   Svc.Prefs.set("client.GUID", remoteId);
-  engine._resetClient();
+  await engine._resetClient();
   equal(engine.localID, remoteId);
   _("Performing sync on resetted client.");
   await syncClientsEngine(server);
@@ -1386,7 +1386,7 @@ add_task(async function test_keep_cleared_commands_after_reboot() {
   // Reset service (remove mocks)
   engine = Service.clientsEngine = new ClientEngine(Service);
   await engine.initialize();
-  engine._resetClient();
+  await engine._resetClient();

   try {
     server.deleteCollections("foo");
@@ -1724,8 +1724,8 @@ add_task(async function test_duplicate_commands_telemetry() {
 add_task(async function test_other_clients_notified_on_first_sync() {
   _("Ensure that other clients are notified when we upload our client record for the first time.");

-  engine.resetLastSync();
-  engine._store.wipe();
+  await engine.resetLastSync();
+  await engine._store.wipe();
   await generateNewKeys(Service.collectionKeys);

   let server = await serverForFoo(engine);
@@ -24,10 +24,10 @@ add_task(async function test_processIncoming_abort() {
   await generateNewKeys(Service.collectionKeys);

   _("Create some server data.");
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
   _("Fake applyIncoming to abort.");
   engine._store.applyIncoming = async function(record) {
     let ex = {code: SyncEngine.prototype.eEngineAbortApplyIncoming,
@@ -269,6 +269,10 @@ add_task(async function test_bookmark_change_during_sync() {

   enableValidationPrefs();

+  let engine = Service.engineManager.get("bookmarks");
+  let server = await serverForEnginesWithKeys({"foo": "password"}, [engine]);
+  await SyncTestingInfrastructure(server);
+
   // Already-tracked bookmarks that shouldn't be uploaded during the first sync.
   let bzBmk = await PlacesUtils.bookmarks.insert({
     parentGuid: PlacesUtils.bookmarks.menuGuid,
@@ -283,9 +287,6 @@ add_task(async function test_bookmark_change_during_sync() {
     syncStatus: PlacesUtils.bookmarks.SYNC_STATUS.NORMAL,
   });

-  let engine = Service.engineManager.get("bookmarks");
-  let server = await serverForEnginesWithKeys({"foo": "password"}, [engine]);
-  await SyncTestingInfrastructure(server);
   let collection = server.user("foo").collection("bookmarks");

   let bmk3; // New child of Folder 1, created locally during sync.
@@ -28,8 +28,8 @@ async function sync_httpd_setup() {
   let upd = collectionsHelper.with_updated_collection;

   let catapultEngine = engineManager.get("catapult");
-  let engines = {catapult: {version: catapultEngine.version,
-                            syncID: catapultEngine.syncID}};
+  let syncID = await catapultEngine.resetLocalSyncID();
+  let engines = {catapult: {version: catapultEngine.version, syncID}};

   // Track these using the collections helper, which keeps modified times
   // up-to-date.
@@ -203,14 +203,13 @@ add_task(async function test_momentary_401_engine() {
   let john = server.user("johndoe");

   _("Enabling the Rotary engine.");
-  let { engine, tracker } = await registerRotaryEngine();
+  let { engine, syncID, tracker } = await registerRotaryEngine();

   // We need the server to be correctly set up prior to experimenting. Do this
   // through a sync.
   let global = {syncID: Service.syncID,
                 storageVersion: STORAGE_VERSION,
-                rotary: {version: engine.version,
-                         syncID: engine.syncID}};
+                rotary: {version: engine.version, syncID}};
   john.createCollection("meta").insert("global", global);

   _("First sync to prepare server contents.");
@@ -58,7 +58,7 @@ add_task(async function test_history_download_limit() {

   // We have 15 records on the server since the last sync, but our download
   // limit is 5 records at a time. We should eventually fetch all 15.
-  engine.lastSync = lastSync;
+  await engine.setLastSync(lastSync);
   engine.downloadBatchSize = 4;
   engine.downloadLimit = 5;
@@ -75,7 +75,7 @@ add_task(async function test_history_download_limit() {
     "place0000006", "place0000007", "place0000008", "place0000009"]);

   // We should have fast-forwarded the last sync time.
-  equal(engine.lastSync, lastSync + 15);
+  equal(await engine.getLastSync(), lastSync + 15);

   engine.lastModified = collection.modified;
   ping = await sync_engine_and_validate_telem(engine, false);
@@ -108,7 +108,7 @@ add_task(async function test_history_download_limit() {
   let backlogAfterThirdSync = Array.from(engine.toFetch).sort();
   deepEqual(backlogAfterSecondSync, backlogAfterThirdSync);

-  equal(engine.lastSync, lastSync + 20);
+  equal(await engine.getLastSync(), lastSync + 20);

   // Bump the fetch batch size to let the backlog make progress. We should
   // make 3 requests to fetch 5 backlogged GUIDs.
@@ -176,7 +176,7 @@ add_task(async function test_history_visit_roundtrip() {
   }, Date.now() / 1000 + 10);

   // Force a remote sync.
-  engine.lastSync = Date.now() / 1000 - 30;
+  await engine.setLastSync(Date.now() / 1000 - 30);
   await sync_engine_and_validate_telem(engine, false);

   // Make sure that we didn't duplicate the visit when inserting. (Prior to bug
@@ -240,7 +240,7 @@ add_task(async function test_history_visit_dedupe_old() {
     );
   }, Date.now() / 1000 + 10);

-  engine.lastSync = Date.now() / 1000 - 30;
+  await engine.setLastSync(Date.now() / 1000 - 30);
   await sync_engine_and_validate_telem(engine, false);

   allVisits = (await PlacesUtils.history.fetch("https://www.example.com", {
@@ -22,18 +22,20 @@ async function shared_setup() {

   hmacErrorCount = 0;

+  let clientsEngine = Service.clientsEngine;
+  let clientsSyncID = await clientsEngine.resetLocalSyncID();
+
   // Make sure RotaryEngine is the only one we sync.
-  let { engine, tracker } = await registerRotaryEngine();
-  engine.lastSync = 123; // Needs to be non-zero so that tracker is queried.
+  let { engine, syncID, tracker } = await registerRotaryEngine();
+  await engine.setLastSync(123); // Needs to be non-zero so that tracker is queried.
   engine._store.items = {flying: "LNER Class A3 4472",
                          scotsman: "Flying Scotsman"};
   await tracker.addChangedID("scotsman", 0);
   Assert.equal(1, Service.engineManager.getEnabled().length);

-  let engines = {rotary: {version: engine.version,
-                          syncID: engine.syncID},
-                 clients: {version: Service.clientsEngine.version,
-                           syncID: Service.clientsEngine.syncID}};
+  let engines = {rotary: {version: engine.version, syncID},
+                 clients: {version: clientsEngine.version,
+                           syncID: clientsSyncID}};

   // Common server objects.
   let global = new ServerWBO("global", {engines});
@@ -83,7 +85,7 @@ add_task(async function hmac_error_during_404() {

   _("Partially resetting client, as if after a restart, and forcing redownload.");
   Service.collectionKeys.clear();
-  engine.lastSync = 0; // So that we redownload records.
+  await engine.setLastSync(0); // So that we redownload records.
   key404Counter = 1;
   _("---------------------------");
   await sync_and_validate_telem();
@@ -206,11 +208,12 @@ add_task(async function hmac_error_during_node_reassignment() {

       // Kick off another sync. Can't just call it, because we're inside the
       // lock...
-      CommonUtils.nextTick(function() {
+      (async () => {
+        await Async.promiseYield();
         _("Now a fresh sync will get no HMAC errors.");
         _("Partially resetting client, as if after a restart, and forcing redownload.");
         Service.collectionKeys.clear();
-        engine.lastSync = 0;
+        await engine.setLastSync(0);
         hmacErrorCount = 0;

         onSyncFinished = async function() {
@@ -220,18 +223,15 @@ add_task(async function hmac_error_during_node_reassignment() {
           Svc.Obs.remove("weave:service:sync:finish", obs);
           Svc.Obs.remove("weave:service:sync:error", obs);

-          (async () => {
-            await tracker.clearChangedIDs();
-            await Service.engineManager.unregister(engine);
-            Svc.Prefs.resetBranch("");
-            Service.recordManager.clearCache();
-            server.stop(resolve);
-          })();
+          await tracker.clearChangedIDs();
+          await Service.engineManager.unregister(engine);
+          Svc.Prefs.resetBranch("");
+          Service.recordManager.clearCache();
+          server.stop(resolve);
         };

         Service.sync();
-      },
-      this);
+      })().catch(Cu.reportError);
     };
   };
 });
@@ -13,12 +13,13 @@ ChromeUtils.import("resource://services-sync/service.js");
 let scheduler;
 let clientsEngine;

-function sync_httpd_setup() {
+async function sync_httpd_setup() {
+  let clientsSyncID = await clientsEngine.resetLocalSyncID();
   let global = new ServerWBO("global", {
     syncID: Service.syncID,
     storageVersion: STORAGE_VERSION,
     engines: {clients: {version: clientsEngine.version,
-                        syncID: clientsEngine.syncID}}
+                        syncID: clientsSyncID}}
   });
   let clientsColl = new ServerCollection({}, true);
@@ -65,7 +66,7 @@ add_task(async function test_successful_sync_adjustSyncInterval() {
   }
   Svc.Obs.add("weave:service:sync:finish", onSyncFinish);

-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);

   // Confirm defaults
@@ -171,7 +172,7 @@ add_task(async function test_unsuccessful_sync_adjustSyncInterval() {
   // Force sync to fail.
   Svc.Prefs.set("firstSync", "notReady");

-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);

   // Confirm defaults
@@ -266,7 +267,7 @@ add_task(async function test_back_triggers_sync() {
 add_task(async function test_back_triggers_sync() {
   enableValidationPrefs();

-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);

   // Single device: no sync triggered.
@@ -297,7 +298,7 @@ add_task(async function test_adjust_interval_on_sync_error() {
 add_task(async function test_adjust_interval_on_sync_error() {
   enableValidationPrefs();

-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);

   let syncFailures = 0;
@@ -335,7 +336,7 @@ add_task(async function test_bug671378_scenario() {
   // scheduleNextSync() was called without a time interval parameter,
   // setting nextSync to a non-zero value and preventing the timer from
   // being adjusted in the next call to scheduleNextSync().
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);

   let syncSuccesses = 0;
@@ -127,14 +127,13 @@ add_task(async function test_momentary_401_engine() {
   let john = server.user("johndoe");

   _("Enabling the Rotary engine.");
-  let { engine, tracker } = await registerRotaryEngine();
+  let { engine, syncID, tracker } = await registerRotaryEngine();

   // We need the server to be correctly set up prior to experimenting. Do this
   // through a sync.
   let global = {syncID: Service.syncID,
                 storageVersion: STORAGE_VERSION,
-                rotary: {version: engine.version,
-                         syncID: engine.syncID}};
+                rotary: {version: engine.version, syncID}};
   john.createCollection("meta").insert("global", global);

   _("First sync to prepare server contents.");
@@ -360,7 +359,7 @@ add_task(async function test_loop_avoidance_engine() {
   let john = server.user("johndoe");

   _("Enabling the Rotary engine.");
-  let { engine, tracker } = await registerRotaryEngine();
+  let { engine, syncID, tracker } = await registerRotaryEngine();
   let deferred = PromiseUtils.defer();

   let getTokenCount = 0;
@@ -376,8 +375,7 @@ add_task(async function test_loop_avoidance_engine() {
   // through a sync.
   let global = {syncID: Service.syncID,
                 storageVersion: STORAGE_VERSION,
-                rotary: {version: engine.version,
-                         syncID: engine.syncID}};
+                rotary: {version: engine.version, syncID}};
   john.createCollection("meta").insert("global", global);

   _("First sync to prepare server contents.");
@@ -155,11 +155,12 @@ add_task(async function run_test() {
   Assert.equal(meta_global.data.syncID, Service.syncID);
   Assert.equal(meta_global.data.storageVersion, STORAGE_VERSION);
   Assert.equal(meta_global.data.engines.clients.version, Service.clientsEngine.version);
-  Assert.equal(meta_global.data.engines.clients.syncID, Service.clientsEngine.syncID);
+  Assert.equal(meta_global.data.engines.clients.syncID, await Service.clientsEngine.getSyncID());

   _("Set the collection info hash so that sync() will remember the modified times for future runs.");
-  collections.meta = Service.clientsEngine.lastSync;
-  collections.clients = Service.clientsEngine.lastSync;
+  let lastSync = await Service.clientsEngine.getLastSync();
+  collections.meta = lastSync;
+  collections.clients = lastSync;
   await Service.sync();

   _("Sync again and verify that meta/global wasn't downloaded again");
@@ -150,11 +150,11 @@ add_task(async function test_disabledLocally() {
   _("Test: Engine is enabled on remote clients and disabled locally");
   Service.syncID = "abcdefghij";
   let engine = Service.engineManager.get("steam");
+  let syncID = await engine.resetLocalSyncID();
   let metaWBO = new ServerWBO("global", {
     syncID: Service.syncID,
     storageVersion: STORAGE_VERSION,
-    engines: {steam: {syncID: engine.syncID,
-                      version: engine.version}}
+    engines: {steam: {syncID, version: engine.version}}
   });
   let steamCollection = new ServerWBO("steam", PAYLOAD);
@@ -194,11 +194,11 @@ add_task(async function test_disabledLocally_wipe503() {
   _("Test: Engine is enabled on remote clients and disabled locally");
   Service.syncID = "abcdefghij";
   let engine = Service.engineManager.get("steam");
+  let syncID = await engine.resetLocalSyncID();
   let metaWBO = new ServerWBO("global", {
     syncID: Service.syncID,
     storageVersion: STORAGE_VERSION,
-    engines: {steam: {syncID: engine.syncID,
-                      version: engine.version}}
+    engines: {steam: {syncID, version: engine.version}}
   });

   function service_unavailable(request, response) {
@@ -234,11 +234,11 @@ add_task(async function test_enabledRemotely() {
   _("Test: Engine is disabled locally and enabled on a remote client");
   Service.syncID = "abcdefghij";
   let engine = Service.engineManager.get("steam");
+  let syncID = await engine.resetLocalSyncID();
   let metaWBO = new ServerWBO("global", {
     syncID: Service.syncID,
     storageVersion: STORAGE_VERSION,
-    engines: {steam: {syncID: engine.syncID,
-                      version: engine.version}}
+    engines: {steam: {syncID, version: engine.version}}
   });
   let server = sync_httpd_setup({
     "/1.1/johndoe/storage/meta/global":
@@ -267,7 +267,7 @@ add_task(async function test_enabledRemotely() {
     Assert.ok(engine.enabled);

     _("Meta record still present.");
-    Assert.equal(metaWBO.data.engines.steam.syncID, engine.syncID);
+    Assert.equal(metaWBO.data.engines.steam.syncID, await engine.getSyncID());
   } finally {
     await Service.startOver();
     await promiseStopServer(server);
@@ -396,13 +396,15 @@ add_task(async function test_dependentEnginesDisabledLocally() {
   _("Test: Two dependent engines are enabled on remote clients and disabled locally");
   Service.syncID = "abcdefghij";
   let steamEngine = Service.engineManager.get("steam");
+  let steamSyncID = await steamEngine.resetLocalSyncID();
   let stirlingEngine = Service.engineManager.get("stirling");
+  let stirlingSyncID = await stirlingEngine.resetLocalSyncID();
   let metaWBO = new ServerWBO("global", {
     syncID: Service.syncID,
     storageVersion: STORAGE_VERSION,
-    engines: {steam: {syncID: steamEngine.syncID,
+    engines: {steam: {syncID: steamSyncID,
                       version: steamEngine.version},
-              stirling: {syncID: stirlingEngine.syncID,
+              stirling: {syncID: stirlingSyncID,
                          version: stirlingEngine.version}}
   });
@@ -74,43 +74,42 @@ add_task(async function test_syncID() {
   try {
     // Ensure pristine environment
     Assert.equal(Svc.Prefs.get("steam.syncID"), undefined);
+    Assert.equal(await engine.getSyncID(), "");

     // Performing the first get on the attribute will generate a new GUID.
-    Assert.equal(engine.syncID, "fake-guid-00");
+    Assert.equal(await engine.resetLocalSyncID(), "fake-guid-00");
     Assert.equal(Svc.Prefs.get("steam.syncID"), "fake-guid-00");

     Svc.Prefs.set("steam.syncID", Utils.makeGUID());
     Assert.equal(Svc.Prefs.get("steam.syncID"), "fake-guid-01");
-    Assert.equal(engine.syncID, "fake-guid-01");
+    Assert.equal(await engine.getSyncID(), "fake-guid-01");
   } finally {
     Svc.Prefs.resetBranch("");
   }
 });

 add_task(async function test_lastSync() {
-  _("SyncEngine.lastSync and SyncEngine.lastSyncLocal correspond to preferences");
+  _("SyncEngine.lastSync corresponds to preferences");
   await SyncTestingInfrastructure(server);
   let engine = await makeSteamEngine();
   try {
     // Ensure pristine environment
     Assert.equal(Svc.Prefs.get("steam.lastSync"), undefined);
-    Assert.equal(engine.lastSync, 0);
-    Assert.equal(Svc.Prefs.get("steam.lastSyncLocal"), undefined);
-    Assert.equal(engine.lastSyncLocal, 0);
+    Assert.equal(await engine.getLastSync(), 0);

     // Floats are properly stored as floats and synced with the preference
-    engine.lastSync = 123.45;
-    Assert.equal(engine.lastSync, 123.45);
+    await engine.setLastSync(123.45);
+    Assert.equal(await engine.getLastSync(), 123.45);
     Assert.equal(Svc.Prefs.get("steam.lastSync"), "123.45");

     // Integer is properly stored
-    engine.lastSyncLocal = 67890;
-    Assert.equal(engine.lastSyncLocal, 67890);
-    Assert.equal(Svc.Prefs.get("steam.lastSyncLocal"), "67890");
+    await engine.setLastSync(67890);
+    Assert.equal(await engine.getLastSync(), 67890);
+    Assert.equal(Svc.Prefs.get("steam.lastSync"), "67890");

     // resetLastSync() resets the value (and preference) to 0
-    engine.resetLastSync();
-    Assert.equal(engine.lastSync, 0);
+    await engine.resetLastSync();
+    Assert.equal(await engine.getLastSync(), 0);
     Assert.equal(Svc.Prefs.get("steam.lastSync"), "0");
   } finally {
     Svc.Prefs.resetBranch("");
@@ -229,17 +228,14 @@ add_task(async function test_resetClient() {
   try {
     // Ensure pristine environment
     Assert.equal(Svc.Prefs.get("steam.lastSync"), undefined);
-    Assert.equal(Svc.Prefs.get("steam.lastSyncLocal"), undefined);
     Assert.equal(engine.toFetch.size, 0);

-    engine.lastSync = 123.45;
-    engine.lastSyncLocal = 67890;
+    await engine.setLastSync(123.45);
     engine.toFetch = guidSetOfSize(4);
     engine.previousFailed = guidSetOfSize(3);

     await engine.resetClient();
-    Assert.equal(engine.lastSync, 0);
-    Assert.equal(engine.lastSyncLocal, 0);
+    Assert.equal(await engine.getLastSync(), 0);
     Assert.equal(engine.toFetch.size, 0);
     Assert.equal(engine.previousFailed.size, 0);
   } finally {
@@ -261,13 +257,13 @@ add_task(async function test_wipeServer() {

   try {
     // Some data to reset.
-    engine.lastSync = 123.45;
+    await engine.setLastSync(123.45);
     engine.toFetch = guidSetOfSize(3),

     _("Wipe server data and reset client.");
     await engine.wipeServer();
     Assert.equal(steamCollection.payload, undefined);
-    Assert.equal(engine.lastSync, 0);
+    Assert.equal(await engine.getLastSync(), 0);
     Assert.equal(engine.toFetch.size, 0);

   } finally {
|
@ -35,10 +35,10 @@ async function promiseClean(engine, server) {
|
|||
|
||||
async function createServerAndConfigureClient() {
|
||||
let engine = new RotaryEngine(Service);
|
||||
let syncID = await engine.resetLocalSyncID();
|
||||
|
||||
let contents = {
|
||||
meta: {global: {engines: {rotary: {version: engine.version,
|
||||
syncID: engine.syncID}}}},
|
||||
meta: {global: {engines: {rotary: {version: engine.version, syncID}}}},
|
||||
crypto: {},
|
||||
rotary: {}
|
||||
};
|
||||
|
@@ -105,8 +105,7 @@ add_task(async function test_syncStartup_emptyOrOutdatedGlobalsResetsSync() {
   Assert.ok(!!collection.payload("flying"));
   Assert.ok(!!collection.payload("scotsman"));

-  engine.lastSync = Date.now() / 1000;
-  engine.lastSyncLocal = Date.now();
+  await engine.setLastSync(Date.now() / 1000);

   // Trying to prompt a wipe -- we no longer track CryptoMeta per engine,
   // so it has nothing to check.
@@ -115,10 +114,10 @@ add_task(async function test_syncStartup_emptyOrOutdatedGlobalsResetsSync() {
     // The meta/global WBO has been filled with data about the engine
     let engineData = metaGlobal.payload.engines.rotary;
     Assert.equal(engineData.version, engine.version);
-    Assert.equal(engineData.syncID, engine.syncID);
+    Assert.equal(engineData.syncID, await engine.getSyncID());

     // Sync was reset and server data was wiped
-    Assert.equal(engine.lastSync, 0);
+    Assert.equal(await engine.getLastSync(), 0);
     Assert.equal(collection.payload("flying"), undefined);
     Assert.equal(collection.payload("scotsman"), undefined);
@@ -173,19 +172,18 @@ add_task(async function test_syncStartup_syncIDMismatchResetsClient() {
   try {

     // Confirm initial environment
-    Assert.equal(engine.syncID, "fake-guid-00");
+    Assert.equal(await engine.getSyncID(), "");
     const changes = await engine._tracker.getChangedIDs();
     Assert.equal(changes.rekolok, undefined);

-    engine.lastSync = Date.now() / 1000;
-    engine.lastSyncLocal = Date.now();
+    await engine.setLastSync(Date.now() / 1000);
     await engine._syncStartup();

     // The engine has assumed the server's syncID
-    Assert.equal(engine.syncID, "foobar");
+    Assert.equal(await engine.getSyncID(), "foobar");

     // Sync was reset
-    Assert.equal(engine.lastSync, 0);
+    Assert.equal(await engine.getLastSync(), 0);

   } finally {
     await cleanAndGo(engine, server);
@@ -208,7 +206,7 @@ add_task(async function test_processIncoming_emptyServer() {

     // Merely ensure that this code path is run without any errors
     await engine._processIncoming();
-    Assert.equal(engine.lastSync, 0);
+    Assert.equal(await engine.getLastSync(), 0);

   } finally {
     await cleanAndGo(engine, server);
@@ -244,15 +242,15 @@ add_task(async function test_processIncoming_createFromServer() {
   await generateNewKeys(Service.collectionKeys);

   let engine = makeRotaryEngine();
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};

   try {

     // Confirm initial environment
-    Assert.equal(engine.lastSync, 0);
+    Assert.equal(await engine.getLastSync(), 0);
     Assert.equal(engine.lastModified, null);
     Assert.equal(engine._store.items.flying, undefined);
     Assert.equal(engine._store.items.scotsman, undefined);
@@ -262,7 +260,7 @@ add_task(async function test_processIncoming_createFromServer() {
     await engine._processIncoming();

     // Timestamps of last sync and last server modification are set.
-    Assert.ok(engine.lastSync > 0);
+    Assert.ok((await engine.getLastSync()) > 0);
     Assert.ok(engine.lastModified > 0);

     // Local records have been created from the server data.
@@ -337,10 +335,10 @@ add_task(async function test_processIncoming_reconcile() {
   // This record has been changed 2 mins later than the one on the server
   await engine._tracker.addChangedID("olderidentical", Date.now() / 1000);

+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};

   try {
@@ -357,7 +355,7 @@ add_task(async function test_processIncoming_reconcile() {
     await engine._processIncoming();

     // Timestamps of last sync and last server modification are set.
-    Assert.ok(engine.lastSync > 0);
+    Assert.ok((await engine.getLastSync()) > 0);
     Assert.ok(engine.lastModified > 0);

     // The new record is created.
@@ -395,7 +393,7 @@ add_task(async function test_processIncoming_reconcile_local_deleted() {
   let [engine, server, user] = await createServerAndConfigureClient();

   let now = Date.now() / 1000 - 10;
-  engine.lastSync = now;
+  await engine.setLastSync(now);
   engine.lastModified = now + 1;

   let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
@@ -428,7 +426,7 @@ add_task(async function test_processIncoming_reconcile_equivalent() {
   let [engine, server, user] = await createServerAndConfigureClient();

   let now = Date.now() / 1000 - 10;
-  engine.lastSync = now;
+  await engine.setLastSync(now);
   engine.lastModified = now + 1;

   let record = encryptPayload({id: "entry", denomination: "denomination"});
@@ -455,7 +453,7 @@ add_task(async function test_processIncoming_reconcile_locally_deleted_dupe_new() {
   let [engine, server, user] = await createServerAndConfigureClient();

   let now = Date.now() / 1000 - 10;
-  engine.lastSync = now;
+  await engine.setLastSync(now);
   engine.lastModified = now + 1;

   let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
@@ -494,7 +492,7 @@ add_task(async function test_processIncoming_reconcile_locally_deleted_dupe_old() {
   let [engine, server, user] = await createServerAndConfigureClient();

   let now = Date.now() / 1000 - 10;
-  engine.lastSync = now;
+  await engine.setLastSync(now);
   engine.lastModified = now + 1;

   let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
@@ -530,7 +528,7 @@ add_task(async function test_processIncoming_reconcile_changed_dupe() {
   let [engine, server, user] = await createServerAndConfigureClient();

   let now = Date.now() / 1000 - 10;
-  engine.lastSync = now;
+  await engine.setLastSync(now);
   engine.lastModified = now + 1;

   // The local record is newer than the incoming one, so it should be retained.
@@ -570,7 +568,7 @@ add_task(async function test_processIncoming_reconcile_changed_dupe_new() {
   let [engine, server, user] = await createServerAndConfigureClient();

   let now = Date.now() / 1000 - 10;
-  engine.lastSync = now;
+  await engine.setLastSync(now);
   engine.lastModified = now + 1;

   let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
@@ -630,7 +628,7 @@ add_task(async function test_processIncoming_resume_toFetch() {

   // Time travel 10 seconds into the future but still download the above WBOs.
   let engine = makeRotaryEngine();
-  engine.lastSync = LASTSYNC;
+  await engine.setLastSync(LASTSYNC);
   engine.toFetch = new SerializableSet(["flying", "scotsman"]);
   engine.previousFailed = new SerializableSet(["failed0", "failed1", "failed2"]);
@ -640,10 +638,10 @@ add_task(async function test_processIncoming_resume_toFetch() {
|
|||
|
||||
await SyncTestingInfrastructure(server);
|
||||
|
||||
let syncID = await engine.resetLocalSyncID();
|
||||
let meta_global = Service.recordManager.set(engine.metaURL,
|
||||
new WBORecord(engine.metaURL));
|
||||
meta_global.payload.engines = {rotary: {version: engine.version,
|
||||
syncID: engine.syncID}};
|
||||
meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
|
||||
try {
|
||||
|
||||
// Confirm initial environment
|
||||
|
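This is the second recurring change: instead of reading a lazily generated `engine.syncID` property, tests now ask the engine to mint and persist a fresh ID via `resetLocalSyncID()` and publish the returned value in `meta/global`. A minimal sketch of the contract the tests rely on — the GUID helper here is hypothetical, not the real generator:

    // Sketch of the call contract, not the real method body.
    function fakeGuid() {
      return Math.random().toString(36).slice(2, 14); // placeholder generator
    }

    const syncIDSketch = {
      _syncID: "",
      async resetLocalSyncID() {
        // Persisting before returning means callers can publish the ID safely.
        this._syncID = fakeGuid();
        return this._syncID;
      },
    };

    // let syncID = await engine.resetLocalSyncID();
    // meta_global.payload.engines = {rotary: {version: engine.version, syncID}};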
@@ -700,13 +698,13 @@ add_task(async function test_processIncoming_notify_count() {
 
   await SyncTestingInfrastructure(server);
 
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
   try {
     // Confirm initial environment.
-    Assert.equal(engine.lastSync, 0);
+    Assert.equal(await engine.getLastSync(), 0);
     Assert.equal(engine.toFetch.size, 0);
     Assert.equal(engine.previousFailed.size, 0);
     do_check_empty(engine._store.items);

@@ -789,13 +787,13 @@ add_task(async function test_processIncoming_previousFailed() {
 
   await SyncTestingInfrastructure(server);
 
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
   try {
     // Confirm initial environment.
-    Assert.equal(engine.lastSync, 0);
+    Assert.equal(await engine.getLastSync(), 0);
     Assert.equal(engine.toFetch.size, 0);
     Assert.equal(engine.previousFailed.size, 0);
     do_check_empty(engine._store.items);

@@ -897,15 +895,15 @@ add_task(async function test_processIncoming_failed_records() {
 
   await SyncTestingInfrastructure(server);
 
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
 
   try {
 
     // Confirm initial environment
-    Assert.equal(engine.lastSync, 0);
+    Assert.equal(await engine.getLastSync(), 0);
     Assert.equal(engine.toFetch.size, 0);
     Assert.equal(engine.previousFailed.size, 0);
     do_check_empty(engine._store.items);

@@ -998,10 +996,10 @@ add_task(async function test_processIncoming_decrypt_failed() {
 
   await SyncTestingInfrastructure(server);
 
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
   try {
 
     // Confirm initial state

@@ -1016,7 +1014,7 @@ add_task(async function test_processIncoming_decrypt_failed() {
     observerData = data;
   });
 
-  engine.lastSync = collection.wbo("nojson").modified - 1;
+  await engine.setLastSync(collection.wbo("nojson").modified - 1);
   let ping = await sync_engine_and_validate_telem(engine, true);
   Assert.equal(ping.engines[0].incoming.applied, 2);
   Assert.equal(ping.engines[0].incoming.failed, 4);

@@ -1056,30 +1054,26 @@ add_task(async function test_uploadOutgoing_toEmptyServer() {
   await generateNewKeys(Service.collectionKeys);
 
   let engine = makeRotaryEngine();
-  engine.lastSync = 123; // needs to be non-zero so that tracker is queried
   engine._store.items = {flying: "LNER Class A3 4472",
                          scotsman: "Flying Scotsman"};
   // Mark one of these records as changed
   await engine._tracker.addChangedID("scotsman", 0);
 
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
 
   try {
+    await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
 
     // Confirm initial environment
-    Assert.equal(engine.lastSyncLocal, 0);
     Assert.equal(collection.payload("flying"), undefined);
     Assert.equal(collection.payload("scotsman"), undefined);
 
     await engine._syncStartup();
     await engine._uploadOutgoing();
 
-    // Local timestamp has been set.
-    Assert.ok(engine.lastSyncLocal > 0);
-
     // Ensure the marked record ('scotsman') has been uploaded and is
     // no longer marked.
     Assert.equal(collection.payload("flying"), undefined);
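Two knock-on effects show up in the hunk above and repeat throughout the rest of the patch. First, because `setLastSync` must be awaited, the "non-zero so that tracker is queried" line moves from the synchronous setup section into the async `try` body. Second, the `lastSyncLocal` writes and assertions disappear entirely. A condensed sketch of the resulting test prologue — the helper shape is hypothetical, not something the patch itself extracts:

    // Hypothetical condensation of the prologue these tests now repeat.
    async function setUpRotaryMetaGlobal(engine) {
      let syncID = await engine.resetLocalSyncID();
      let meta_global = Service.recordManager.set(engine.metaURL,
                                                  new WBORecord(engine.metaURL));
      meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
      return meta_global;
    }
    // ...then, inside the async test body:
    // await engine.setLastSync(123); // non-zero so that the tracker is queried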
@@ -1113,20 +1107,20 @@ async function test_uploadOutgoing_max_record_payload_bytes(allowSkippedRecord)
 
   let engine = makeRotaryEngine();
   engine.allowSkippedRecord = allowSkippedRecord;
-  engine.lastSync = 1;
   engine._store.items = { flying: "a".repeat(1024 * 1024), scotsman: "abcd" };
 
   await engine._tracker.addChangedID("flying", 1000);
   await engine._tracker.addChangedID("scotsman", 1000);
 
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
 
   try {
+    await engine.setLastSync(1); // needs to be non-zero so that tracker is queried
 
     // Confirm initial environment
-    Assert.equal(engine.lastSyncLocal, 0);
     Assert.equal(collection.payload("flying"), undefined);
     Assert.equal(collection.payload("scotsman"), undefined);
 

@@ -1188,7 +1182,6 @@ add_task(async function test_uploadOutgoing_failed() {
   await SyncTestingInfrastructure(server);
 
   let engine = makeRotaryEngine();
-  engine.lastSync = 123; // needs to be non-zero so that tracker is queried
   engine._store.items = {flying: "LNER Class A3 4472",
                          scotsman: "Flying Scotsman",
                          peppercorn: "Peppercorn Class"};

@@ -1200,15 +1193,15 @@ add_task(async function test_uploadOutgoing_failed() {
   await engine._tracker.addChangedID("scotsman", SCOTSMAN_CHANGED);
   await engine._tracker.addChangedID("peppercorn", PEPPERCORN_CHANGED);
 
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
 
   try {
+    await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
 
     // Confirm initial environment
-    Assert.equal(engine.lastSyncLocal, 0);
     Assert.equal(collection.payload("flying"), undefined);
     let changes = await engine._tracker.getChangedIDs();
     Assert.equal(changes.flying, FLYING_CHANGED);

@@ -1218,9 +1211,6 @@ add_task(async function test_uploadOutgoing_failed() {
     engine.enabled = true;
     await sync_engine_and_validate_telem(engine, true);
 
-    // Local timestamp has been set.
-    Assert.ok(engine.lastSyncLocal > 0);
-
     // Ensure the 'flying' record has been uploaded and is no longer marked.
     Assert.ok(!!collection.payload("flying"));
     changes = await engine._tracker.getChangedIDs();

@@ -1257,7 +1247,6 @@ async function createRecordFailTelemetry(allowSkippedRecord) {
     }
     return oldCreateRecord.call(engine._store, id, col);
   };
-  engine.lastSync = 123; // needs to be non-zero so that tracker is queried
   engine._store.items = {flying: "LNER Class A3 4472",
                          scotsman: "Flying Scotsman"};
   // Mark these records as changed

@@ -1266,15 +1255,16 @@ async function createRecordFailTelemetry(allowSkippedRecord) {
   await engine._tracker.addChangedID("flying", FLYING_CHANGED);
   await engine._tracker.addChangedID("scotsman", SCOTSMAN_CHANGED);
 
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
 
   let ping;
   try {
+    await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
 
     // Confirm initial environment
-    Assert.equal(engine.lastSyncLocal, 0);
     Assert.equal(collection.payload("flying"), undefined);
     let changes = await engine._tracker.getChangedIDs();
     Assert.equal(changes.flying, FLYING_CHANGED);

@@ -1303,9 +1293,6 @@ async function createRecordFailTelemetry(allowSkippedRecord) {
     const changes = await engine._tracker.getChangedIDs();
     Assert.ok(changes.flying);
   } finally {
-    // Local timestamp has been set.
-    Assert.ok(engine.lastSyncLocal > 0);
-
     // We reported in telemetry that we failed a record
     Assert.equal(ping.engines[0].outgoing[0].failed, 1);
 

@@ -1341,11 +1328,10 @@ add_task(async function test_uploadOutgoing_largeRecords() {
   await engine._tracker.addChangedID("large-item", 0);
   collection.insert("large-item");
 
-
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
 
   let server = sync_httpd_setup({
     "/1.1/foo/storage/rotary": collection.handler()

@@ -1492,8 +1478,6 @@ add_task(async function test_sync_partialUpload() {
   await generateNewKeys(Service.collectionKeys);
 
   let engine = makeRotaryEngine();
-  engine.lastSync = 123; // needs to be non-zero so that tracker is queried
-  engine.lastSyncLocal = 456;
 
   // Let the third upload fail completely
   var noOfUploads = 0;

@@ -1517,12 +1501,13 @@ add_task(async function test_sync_partialUpload() {
     }
   }
 
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
 
   try {
+    await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
 
     engine.enabled = true;
     let error;

@@ -1534,9 +1519,6 @@ add_task(async function test_sync_partialUpload() {
 
     ok(!!error);
 
-    // The timestamp has been updated.
-    Assert.ok(engine.lastSyncLocal > 456);
-
     const changes = await engine._tracker.getChangedIDs();
     for (let i = 0; i < 234; i++) {
       let id = "record-no-" + i;

@@ -1627,10 +1609,10 @@ add_task(async function test_syncapplied_observer() {
 
   await SyncTestingInfrastructure(server);
 
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
 
   let numApplyCalls = 0;
   let engine_name;

@@ -26,12 +26,13 @@ CatapultEngine.prototype = {
 var scheduler = new SyncScheduler(Service);
 let clientsEngine;
 
-function sync_httpd_setup() {
+async function sync_httpd_setup() {
+  let clientsSyncID = await clientsEngine.resetLocalSyncID();
   let global = new ServerWBO("global", {
     syncID: Service.syncID,
     storageVersion: STORAGE_VERSION,
     engines: {clients: {version: clientsEngine.version,
-                        syncID: clientsEngine.syncID}}
+                        syncID: clientsSyncID}}
   });
   let clientsColl = new ServerCollection({}, true);
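Because `resetLocalSyncID()` is async, `sync_httpd_setup` itself becomes async, and every caller in this file changes identically, as the hunks below show. A sketch of the new call-site shape (wrapped in a task so the `await` is legal):

    add_task(async function example_caller_sketch() {
      // sync_httpd_setup now awaits clientsEngine.resetLocalSyncID()
      // internally, so callers must await it too.
      let server = await sync_httpd_setup();
      await setUp(server);
    });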
@@ -213,7 +214,7 @@ add_task(async function test_masterpassword_locked_retry_interval() {
     return false;
   };
 
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);
 
   await Service.sync();

@@ -255,7 +256,7 @@ add_task(async function test_scheduleNextSync_nowOrPast() {
 
   let promiseObserved = promiseOneObserver("weave:service:sync:finish");
 
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);
 
   // We're late for a sync...

@@ -370,7 +371,7 @@ add_task(async function test_scheduleNextSync_future_backoff() {
 add_task(async function test_handleSyncError() {
   enableValidationPrefs();
 
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);
 
   // Force sync to fail.

@@ -435,7 +436,7 @@ add_task(async function test_handleSyncError() {
 add_task(async function test_client_sync_finish_updateClientMode() {
   enableValidationPrefs();
 
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);
 
   // Confirm defaults.

@@ -479,7 +480,7 @@ add_task(async function test_autoconnect_nextSync_past() {
   let promiseObserved = promiseOneObserver("weave:service:sync:finish");
   // nextSync will be 0 by default, so it's way in the past.
 
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);
 
   scheduler.delayedAutoConnect(0);

@@ -514,7 +515,7 @@ add_task(async function test_autoconnect_nextSync_future() {
 });
 
 add_task(async function test_autoconnect_mp_locked() {
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);
 
   // Pretend user did not unlock master password.

@@ -556,7 +557,7 @@ add_task(async function test_autoconnect_mp_locked() {
 });
 
 add_task(async function test_no_autoconnect_during_wizard() {
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);
 
   // Simulate the Sync setup wizard.

@@ -575,7 +576,7 @@ add_task(async function test_no_autoconnect_during_wizard() {
 });
 
 add_task(async function test_no_autoconnect_status_not_ok() {
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   Status.__authManager = Service.identity = new BrowserIDManager();
 
   // Ensure we don't actually try to sync (or log in for that matter).

@@ -601,7 +602,7 @@ add_task(async function test_autoconnectDelay_pref() {
 
   Svc.Prefs.set("autoconnectDelay", 1);
 
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);
 
   Svc.Obs.notify("weave:service:ready");

@@ -716,7 +717,7 @@ add_task(async function test_no_sync_node() {
 
   // Test when Status.sync == NO_SYNC_NODE_FOUND
   // it is not overwritten on sync:finish
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);
 
   let oldfc = Service.identity._findCluster;

@@ -738,7 +739,7 @@ add_task(async function test_sync_failed_partial_500s() {
 
   _("Test a 5xx status calls handleSyncError.");
   scheduler._syncErrors = MAX_ERROR_COUNT_BEFORE_BACKOFF;
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
 
   let engine = Service.engineManager.get("catapult");
   engine.enabled = true;

@@ -764,7 +765,7 @@ add_task(async function test_sync_failed_partial_500s() {
 
 add_task(async function test_sync_failed_partial_noresync() {
   enableValidationPrefs();
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
 
   let engine = Service.engineManager.get("catapult");
   engine.enabled = true;

@@ -798,7 +799,7 @@ add_task(async function test_sync_failed_partial_400s() {
 
   _("Test a non-5xx status doesn't call handleSyncError.");
   scheduler._syncErrors = MAX_ERROR_COUNT_BEFORE_BACKOFF;
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
 
   let engine = Service.engineManager.get("catapult");
   engine.enabled = true;

@@ -830,7 +831,7 @@ add_task(async function test_sync_failed_partial_400s() {
 add_task(async function test_sync_X_Weave_Backoff() {
   enableValidationPrefs();
 
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);
 
   // Use an odd value on purpose so that it doesn't happen to coincide with one

@@ -889,7 +890,7 @@ add_task(async function test_sync_X_Weave_Backoff() {
 add_task(async function test_sync_503_Retry_After() {
   enableValidationPrefs();
 
-  let server = sync_httpd_setup();
+  let server = await sync_httpd_setup();
   await setUp(server);
 
   // Use an odd value on purpose so that it doesn't happen to coincide with one

@@ -74,10 +74,10 @@ add_task(async function test_tab_engine_skips_incoming_local_record() {
 
   await SyncTestingInfrastructure(server);
 
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {tabs: {version: engine.version,
-                                        syncID: engine.syncID}};
+  meta_global.payload.engines = {tabs: {version: engine.version, syncID}};
 
   await generateNewKeys(Service.collectionKeys);
 

@@ -121,7 +121,7 @@ add_task(async function test_processIncoming_error() {
   };
   // Make the 10 minutes old so it will only be synced in the toFetch phase.
   bogus_record.modified = Date.now() / 1000 - 60 * 10;
-  engine.lastSync = Date.now() / 1000 - 60;
+  await engine.setLastSync(Date.now() / 1000 - 60);
   engine.toFetch = new SerializableSet([BOGUS_GUID]);
 
   let error, pingPayload, fullPing;
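The timing in the hunk above is worth spelling out: the bogus record is made ten minutes old while `lastSync` is set to one minute ago, so the normal newer-than-`lastSync` fetch skips it and it is only retried via `toFetch`. Restating those lines with the arithmetic annotated (same names as in the test; not self-contained on its own):

    const nowSec = Date.now() / 1000;
    bogus_record.modified = nowSec - 60 * 10;           // ten minutes in the past
    await engine.setLastSync(nowSec - 60);              // one minute in the past
    engine.toFetch = new SerializableSet([BOGUS_GUID]); // force the retry fetch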
@@ -210,8 +210,6 @@ add_task(async function test_upload_failed() {
   await configureIdentity({ username: "foo" }, server);
 
   let engine = new RotaryEngine(Service);
-  engine.lastSync = 123; // needs to be non-zero so that tracker is queried
-  engine.lastSyncLocal = 456;
   engine._store.items = {
     flying: "LNER Class A3 4472",
     scotsman: "Flying Scotsman",

@@ -224,10 +222,12 @@ add_task(async function test_upload_failed() {
   await engine._tracker.addChangedID("scotsman", SCOTSMAN_CHANGED);
   await engine._tracker.addChangedID("peppercorn", PEPPERCORN_CHANGED);
 
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL, new WBORecord(engine.metaURL));
-  meta_global.payload.engines = { rotary: { version: engine.version, syncID: engine.syncID } };
+  meta_global.payload.engines = { rotary: { version: engine.version, syncID } };
 
   try {
+    await engine.setLastSync(123); // needs to be non-zero so that tracker is queried
     let changes = await engine._tracker.getChangedIDs();
     _(`test_upload_failed: Rotary tracker contents at first sync: ${
       JSON.stringify(changes)}`);

@@ -237,8 +237,7 @@ add_task(async function test_upload_failed() {
     equal(ping.engines.length, 1);
     equal(ping.engines[0].incoming, null);
     deepEqual(ping.engines[0].outgoing, [{ sent: 3, failed: 2 }]);
-    engine.lastSync = 123;
-    engine.lastSyncLocal = 456;
+    await engine.setLastSync(123);
 
     changes = await engine._tracker.getChangedIDs();
     _(`test_upload_failed: Rotary tracker contents at second sync: ${

@@ -264,9 +263,7 @@ add_task(async function test_sync_partialUpload() {
   await generateNewKeys(Service.collectionKeys);
 
   let engine = new RotaryEngine(Service);
-  engine.lastSync = 123;
-  engine.lastSyncLocal = 456;
-
+  await engine.setLastSync(123);
 
   // Create a bunch of records (and server side handlers)
   for (let i = 0; i < 234; i++) {

@@ -279,10 +276,10 @@ add_task(async function test_sync_partialUpload() {
     }
   }
 
+  let syncID = await engine.resetLocalSyncID();
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                          syncID: engine.syncID}};
+  meta_global.payload.engines = {rotary: {version: engine.version, syncID}};
 
   try {
     let changes = await engine._tracker.getChangedIDs();

@@ -305,8 +302,7 @@ add_task(async function test_sync_partialUpload() {
   await engine._tracker.addChangedID("record-no-1000", 1000);
   collection.insert("record-no-1000", 1000);
 
-  engine.lastSync = 123;
-  engine.lastSyncLocal = 456;
+  await engine.setLastSync(123);
   ping = null;
 
   changes = await engine._tracker.getChangedIDs();