/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

this.EXPORTED_SYMBOLS = [
  "EngineManager",
  "Engine",
  "SyncEngine",
  "Tracker",
  "Store",
  "Changeset"
];

var {classes: Cc, interfaces: Ci, results: Cr, utils: Cu} = Components;

Cu.import("resource://gre/modules/XPCOMUtils.jsm");
Cu.import("resource://services-common/async.js");
Cu.import("resource://gre/modules/Log.jsm");
Cu.import("resource://services-common/observers.js");
Cu.import("resource://services-sync/constants.js");
Cu.import("resource://services-sync/identity.js");
Cu.import("resource://services-sync/record.js");
Cu.import("resource://services-sync/resource.js");
Cu.import("resource://services-sync/util.js");

XPCOMUtils.defineLazyModuleGetter(this, "fxAccounts",
                                  "resource://gre/modules/FxAccounts.jsm");

/*
 * Trackers are associated with a single engine and deal with
 * listening for changes to their particular data type.
 *
 * There are two things they keep track of:
 * 1) A score, indicating how urgently the engine wants to sync
 * 2) A list of IDs for all the changed items that need to be synced
 */
this.Tracker = function Tracker(name, engine) {
  if (!engine) {
    throw new Error("Tracker must be associated with an Engine instance.");
  }

  name = name || "Unnamed";
  this.name = this.file = name.toLowerCase();
  this.engine = engine;

  this._log = Log.repository.getLogger("Sync.Tracker." + name);
  let level = Svc.Prefs.get("log.logger.engine." + this.name, "Debug");
  this._log.level = Log.Level[level];

  this._score = 0;
  this._ignored = [];
  this.ignoreAll = false;
  this.changedIDs = {};
  this.loadChangedIDs();

  Svc.Obs.add("weave:engine:start-tracking", this);
  Svc.Obs.add("weave:engine:stop-tracking", this);

  Svc.Prefs.observe("engine." + this.engine.prefName, this);
};
Tracker.prototype = {
  /*
   * Score can be checked as often as desired to decide which engines to sync
   *
   * Valid values for score:
   * -1: Do not sync unless the user specifically requests it (almost disabled)
   * 0: Nothing has changed
   * 100: Please sync me ASAP!
   *
   * Setting it to other values should (but doesn't currently) throw an exception
   */
  get score() {
    return this._score;
  },

  set score(value) {
    this._score = value;
    Observers.notify("weave:engine:score:updated", this.name);
  },

  // Should be called by the service every time a sync has been done for an engine.
  resetScore: function () {
    this._score = 0;
  },
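
  // Usage sketch: a tracker subclass typically bumps the score from its
  // change listener, and the sync scheduler (listening for
  // "weave:engine:score:updated") decides when a sync is urgent enough.
  // SCORE_INCREMENT_MEDIUM comes from constants.js; the listener itself is
  // hypothetical:
  //
  //   onItemChanged: function (guid) {
  //     this.addChangedID(guid);
  //     this.score += SCORE_INCREMENT_MEDIUM;
  //   },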

  persistChangedIDs: true,

  /**
   * Persist changedIDs to disk at a later date.
   * Optionally pass a callback to be invoked when the write has occurred.
   */
  saveChangedIDs: function (cb) {
    if (!this.persistChangedIDs) {
      this._log.debug("Not saving changedIDs.");
      return;
    }
    Utils.namedTimer(function () {
      this._log.debug("Saving changed IDs to " + this.file);
      Utils.jsonSave("changes/" + this.file, this, this.changedIDs, cb);
    }, 1000, this, "_lazySave");
  },

  loadChangedIDs: function (cb) {
    Utils.jsonLoad("changes/" + this.file, this, function(json) {
      if (json && (typeof(json) == "object")) {
        this.changedIDs = json;
      } else {
        this._log.warn("Changed IDs file " + this.file + " contains non-object value.");
        json = null;
      }
      if (cb) {
        cb.call(this, json);
      }
    });
  },

  // Ignore/unignore specific IDs. Useful for ignoring items that are
  // being processed, or that shouldn't be synced.
  // Note: the ignore list is not persisted to disk.

  ignoreID: function (id) {
    this.unignoreID(id);
    this._ignored.push(id);
  },

  unignoreID: function (id) {
    let index = this._ignored.indexOf(id);
    if (index != -1)
      this._ignored.splice(index, 1);
  },

  _saveChangedID(id, when) {
    this._log.trace(`Adding changed ID: ${id}, ${JSON.stringify(when)}`);
    this.changedIDs[id] = when;
    this.saveChangedIDs(this.onSavedChangedIDs);
  },

  addChangedID: function (id, when) {
    if (!id) {
      this._log.warn("Attempted to add undefined ID to tracker");
      return false;
    }

    if (this.ignoreAll || this._ignored.includes(id)) {
      return false;
    }

    // Default to the current time in seconds if no time is provided.
    if (when == null) {
      when = this._now();
    }

    // Add/update the entry if we have a newer time.
    if ((this.changedIDs[id] || -Infinity) < when) {
      this._saveChangedID(id, when);
    }

    return true;
  },

  removeChangedID: function (id) {
    if (!id) {
      this._log.warn("Attempted to remove undefined ID from tracker");
      return false;
    }
    if (this.ignoreAll || this._ignored.includes(id)) {
      return false;
    }
    if (this.changedIDs[id] != null) {
      this._log.trace("Removing changed ID " + id);
      delete this.changedIDs[id];
      this.saveChangedIDs();
    }
    return true;
  },

  clearChangedIDs: function () {
    this._log.trace("Clearing changed ID list");
    this.changedIDs = {};
    this.saveChangedIDs();
  },

  _now() {
    return Date.now() / 1000;
  },

  _isTracking: false,

  // Override these in your subclasses.
  startTracking: function () {
  },

  stopTracking: function () {
  },

  engineIsEnabled: function () {
    if (!this.engine) {
      // Can't tell -- we must be running in a test!
      return true;
    }
    return this.engine.enabled;
  },

  onEngineEnabledChanged: function (engineEnabled) {
    if (engineEnabled == this._isTracking) {
      return;
    }

    if (engineEnabled) {
      this.startTracking();
      this._isTracking = true;
    } else {
      this.stopTracking();
      this._isTracking = false;
      this.clearChangedIDs();
    }
  },

  observe: function (subject, topic, data) {
    switch (topic) {
      case "weave:engine:start-tracking":
        if (!this.engineIsEnabled()) {
          return;
        }
        this._log.trace("Got start-tracking.");
        if (!this._isTracking) {
          this.startTracking();
          this._isTracking = true;
        }
        return;
      case "weave:engine:stop-tracking":
        this._log.trace("Got stop-tracking.");
        if (this._isTracking) {
          this.stopTracking();
          this._isTracking = false;
        }
        return;
      case "nsPref:changed":
        if (data == PREFS_BRANCH + "engine." + this.engine.prefName) {
          this.onEngineEnabledChanged(this.engine.enabled);
        }
        return;
    }
  }
};
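
// A minimal sketch of a concrete tracker, assuming a hypothetical
// "example-item-changed" observer topic; real trackers hook engine-specific
// notification APIs instead:
//
//   function ExampleTracker(name, engine) {
//     Tracker.call(this, name, engine);
//   }
//   ExampleTracker.prototype = {
//     __proto__: Tracker.prototype,
//     startTracking: function () {
//       Svc.Obs.add("example-item-changed", this);
//     },
//     stopTracking: function () {
//       Svc.Obs.remove("example-item-changed", this);
//     },
//     observe: function (subject, topic, data) {
//       Tracker.prototype.observe.call(this, subject, topic, data);
//       if (topic == "example-item-changed" && !this.ignoreAll) {
//         this.addChangedID(data); // data carries the changed item's GUID
//         this.score += SCORE_INCREMENT_SMALL;
//       }
//     }
//   };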

/**
 * The Store serves as the interface between Sync and stored data.
 *
 * The name "store" is something of a misnomer because it doesn't actually
 * "store" anything. Instead, it serves as a gateway to something that
 * actually does the "storing."
 *
 * The store is responsible for record management inside an engine. It tells
 * Sync what items are available for Sync, converts items to and from Sync's
 * record format, and applies records from Sync into changes on the underlying
 * store.
 *
 * Store implementations require a number of functions to be implemented. These
 * are all documented below.
 *
 * For stores that deal with many records or which have expensive store access
 * routines, it is highly recommended to implement a custom applyIncomingBatch
 * and/or applyIncoming function on top of the basic APIs.
 */
this.Store = function Store(name, engine) {
  if (!engine) {
    throw new Error("Store must be associated with an Engine instance.");
  }

  name = name || "Unnamed";
  this.name = name.toLowerCase();
  this.engine = engine;

  this._log = Log.repository.getLogger("Sync.Store." + name);
  let level = Svc.Prefs.get("log.logger.engine." + this.name, "Debug");
  this._log.level = Log.Level[level];

  XPCOMUtils.defineLazyGetter(this, "_timer", function() {
    return Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
  });
}
Store.prototype = {
  _sleep: function _sleep(delay) {
    let cb = Async.makeSyncCallback();
    this._timer.initWithCallback(cb, delay, Ci.nsITimer.TYPE_ONE_SHOT);
    Async.waitForSyncCallback(cb);
  },

  /**
   * Apply multiple incoming records against the store.
   *
   * This is called with a set of incoming records to process. The function
   * should look at each record, reconcile with the current local state, and
   * make the local changes required to bring its state in alignment with the
   * record.
   *
   * The default implementation simply iterates over all records and calls
   * applyIncoming(). Store implementations may override this function
   * if desired.
   *
   * @param records Array of records to apply
   * @return Array of record IDs which did not apply cleanly
   */
  applyIncomingBatch: function (records) {
    let failed = [];
    for (let record of records) {
      try {
        this.applyIncoming(record);
      } catch (ex) {
        if (ex.code == Engine.prototype.eEngineAbortApplyIncoming) {
          // This kind of exception should have a 'cause' attribute, which is an
          // originating exception.
          // ex.cause will carry its stack with it when rethrown.
          throw ex.cause;
        }
        if (Async.isShutdownException(ex)) {
          throw ex;
        }
        this._log.warn("Failed to apply incoming record " + record.id, ex);
        this.engine._noteApplyFailure();
        failed.push(record.id);
      }
    }
    return failed;
  },

  /**
   * Apply a single record against the store.
   *
   * This takes a single record and makes the local changes required so the
   * local state matches what's in the record.
   *
   * The default implementation calls one of remove(), create(), or update()
   * depending on the state obtained from the store itself. Store
   * implementations may override this function if desired.
   *
   * @param record
   *        Record to apply
   */
  applyIncoming: function (record) {
    if (record.deleted)
      this.remove(record);
    else if (!this.itemExists(record.id))
      this.create(record);
    else
      this.update(record);
  },
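
  // A sketch of a batched override, assuming a hypothetical database-backed
  // store with a runInTransaction() helper; wrapping the whole batch in one
  // transaction amortizes expensive store access:
  //
  //   applyIncomingBatch: function (records) {
  //     let failed;
  //     this._db.runInTransaction(() => {
  //       failed = Store.prototype.applyIncomingBatch.call(this, records);
  //     });
  //     return failed;
  //   },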

  // override these in derived objects

  /**
   * Create an item in the store from a record.
   *
   * This is called by the default implementation of applyIncoming(). If using
   * applyIncomingBatch(), this won't be called unless your store calls it.
   *
   * @param record
   *        The store record to create an item from
   */
  create: function (record) {
    throw "override create in a subclass";
  },

  /**
   * Remove an item in the store from a record.
   *
   * This is called by the default implementation of applyIncoming(). If using
   * applyIncomingBatch(), this won't be called unless your store calls it.
   *
   * @param record
   *        The store record to delete an item from
   */
  remove: function (record) {
    throw "override remove in a subclass";
  },

  /**
   * Update an item from a record.
   *
   * This is called by the default implementation of applyIncoming(). If using
   * applyIncomingBatch(), this won't be called unless your store calls it.
   *
   * @param record
   *        The record to use to update an item from
   */
  update: function (record) {
    throw "override update in a subclass";
  },

  /**
   * Determine whether a record with the specified ID exists.
   *
   * Takes a string record ID and returns a boolean saying whether the record
   * exists.
   *
   * @param id
   *        string record ID
   * @return boolean indicating whether record exists locally
   */
  itemExists: function (id) {
    throw "override itemExists in a subclass";
  },

  /**
   * Create a record from the specified ID.
   *
   * If the ID is known, the record should be populated with metadata from
   * the store. If the ID is not known, the record should be created with the
   * delete field set to true.
   *
   * @param id
   *        string record ID
   * @param collection
   *        Collection to add record to. This is typically passed into the
   *        constructor for the newly-created record.
   * @return record type for this engine
   */
  createRecord: function (id, collection) {
    throw "override createRecord in a subclass";
  },

  /**
   * Change the ID of a record.
   *
   * @param oldID
   *        string old/current record ID
   * @param newID
   *        string new record ID
   */
  changeItemID: function (oldID, newID) {
    throw "override changeItemID in a subclass";
  },

  /**
   * Obtain the set of all known record IDs.
   *
   * @return Object with ID strings as keys and values of true. The values
   *         are ignored.
   */
  getAllIDs: function () {
    throw "override getAllIDs in a subclass";
  },

  /**
   * Wipe all data in the store.
   *
   * This function is called during remote wipes or when replacing local data
   * with remote data.
   *
   * This function should delete all local data that the store is managing. It
   * can be thought of as clearing out all state and restoring the "new
   * browser" state.
   */
  wipe: function () {
    throw "override wipe in a subclass";
  }
};
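
// A minimal in-memory store, sketching the interface above. The Map-backed
// _items field is illustrative, not part of Sync:
//
//   function ExampleStore(name, engine) {
//     Store.call(this, name, engine);
//     this._items = new Map();
//   }
//   ExampleStore.prototype = {
//     __proto__: Store.prototype,
//     itemExists(id) { return this._items.has(id); },
//     create(record) { this._items.set(record.id, record.cleartext); },
//     update(record) { this._items.set(record.id, record.cleartext); },
//     remove(record) { this._items.delete(record.id); },
//     changeItemID(oldID, newID) {
//       this._items.set(newID, this._items.get(oldID));
//       this._items.delete(oldID);
//     },
//     getAllIDs() {
//       let ids = {};
//       for (let id of this._items.keys()) {
//         ids[id] = true;
//       }
//       return ids;
//     },
//     wipe() { this._items.clear(); },
//     createRecord(id, collection) {
//       let record = new CryptoWrapper(collection, id);
//       if (this.itemExists(id)) {
//         record.cleartext = this._items.get(id);
//       } else {
//         record.deleted = true;
//       }
//       return record;
//     }
//   };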

this.EngineManager = function EngineManager(service) {
  this.service = service;

  this._engines = {};

  // This will be populated by Service on startup.
  this._declined = new Set();
  this._log = Log.repository.getLogger("Sync.EngineManager");
  this._log.level = Log.Level[Svc.Prefs.get("log.logger.service.engines", "Debug")];
}
EngineManager.prototype = {
  get: function (name) {
    // Return an array of engines if we have an array of names
    if (Array.isArray(name)) {
      let engines = [];
      name.forEach(function(name) {
        let engine = this.get(name);
        if (engine) {
          engines.push(engine);
        }
      }, this);
      return engines;
    }

    let engine = this._engines[name];
    if (!engine) {
      this._log.debug("Could not get engine: " + name);
      if (Object.keys) {
        this._log.debug("Engines are: " + JSON.stringify(Object.keys(this._engines)));
      }
    }
    return engine;
  },

  getAll: function () {
    let engines = [];
    for (let [, engine] of Object.entries(this._engines)) {
      engines.push(engine);
    }
    return engines;
  },

  /**
   * N.B., does not pay attention to the declined list.
   */
  getEnabled: function () {
    return this.getAll()
               .filter((engine) => engine.enabled)
               .sort((a, b) => a.syncPriority - b.syncPriority);
  },

  get enabledEngineNames() {
    return this.getEnabled().map(e => e.name);
  },

  persistDeclined: function () {
    Svc.Prefs.set("declinedEngines", [...this._declined].join(","));
  },

  /**
   * Returns an array.
   */
  getDeclined: function () {
    return [...this._declined];
  },

  setDeclined: function (engines) {
    this._declined = new Set(engines);
    this.persistDeclined();
  },

  isDeclined: function (engineName) {
    return this._declined.has(engineName);
  },

  /**
   * Accepts a Set or an array.
   */
  decline: function (engines) {
    for (let e of engines) {
      this._declined.add(e);
    }
    this.persistDeclined();
  },

  undecline: function (engines) {
    for (let e of engines) {
      this._declined.delete(e);
    }
    this.persistDeclined();
  },

  /**
   * Mark any non-enabled engines as declined.
   *
   * This is useful after initial customization during setup.
   */
  declineDisabled: function () {
    for (let e of this.getAll()) {
      if (!e.enabled) {
        this._log.debug("Declining disabled engine " + e.name);
        this._declined.add(e.name);
      }
    }
    this.persistDeclined();
  },

  /**
   * Register an Engine with the service. Alternatively, give an array of
   * engine objects to register.
   *
   * @param engineObject
   *        Engine object used to get an instance of the engine
   * @return The engine object if registration failed
   */
  register: function (engineObject) {
    if (Array.isArray(engineObject)) {
      return engineObject.map(this.register, this);
    }

    try {
      let engine = new engineObject(this.service);
      let name = engine.name;
      if (name in this._engines) {
        this._log.error("Engine '" + name + "' is already registered!");
      } else {
        this._engines[name] = engine;
      }
    } catch (ex) {
      let name = engineObject || "";
      name = name.prototype || "";
      name = name.name || "";

      this._log.error(`Could not initialize engine ${name}`, ex);
      return engineObject;
    }
  },

  unregister: function (val) {
    let name = val;
    if (val instanceof Engine) {
      name = val.name;
    }
    delete this._engines[name];
  },

  clear: function () {
    for (let name in this._engines) {
      delete this._engines[name];
    }
  },
};
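
// Usage sketch (hypothetical engine constructors; in practice Service wires
// this up at startup):
//
//   let manager = new EngineManager(service);
//   manager.register([BookmarksEngine, HistoryEngine]);
//   manager.setDeclined(["addons"]);      // the user opted out of add-on sync
//   let toSync = manager.getEnabled();    // enabled engines, by syncPriority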

this.Engine = function Engine(name, service) {
  if (!service) {
    throw new Error("Engine must be associated with a Service instance.");
  }

  this.Name = name || "Unnamed";
  this.name = this.Name.toLowerCase();
  this.service = service;

  this._notify = Utils.notify("weave:engine:");
  this._log = Log.repository.getLogger("Sync.Engine." + this.Name);
  let level = Svc.Prefs.get("log.logger.engine." + this.name, "Debug");
  this._log.level = Log.Level[level];

  this._tracker; // initialize tracker to load previously changed IDs
  this._log.debug("Engine initialized");
}
Engine.prototype = {
  // _storeObj and _trackerObj should be overridden in subclasses
  _storeObj: Store,
  _trackerObj: Tracker,

  // Local 'constant'.
  // Signal to the engine that processing further records is pointless.
  eEngineAbortApplyIncoming: "error.engine.abort.applyincoming",

  // Should we keep syncing if we find a record that cannot be uploaded (ever)?
  // If this is false, we'll throw; otherwise we'll ignore the record and
  // continue. This currently can only happen due to the record being larger
  // than the record upload limit.
  allowSkippedRecord: false,

  get prefName() {
    return this.name;
  },

  get enabled() {
    return Svc.Prefs.get("engine." + this.prefName, false);
  },

  set enabled(val) {
    Svc.Prefs.set("engine." + this.prefName, !!val);
  },

  get score() {
    return this._tracker.score;
  },

  get _store() {
    let store = new this._storeObj(this.Name, this);
    this.__defineGetter__("_store", () => store);
    return store;
  },

  get _tracker() {
    let tracker = new this._trackerObj(this.Name, this);
    this.__defineGetter__("_tracker", () => tracker);
    return tracker;
  },

  sync: function () {
    if (!this.enabled) {
      return;
    }

    if (!this._sync) {
      throw "engine does not implement _sync method";
    }

    this._notify("sync", this.name, this._sync)();
  },

  /**
   * Get rid of any local meta-data.
   */
  resetClient: function () {
    if (!this._resetClient) {
      throw "engine does not implement _resetClient method";
    }

    this._notify("reset-client", this.name, this._resetClient)();
  },

  _wipeClient: function () {
    this.resetClient();
    this._log.debug("Deleting all local data");
    this._tracker.ignoreAll = true;
    this._store.wipe();
    this._tracker.ignoreAll = false;
    this._tracker.clearChangedIDs();
  },

  wipeClient: function () {
    this._notify("wipe-client", this.name, this._wipeClient)();
  },

  /**
   * If one exists, initialize and return a validator for this engine (which
   * must have a `validate(engine)` method that returns a promise to an object
   * with a getSummary method). Otherwise return null.
   */
  getValidator: function () {
    return null;
  }
};
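
// A sketch of a minimal concrete engine tying the pieces together
// (ExampleStore and ExampleTracker are the illustrative classes sketched
// above; Engine.sync() wraps _sync with weave:engine:sync notifications):
//
//   function ExampleEngine(service) {
//     Engine.call(this, "Example", service);
//   }
//   ExampleEngine.prototype = {
//     __proto__: Engine.prototype,
//     _storeObj: ExampleStore,
//     _trackerObj: ExampleTracker,
//     _sync: function () {
//       // Exchange records with the server here.
//     }
//   };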

this.SyncEngine = function SyncEngine(name, service) {
  Engine.call(this, name || "SyncEngine", service);

  this.loadToFetch();
  this.loadPreviousFailed();
}

// Enumeration to define approaches to handling bad records.
// Attached to the constructor to allow use as a kind of static enumeration.
SyncEngine.kRecoveryStrategy = {
  ignore: "ignore",
  retry: "retry",
  error: "error"
};
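
// Sketch of how an engine steers bad-record handling via these strategies;
// the base handleHMACMismatch lives on SyncEngine.prototype, and this
// override is illustrative:
//
//   handleHMACMismatch: function (item, mayRetry) {
//     // First failure: ask _processIncoming to refetch keys and retry once.
//     // Second failure: mark the record failed instead of aborting the sync.
//     return mayRetry ? SyncEngine.kRecoveryStrategy.retry
//                     : SyncEngine.kRecoveryStrategy.error;
//   },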

SyncEngine.prototype = {
  __proto__: Engine.prototype,
  _recordObj: CryptoWrapper,
  version: 1,

  // Which sortindex to use when retrieving records for this engine.
  _defaultSort: undefined,

  // A relative priority to use when computing an order
  // for engines to be synced. Higher-priority engines
  // (lower numbers) are synced first.
  // It is recommended that a unique value be used for each engine,
  // in order to guarantee a stable sequence.
  syncPriority: 0,

  // How many records to pull in a single sync. This is primarily to avoid very
  // long first syncs against profiles with many history records.
  downloadLimit: null,

  // How many records to pull at one time when specifying IDs. This is to avoid
  // URI length limitations.
  guidFetchBatchSize: DEFAULT_GUID_FETCH_BATCH_SIZE,
  mobileGUIDFetchBatchSize: DEFAULT_MOBILE_GUID_FETCH_BATCH_SIZE,

  // How many records to process in a single batch.
  applyIncomingBatchSize: DEFAULT_STORE_BATCH_SIZE,

  get storageURL() {
    return this.service.storageURL;
  },

  get engineURL() {
    return this.storageURL + this.name;
  },

  get cryptoKeysURL() {
    return this.storageURL + "crypto/keys";
  },

  get metaURL() {
    return this.storageURL + "meta/global";
  },

  get syncID() {
    // Generate a random syncID if we don't have one
    let syncID = Svc.Prefs.get(this.name + ".syncID", "");
    return syncID == "" ? this.syncID = Utils.makeGUID() : syncID;
  },
  set syncID(value) {
    Svc.Prefs.set(this.name + ".syncID", value);
  },

  /*
   * lastSync is a timestamp in server time.
   */
  get lastSync() {
    return parseFloat(Svc.Prefs.get(this.name + ".lastSync", "0"));
  },
  set lastSync(value) {
    // Reset the pref in case it's a number instead of a string
    Svc.Prefs.reset(this.name + ".lastSync");
    // Store the value as a string to keep floating point precision
    Svc.Prefs.set(this.name + ".lastSync", value.toString());
  },
  resetLastSync: function () {
    this._log.debug("Resetting " + this.name + " last sync time");
    Svc.Prefs.reset(this.name + ".lastSync");
    Svc.Prefs.set(this.name + ".lastSync", "0");
    this.lastSyncLocal = 0;
  },

  get toFetch() {
    return this._toFetch;
  },
  set toFetch(val) {
    let cb = (error) => {
      if (error) {
        this._log.error("Failed to save JSON records to fetch", error);
      }
    };
    // Coerce the array to a string for more efficient comparison.
    if (val + "" == this._toFetch) {
      return;
    }
    this._toFetch = val;
    Utils.namedTimer(function () {
      Utils.jsonSave("toFetch/" + this.name, this, val, cb);
    }, 0, this, "_toFetchDelay");
  },

  loadToFetch: function () {
    // Initialize to empty if there's no file.
    this._toFetch = [];
    Utils.jsonLoad("toFetch/" + this.name, this, function(toFetch) {
      if (toFetch) {
        this._toFetch = toFetch;
      }
    });
  },

  get previousFailed() {
    return this._previousFailed;
  },
  set previousFailed(val) {
    let cb = (error) => {
      if (error) {
        this._log.error("Failed to set previousFailed", error);
      } else {
        this._log.debug("Successfully wrote previousFailed.");
      }
    };
    // Coerce the array to a string for more efficient comparison.
    if (val + "" == this._previousFailed) {
      return;
    }
    this._previousFailed = val;
    Utils.namedTimer(function () {
      Utils.jsonSave("failed/" + this.name, this, val, cb);
    }, 0, this, "_previousFailedDelay");
  },

  loadPreviousFailed: function () {
    // Initialize to empty if there's no file
    this._previousFailed = [];
    Utils.jsonLoad("failed/" + this.name, this, function(previousFailed) {
      if (previousFailed) {
        this._previousFailed = previousFailed;
      }
    });
  },

  /*
   * lastSyncLocal is a timestamp in local time.
   */
  get lastSyncLocal() {
    return parseInt(Svc.Prefs.get(this.name + ".lastSyncLocal", "0"), 10);
  },
  set lastSyncLocal(value) {
    // Store as a string because pref can only store C longs as numbers.
    Svc.Prefs.set(this.name + ".lastSyncLocal", value.toString());
  },

  /*
   * Returns a changeset for this sync. Engine implementations can override this
   * method to bypass the tracker for certain or all changed items.
   */
  getChangedIDs: function () {
    return this._tracker.changedIDs;
  },

  // Create a new record using the store and add in crypto fields.
  _createRecord: function (id) {
    let record = this._store.createRecord(id, this.name);
    record.id = id;
    record.collection = this.name;
    return record;
  },

  // Any setup that needs to happen at the beginning of each sync.
  _syncStartup: function () {

    // Determine if we need to wipe on outdated versions
    let metaGlobal = this.service.recordManager.get(this.metaURL);
    let engines = metaGlobal.payload.engines || {};
    let engineData = engines[this.name] || {};

    let needsWipe = false;

    // Assume missing versions are 0 and wipe the server
    if ((engineData.version || 0) < this.version) {
      this._log.debug("Old engine data: " + [engineData.version, this.version]);

      // Prepare to clear the server and upload everything
      needsWipe = true;
      this.syncID = "";

      // Set the newer version and newly generated syncID
      engineData.version = this.version;
      engineData.syncID = this.syncID;

      // Put the new data back into meta/global and mark for upload
      engines[this.name] = engineData;
      metaGlobal.payload.engines = engines;
      metaGlobal.changed = true;
    }
    // Don't sync this engine if the server has newer data
    else if (engineData.version > this.version) {
      let error = new String("New data: " + [engineData.version, this.version]);
      error.failureCode = VERSION_OUT_OF_DATE;
      throw error;
    }
    // Changes to syncID mean we'll need to upload everything
    else if (engineData.syncID != this.syncID) {
      this._log.debug("Engine syncIDs: " + [engineData.syncID, this.syncID]);
      this.syncID = engineData.syncID;
      this._resetClient();
    }

    // Delete any existing data and reupload on bad version or missing meta.
    // No crypto component here...? We could regenerate per-collection keys...
    if (needsWipe) {
      this.wipeServer();
    }

    // Save objects that need to be uploaded in this._modified. We also save
    // the timestamp of this fetch in this.lastSyncLocal. As we successfully
    // upload objects we remove them from this._modified. If an error occurs
    // or any objects fail to upload, they will remain in this._modified. At
    // the end of a sync, or after an error, we add all objects remaining in
    // this._modified to the tracker.
    this.lastSyncLocal = Date.now();
    if (this.lastSync) {
      this._modified = this.pullNewChanges();
    } else {
      this._log.debug("First sync, uploading all items");
      this._modified = this.pullAllChanges();
    }
    // Clear the tracker now. If the sync fails we'll add the ones we failed
    // to upload back.
    this._tracker.clearChangedIDs();

    this._log.info(this._modified.count() +
                   " outgoing items pre-reconciliation");

    // Keep track of what to delete at the end of sync
    this._delete = {};
  },
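
  // For reference, the engines block of meta/global that the version/syncID
  // checks above operate on looks roughly like this (illustrative values):
  //
  //   "engines": {
  //     "bookmarks": {"version": 2, "syncID": "fDg08CoRcBOy"},
  //     "history":   {"version": 1, "syncID": "GXF29AHthZ9k"}
  //   }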

  /**
   * A tiny abstraction to make it easier to test incoming record
   * application.
   */
  itemSource: function () {
    return new Collection(this.engineURL, this._recordObj, this.service);
  },

  /**
   * Process incoming records.
   * In the most awful and untestable way possible.
   * This now accepts something that makes testing vaguely less impossible.
   */
  _processIncoming: function (newitems) {
    this._log.trace("Downloading & applying server changes");

    // Figure out how many total items to fetch this sync; do less on mobile.
    let batchSize = this.downloadLimit || Infinity;
    let isMobile = (Svc.Prefs.get("client.type") == "mobile");

    if (!newitems) {
      newitems = this.itemSource();
    }

    if (this._defaultSort) {
      newitems.sort = this._defaultSort;
    }

    if (isMobile) {
      batchSize = MOBILE_BATCH_SIZE;
    }
    newitems.newer = this.lastSync;
    newitems.full = true;
    newitems.limit = batchSize;

    // applied => number of items that should be applied.
    // failed => number of items that failed in this sync.
    // newFailed => number of items that failed for the first time in this sync.
    // reconciled => number of items that were reconciled.
    let count = {applied: 0, failed: 0, newFailed: 0, reconciled: 0};
    let handled = [];
    let applyBatch = [];
    let failed = [];
    let failedInPreviousSync = this.previousFailed;
    let fetchBatch = Utils.arrayUnion(this.toFetch, failedInPreviousSync);
    // Reset previousFailed for each sync since previously failed items may not fail again.
    this.previousFailed = [];

    // Used (via exceptions) to allow the record handler/reconciliation/etc.
    // methods to signal that they would like processing of incoming records to
    // cease.
    let aborting = undefined;

    function doApplyBatch() {
      this._tracker.ignoreAll = true;
      try {
        failed = failed.concat(this._store.applyIncomingBatch(applyBatch));
      } catch (ex) {
        if (Async.isShutdownException(ex)) {
          throw ex;
        }
        // Catch any error that escapes from applyIncomingBatch. At present
        // those will all be abort events.
        this._log.warn("Got exception, aborting processIncoming", ex);
        aborting = ex;
      }
      this._tracker.ignoreAll = false;
      applyBatch = [];
    }

    function doApplyBatchAndPersistFailed() {
      // Apply remaining batch.
      if (applyBatch.length) {
        doApplyBatch.call(this);
      }
      // Persist failed items so we refetch them.
      if (failed.length) {
        this.previousFailed = Utils.arrayUnion(failed, this.previousFailed);
        count.failed += failed.length;
        this._log.debug("Records that failed to apply: " + failed);
        failed = [];
      }
    }

    let key = this.service.collectionKeys.keyForCollection(this.name);

    // Not binding this method to 'this' for performance reasons. It gets
    // called for every incoming record.
    let self = this;

    newitems.recordHandler = function(item) {
      if (aborting) {
        return;
      }

      // Grab a later last modified if possible
      if (self.lastModified == null || item.modified > self.lastModified)
        self.lastModified = item.modified;

      // Track the collection for the WBO.
      item.collection = self.name;

      // Remember which records were processed
      handled.push(item.id);

      try {
        try {
          item.decrypt(key);
        } catch (ex) {
          if (!Utils.isHMACMismatch(ex)) {
            throw ex;
          }
          let strategy = self.handleHMACMismatch(item, true);
          if (strategy == SyncEngine.kRecoveryStrategy.retry) {
            // You only get one retry.
            try {
              // Try decrypting again, typically because we've got new keys.
              self._log.info("Trying decrypt again...");
              key = self.service.collectionKeys.keyForCollection(self.name);
              item.decrypt(key);
              strategy = null;
            } catch (ex) {
              if (!Utils.isHMACMismatch(ex)) {
                throw ex;
              }
              strategy = self.handleHMACMismatch(item, false);
            }
          }

          switch (strategy) {
            case null:
              // Retry succeeded! No further handling.
              break;
            case SyncEngine.kRecoveryStrategy.retry:
              self._log.debug("Ignoring second retry suggestion.");
              // Fall through to error case.
            case SyncEngine.kRecoveryStrategy.error:
              self._log.warn("Error decrypting record", ex);
              self._noteApplyFailure();
              failed.push(item.id);
              return;
            case SyncEngine.kRecoveryStrategy.ignore:
              self._log.debug("Ignoring record " + item.id +
                              " with bad HMAC: already handled.");
              return;
          }
        }
      } catch (ex) {
        if (Async.isShutdownException(ex)) {
          throw ex;
        }
        self._log.warn("Error decrypting record", ex);
        self._noteApplyFailure();
        failed.push(item.id);
        return;
      }

      let shouldApply;
      try {
        shouldApply = self._reconcile(item);
      } catch (ex) {
        if (ex.code == Engine.prototype.eEngineAbortApplyIncoming) {
          self._log.warn("Reconciliation failed: aborting incoming processing.");
          self._noteApplyFailure();
          failed.push(item.id);
          aborting = ex.cause;
        } else if (!Async.isShutdownException(ex)) {
          self._log.warn("Failed to reconcile incoming record " + item.id, ex);
          self._noteApplyFailure();
          failed.push(item.id);
          return;
        } else {
          throw ex;
        }
      }

      if (shouldApply) {
        count.applied++;
        applyBatch.push(item);
      } else {
        count.reconciled++;
        self._log.trace("Skipping reconciled incoming item " + item.id);
      }

      if (applyBatch.length == self.applyIncomingBatchSize) {
        doApplyBatch.call(self);
      }
      self._store._sleep(0);
    };

    // Only bother getting data from the server if there are new things
    if (this.lastModified == null || this.lastModified > this.lastSync) {
      let resp = newitems.get();
      doApplyBatchAndPersistFailed.call(this);
      if (!resp.success) {
        resp.failureCode = ENGINE_DOWNLOAD_FAIL;
        throw resp;
      }

      if (aborting) {
        throw aborting;
      }
    }

    // Mobile: check if we got the maximum that we requested; get the rest if so.
    if (handled.length == newitems.limit) {
      let guidColl = new Collection(this.engineURL, null, this.service);

      // Sort and limit so that on mobile we only get the last X records.
      guidColl.limit = this.downloadLimit;
      guidColl.newer = this.lastSync;

      // index: Orders by the sortindex descending (highest weight first).
      guidColl.sort = "index";

      let guids = guidColl.get();
      if (!guids.success)
        throw guids;

      // Figure out which guids weren't just fetched, then remove any guids
      // that were already waiting and prepend the new ones
      let extra = Utils.arraySub(guids.obj, handled);
      if (extra.length > 0) {
        fetchBatch = Utils.arrayUnion(extra, fetchBatch);
        this.toFetch = Utils.arrayUnion(extra, this.toFetch);
      }
    }

    // Fast-forward the lastSync timestamp since we have stored the
    // remaining items in toFetch.
    if (this.lastSync < this.lastModified) {
      this.lastSync = this.lastModified;
    }

    // Process any backlog of GUIDs.
    // At this point we impose an upper limit on the number of items to fetch
    // in a single request, even for desktop, to avoid hitting URI limits.
    batchSize = isMobile ? this.mobileGUIDFetchBatchSize :
                           this.guidFetchBatchSize;

    while (fetchBatch.length && !aborting) {
      // Reuse the original query, but get rid of the restricting params
      // and batch remaining records.
      newitems.limit = 0;
      newitems.newer = 0;
      newitems.ids = fetchBatch.slice(0, batchSize);

      // Reuse the existing record handler set earlier
      let resp = newitems.get();
      if (!resp.success) {
        resp.failureCode = ENGINE_DOWNLOAD_FAIL;
        throw resp;
      }

      // This batch was successfully applied. Not using
      // doApplyBatchAndPersistFailed() here to avoid writing toFetch twice.
      fetchBatch = fetchBatch.slice(batchSize);
      this.toFetch = Utils.arraySub(this.toFetch, newitems.ids);
      this.previousFailed = Utils.arrayUnion(this.previousFailed, failed);
      if (failed.length) {
        count.failed += failed.length;
        this._log.debug("Records that failed to apply: " + failed);
      }
      failed = [];

      if (aborting) {
        throw aborting;
      }

      if (this.lastSync < this.lastModified) {
        this.lastSync = this.lastModified;
      }
    }

    // Apply remaining items.
    doApplyBatchAndPersistFailed.call(this);

    count.newFailed = this.previousFailed.reduce((acc, id) => {
      if (failedInPreviousSync.indexOf(id) == -1) {
        acc++;
        this._noteApplyNewFailure();
      }
      return acc;
    }, 0);
    count.succeeded = Math.max(0, count.applied - count.failed);
    this._log.info(["Records:",
                    count.applied, "applied,",
                    count.succeeded, "successfully,",
                    count.failed, "failed to apply,",
                    count.newFailed, "newly failed to apply,",
                    count.reconciled, "reconciled."].join(" "));
    Observers.notify("weave:engine:sync:applied", count, this.name);
  },
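
  // Observers of "weave:engine:sync:applied" receive the count object
  // assembled above, e.g. (illustrative numbers):
  //
  //   {applied: 50, succeeded: 48, failed: 2, newFailed: 1, reconciled: 5}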

  _noteApplyFailure: function () {
    // here would be a good place to record telemetry...
  },

  _noteApplyNewFailure: function () {
    // here would be a good place to record telemetry...
  },

  /**
   * Find the GUID of an item that is a duplicate of the incoming item but
   * happens to have a different GUID.
   *
   * @return GUID of the similar item; falsy otherwise
   */
  _findDupe: function (item) {
    // By default, assume there are no duplicate items for the engine
  },

  _deleteId: function (id) {
    this._tracker.removeChangedID(id);

    // Remember this id to delete at the end of sync
    if (this._delete.ids == null)
      this._delete.ids = [id];
    else
      this._delete.ids.push(id);
  },

  _switchItemToDupe(localDupeGUID, incomingItem) {
    // The local, duplicate ID is always deleted on the server.
    this._deleteId(localDupeGUID);

    // We unconditionally change the item's ID in case the engine knows of
    // an item but doesn't expose it through itemExists. If the API
    // contract were stronger, this could be changed.
    this._log.debug("Switching local ID to incoming: " + localDupeGUID + " -> " +
                    incomingItem.id);
    this._store.changeItemID(localDupeGUID, incomingItem.id);
  },

  /**
   * Reconcile incoming record with local state.
   *
   * This function essentially determines whether to apply an incoming record.
   *
   * @param  item
   *         Record from server to be tested for application.
   * @return boolean
   *         Truthy if incoming record should be applied. False if not.
   */
  _reconcile: function (item) {
    if (this._log.level <= Log.Level.Trace) {
      this._log.trace("Incoming: " + item);
    }

    // We start reconciling by collecting a bunch of state. We do this here
    // because some state may change during the course of this function and we
    // need to operate on the original values.
    let existsLocally = this._store.itemExists(item.id);
    let locallyModified = this._modified.has(item.id);

    // TODO Handle clock drift better. Tracked in bug 721181.
    let remoteAge = AsyncResource.serverTime - item.modified;
    let localAge = locallyModified ?
      (Date.now() / 1000 - this._modified.getModifiedTimestamp(item.id)) : null;
    let remoteIsNewer = remoteAge < localAge;

    this._log.trace("Reconciling " + item.id + ". exists=" +
                    existsLocally + "; modified=" + locallyModified +
                    "; local age=" + localAge + "; incoming age=" +
                    remoteAge);

    // We handle deletions first so subsequent logic doesn't have to check
    // deleted flags.
    if (item.deleted) {
      // If the item doesn't exist locally, there is nothing for us to do. We
      // can't check for duplicates because the incoming record has no data
      // which can be used for duplicate detection.
      if (!existsLocally) {
        this._log.trace("Ignoring incoming item because it was deleted and " +
                        "the item does not exist locally.");
        return false;
      }

      // We decide whether to process the deletion by comparing the record
      // ages. If the item is not modified locally, the remote side wins and
      // the deletion is processed. If it is modified locally, we take the
      // newer record.
      if (!locallyModified) {
        this._log.trace("Applying incoming delete because the local item " +
                        "exists and isn't modified.");
        return true;
      }

      // TODO As part of bug 720592, determine whether we should do more here.
      // In the case where the local changes are newer, it is quite possible
      // that the local client will restore data a remote client had tried to
      // delete. There might be a good reason for that delete and it might be
      // unexpected for this client to restore that data.
      this._log.trace("Incoming record is deleted but we had local changes. " +
                      "Applying the youngest record.");
      return remoteIsNewer;
    }

    // At this point the incoming record is not for a deletion and must have
    // data. If the incoming record does not exist locally, we check for a local
    // duplicate existing under a different ID. The default implementation of
    // _findDupe() is empty, so engines have to opt in to this functionality.
    //
    // If we find a duplicate, we change the local ID to the incoming ID and we
    // refresh the metadata collected above. See bug 710448 for the history
    // of this logic.
    if (!existsLocally) {
      let localDupeGUID = this._findDupe(item);
      if (localDupeGUID) {
        this._log.trace("Local item " + localDupeGUID + " is a duplicate for " +
                        "incoming item " + item.id);

        // The current API contract does not mandate that the ID returned by
        // _findDupe() actually exists. Therefore, we have to perform this
        // check.
        existsLocally = this._store.itemExists(localDupeGUID);

        // If the local item was modified, we carry its metadata forward so
        // appropriate reconciling can be performed.
        if (this._modified.has(localDupeGUID)) {
          locallyModified = true;
          localAge = this._tracker._now() - this._modified.getModifiedTimestamp(localDupeGUID);
          remoteIsNewer = remoteAge < localAge;

          this._modified.swap(localDupeGUID, item.id);
        } else {
          locallyModified = false;
          localAge = null;
        }

        // Tell the engine to do whatever it needs to switch the items.
        this._switchItemToDupe(localDupeGUID, item);

        this._log.debug("Local item after duplication: age=" + localAge +
                        "; modified=" + locallyModified + "; exists=" +
                        existsLocally);
      } else {
        this._log.trace("No duplicate found for incoming item: " + item.id);
      }
    }

    // At this point we've performed duplicate detection. But, nothing here
    // should depend on duplicate detection as the above should have updated
    // state seamlessly.

    if (!existsLocally) {
      // If the item doesn't exist locally and we have no local modifications
      // to the item (implying that it was not deleted), always apply the remote
      // item.
      if (!locallyModified) {
        this._log.trace("Applying incoming because local item does not exist " +
                        "and was not deleted.");
        return true;
      }

      // If the item was modified locally but isn't present, it must have
      // been deleted. If the incoming record is younger, we restore from
      // that record.
      if (remoteIsNewer) {
        this._log.trace("Applying incoming because local item was deleted " +
                        "before the incoming item was changed.");
        this._modified.delete(item.id);
        return true;
      }

      this._log.trace("Ignoring incoming item because the local item's " +
                      "deletion is newer.");
      return false;
    }

    // If the remote and local records are the same, there is nothing to be
    // done, so we don't do anything. In the ideal world, this logic wouldn't
    // be here and the engine would take a record and apply it. The reason we
    // want to defer this logic is because it would avoid a redundant and
    // possibly expensive dip into the storage layer to query item state.
    // This should get addressed in the async rewrite, so we ignore it for now.
    let localRecord = this._createRecord(item.id);
    let recordsEqual = Utils.deepEquals(item.cleartext,
                                        localRecord.cleartext);

    // If the records are the same, we don't need to do anything. This does
    // potentially throw away a local modification time. But, if the records
    // are the same, does it matter?
    if (recordsEqual) {
      this._log.trace("Ignoring incoming item because the local item is " +
                      "identical.");

      this._modified.delete(item.id);
      return false;
    }

    // At this point the records are different.

    // If we have no local modifications, always take the server record.
    if (!locallyModified) {
      this._log.trace("Applying incoming record because no local conflicts.");
      return true;
    }

    // At this point, records are different and the local record is modified.
    // We resolve conflicts by record age, where the newest one wins. This does
    // result in data loss and should be handled by giving the engine an
    // opportunity to merge the records. Bug 720592 tracks this feature.
    this._log.warn("DATA LOSS: Both local and remote changes to record: " +
                   item.id);
    return remoteIsNewer;
  },
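
  // Worked example of the age-based conflict resolution above (illustrative
  // numbers only): if the server time is 1000 and an incoming record was
  // modified at 940, remoteAge is 60. If the local change was tracked at
  // local time 990 and the local clock also reads 1000, localAge is 10, so
  // remoteAge < localAge is false and the locally modified record wins. Had
  // the local change been tracked at 900, localAge would be 100 and the
  // incoming record would be applied instead.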

  // Upload outgoing records.
  _uploadOutgoing: function () {
    this._log.trace("Uploading local changes to server.");

    let modifiedIDs = this._modified.ids();
    if (modifiedIDs.length) {
      this._log.trace("Preparing " + modifiedIDs.length +
                      " outgoing records");

      let counts = { sent: modifiedIDs.length, failed: 0 };

      // collection we'll upload
      let up = new Collection(this.engineURL, null, this.service);

      let failed = [];
      let successful = [];
      let handleResponse = (resp, batchOngoing = false) => {
        // Note: We don't want to update this.lastSync or this._modified until
        // the batch is complete; however, we want to remember success/failure
        // indicators for when that happens.
        if (!resp.success) {
          this._log.debug("Uploading records failed: " + resp);
          resp.failureCode = resp.status == 412 ? ENGINE_BATCH_INTERRUPTED : ENGINE_UPLOAD_FAIL;
          throw resp;
        }

        // Remember the failed and succeeded records from this POST.
        failed = failed.concat(Object.keys(resp.obj.failed));
        successful = successful.concat(resp.obj.success);

        if (batchOngoing) {
          // Nothing to do yet
          return;
        }

        // Advance lastSync since we've finished the batch.
        let modified = resp.headers["x-weave-timestamp"];
        if (modified > this.lastSync) {
          this.lastSync = modified;
        }

        if (failed.length && this._log.level <= Log.Level.Debug) {
          this._log.debug("Records that will be uploaded again because "
                          + "the server couldn't store them: "
                          + failed.join(", "));
        }

        counts.failed += failed.length;

        for (let id of successful) {
          this._modified.delete(id);
        }

        this._onRecordsWritten(successful, failed);

        // clear for next batch
        failed.length = 0;
        successful.length = 0;
      };

      let postQueue = up.newPostQueue(this._log, this.lastSync, handleResponse);

      for (let id of modifiedIDs) {
        let out;
        let ok = false;
        try {
          out = this._createRecord(id);
          if (this._log.level <= Log.Level.Trace)
            this._log.trace("Outgoing: " + out);

          out.encrypt(this.service.collectionKeys.keyForCollection(this.name));
          ok = true;
        } catch (ex) {
          if (Async.isShutdownException(ex)) {
            throw ex;
          }
          this._log.warn("Error creating record", ex);
        }
        if (ok) {
          let { enqueued, error } = postQueue.enqueue(out);
          if (!enqueued) {
            ++counts.failed;
            if (!this.allowSkippedRecord) {
              throw error;
            }
          }
        }
        this._store._sleep(0);
      }
      postQueue.flush(true);
      Observers.notify("weave:engine:sync:uploaded", counts, this.name);
    }
  },
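
  // Illustrative sketch (not part of this module) of the post-queue contract
  // assumed above: handleResponse is invoked once per POST the queue makes,
  // with batchOngoing=true for intermediate POSTs and false for the final one,
  // at which point lastSync is advanced and this._modified is pruned.
  //
  //   let { enqueued, error } = postQueue.enqueue(record); // may trigger a POST
  //   // ... enqueue remaining records ...
  //   postQueue.flush(true); // final POST; handleResponse(resp, false) commits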

  _onRecordsWritten(succeeded, failed) {
    // Implement this method to take specific actions against successfully
    // uploaded records and failed records.
  },
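
  // Illustrative sketch (not part of this module): a subclass might override
  // _onRecordsWritten to update local bookkeeping once the server accepts an
  // upload. The setSyncedTimestamp helper below is hypothetical.
  //
  //   _onRecordsWritten(succeeded, failed) {
  //     for (let guid of succeeded) {
  //       this._store.setSyncedTimestamp(guid, Date.now());
  //     }
  //   },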

  // Any cleanup necessary.
  // Save the current snapshot so as to calculate changes at next sync
  _syncFinish: function () {
    this._log.trace("Finishing up sync");
    this._tracker.resetScore();

    let doDelete = Utils.bind2(this, function(key, val) {
      let coll = new Collection(this.engineURL, this._recordObj, this.service);
      coll[key] = val;
      coll.delete();
    });

    for (let [key, val] of Object.entries(this._delete)) {
      // Remove the key for future uses
      delete this._delete[key];

      // Send a simple delete for the property
      if (key != "ids" || val.length <= 100)
        doDelete(key, val);
      else {
        // For many ids, split into chunks of at most 100
        while (val.length > 0) {
          doDelete(key, val.slice(0, 100));
          val = val.slice(100);
        }
      }
    }
  },
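
  // Worked example of the delete batching above (illustrative numbers): with
  // 250 queued ids, _syncFinish issues three DELETE requests against the
  // collection — ids 0-99, 100-199, and 200-249 — keeping each request under
  // the 100-id limit.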

  _syncCleanup: function () {
    if (!this._modified) {
      return;
    }

    // Mark failed WBOs as changed again so they are reuploaded next time.
    this.trackRemainingChanges();
    this._modified.clear();
  },

  _sync: function () {
    try {
      this._syncStartup();
      Observers.notify("weave:engine:sync:status", "process-incoming");
      this._processIncoming();
      Observers.notify("weave:engine:sync:status", "upload-outgoing");
      this._uploadOutgoing();
      this._syncFinish();
    } finally {
      this._syncCleanup();
    }
  },

  canDecrypt: function () {
    // Report failure even if there's nothing to decrypt
    let canDecrypt = false;

    // Fetch the most recently uploaded record and try to decrypt it
    let test = new Collection(this.engineURL, this._recordObj, this.service);
    test.limit = 1;
    test.sort = "newest";
    test.full = true;

    let key = this.service.collectionKeys.keyForCollection(this.name);
    test.recordHandler = function recordHandler(record) {
      record.decrypt(key);
      canDecrypt = true;
    }.bind(this);

    // Any failure fetching/decrypting will just result in false
    try {
      this._log.trace("Trying to decrypt a record from the server...");
      test.get();
    } catch (ex) {
      if (Async.isShutdownException(ex)) {
        throw ex;
      }
      this._log.debug("Failed test decrypt", ex);
    }

    return canDecrypt;
  },
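
  // Illustrative usage (not part of this module): callers can probe whether
  // the current collection keys work before attempting a full sync.
  //
  //   if (!engine.canDecrypt()) {
  //     // Keys are likely stale or wrong; refresh keys rather than syncing
  //     // this engine and churning undecryptable records.
  //   }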

  _resetClient: function () {
    this.resetLastSync();
    this.previousFailed = [];
    this.toFetch = [];
  },

  wipeServer: function () {
    let response = this.service.resource(this.engineURL).delete();
    if (response.status != 200 && response.status != 404) {
      throw response;
    }
    this._resetClient();
  },

  removeClientData: function () {
    // Implement this method in engines that store client specific data
    // on the server.
  },

  /*
   * Decide on (and partially effect) an error-handling strategy.
   *
   * Asks the Service to respond to an HMAC error, which might result in keys
   * being downloaded. That call returns true if it takes an action which
   * might allow a retry to occur.
   *
   * If `mayRetry` is truthy, and the Service suggests a retry,
   * handleHMACMismatch returns kRecoveryStrategy.retry. Otherwise, it returns
   * kRecoveryStrategy.error.
   *
   * Subclasses of SyncEngine can override this method to allow for different
   * behavior -- e.g., to delete and ignore erroneous entries.
   *
   * All return values will be part of the kRecoveryStrategy enumeration.
   */
  handleHMACMismatch: function (item, mayRetry) {
    // By default we either try again, or bail out noisily.
    return (this.service.handleHMACEvent() && mayRetry) ?
           SyncEngine.kRecoveryStrategy.retry :
           SyncEngine.kRecoveryStrategy.error;
  },
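
  // Illustrative sketch (not part of this module): a subclass that prefers to
  // skip undecryptable entries could override handleHMACMismatch, assuming the
  // kRecoveryStrategy enumeration also includes an `ignore` member:
  //
  //   handleHMACMismatch: function (item, mayRetry) {
  //     this._log.warn("Ignoring record with HMAC mismatch: " + item.id);
  //     return SyncEngine.kRecoveryStrategy.ignore;
  //   },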

  /**
   * Returns a changeset containing all items in the store. The default
   * implementation returns a changeset with timestamps from long ago, to
   * ensure we always use the remote version if one exists.
   *
   * This function is only called for the first sync. Subsequent syncs call
   * `pullNewChanges`.
   *
   * @return A `Changeset` object.
   */
  pullAllChanges() {
    let changeset = new Changeset();
    for (let id in this._store.getAllIDs()) {
      changeset.set(id, 0);
    }
    return changeset;
  },

  /*
   * Returns a changeset containing entries for all currently tracked items.
   * The default implementation returns a changeset with timestamps indicating
   * when the item was added to the tracker.
   *
   * @return A `Changeset` object.
   */
  pullNewChanges() {
    return new Changeset(this.getChangedIDs());
  },

  /**
   * Adds all remaining changeset entries back to the tracker, typically for
   * items that failed to upload. This method is called at the end of each
   * sync.
   */
  trackRemainingChanges() {
    for (let [id, change] of this._modified.entries()) {
      this._tracker.addChangedID(id, change);
    }
  },
};

/**
 * A changeset is created for each sync in `Engine::get{Changed, All}IDs`,
 * and stores opaque change data for tracked IDs. The default implementation
 * only records timestamps, though engines can extend this to store additional
 * data for each entry.
 */
class Changeset {
  // Creates a changeset with an initial set of tracked entries.
  constructor(changes = {}) {
    this.changes = changes;
  }

  // Returns the last modified time, in seconds, for an entry in the changeset.
  // `id` is guaranteed to be in the set.
  getModifiedTimestamp(id) {
    return this.changes[id];
  }

  // Adds a change for a tracked ID to the changeset.
  set(id, change) {
    this.changes[id] = change;
  }

  // Indicates whether an entry is in the changeset.
  has(id) {
    return id in this.changes;
  }

  // Deletes an entry from the changeset. Used to clean up entries for
  // reconciled and successfully uploaded records.
  delete(id) {
    delete this.changes[id];
  }

  // Swaps two entries in the changeset. Used when reconciling duplicates that
  // have local changes.
  swap(oldID, newID) {
    this.changes[newID] = this.changes[oldID];
    delete this.changes[oldID];
  }

  // Returns an array of all tracked IDs in this changeset.
  ids() {
    return Object.keys(this.changes);
  }

  // Returns an array of `[id, change]` tuples. Used to repopulate the tracker
  // with entries for failed uploads at the end of a sync.
  entries() {
    return Object.entries(this.changes);
  }

  // Returns the number of entries in this changeset.
  count() {
    return this.ids().length;
  }

  // Clears the changeset.
  clear() {
    this.changes = {};
  }
}
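
// Illustrative usage of Changeset (not part of this module); GUIDs and
// timestamps are made up:
//
//   let changes = new Changeset({ "guid1": 1472684109.77 });
//   changes.set("guid2", 1472684123.41);
//   changes.has("guid1");                  // true
//   changes.getModifiedTimestamp("guid2"); // 1472684123.41
//   changes.swap("guid1", "guid3");        // re-key a reconciled duplicate
//   changes.delete("guid2");               // drop a successfully uploaded id
//   changes.ids();                         // ["guid3"]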