2018-03-13 18:23:57 +03:00
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
|
|
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
|
2018-05-08 18:53:06 +03:00
|
|
|
/* global __URI__ */
|
|
|
|
|
2018-03-13 18:23:57 +03:00
|
|
|
"use strict";
|
|
|
|
|
2018-05-11 18:09:44 +03:00
|
|
|
// Symbols exported by this JSM module.
// Note: `remoteSettingsBroadcastHandler` is defined further down in this file.
var EXPORTED_SYMBOLS = [
  "RemoteSettings",
  "jexlFilterFunc",
  "remoteSettingsBroadcastHandler",
];
|
2018-03-13 18:23:57 +03:00
|
|
|
|
|
|
|
ChromeUtils.import("resource://gre/modules/Services.jsm");
|
|
|
|
ChromeUtils.import("resource://gre/modules/XPCOMUtils.jsm");
|
|
|
|
const { OS } = ChromeUtils.import("resource://gre/modules/osfile.jsm", {});
|
2018-05-26 03:02:29 +03:00
|
|
|
XPCOMUtils.defineLazyGlobalGetters(this, ["fetch", "indexedDB"]);
|
2018-03-13 18:23:57 +03:00
|
|
|
|
|
|
|
ChromeUtils.defineModuleGetter(this, "Kinto",
|
|
|
|
"resource://services-common/kinto-offline-client.js");
|
|
|
|
ChromeUtils.defineModuleGetter(this, "KintoHttpClient",
|
|
|
|
"resource://services-common/kinto-http-client.js");
|
|
|
|
ChromeUtils.defineModuleGetter(this, "CanonicalJSON",
|
|
|
|
"resource://gre/modules/CanonicalJSON.jsm");
|
|
|
|
ChromeUtils.defineModuleGetter(this, "UptakeTelemetry",
|
|
|
|
"resource://services-common/uptake-telemetry.js");
|
2018-05-09 23:53:04 +03:00
|
|
|
ChromeUtils.defineModuleGetter(this, "ClientEnvironmentBase",
|
|
|
|
"resource://gre/modules/components-utils/ClientEnvironment.jsm");
|
2018-05-24 11:48:00 +03:00
|
|
|
ChromeUtils.defineModuleGetter(this, "FilterExpressions",
|
|
|
|
"resource://gre/modules/components-utils/FilterExpressions.jsm");
|
2018-05-08 18:53:06 +03:00
|
|
|
ChromeUtils.defineModuleGetter(this, "pushBroadcastService",
|
|
|
|
"resource://gre/modules/PushBroadcastService.jsm");
|
2018-03-13 18:23:57 +03:00
|
|
|
|
|
|
|
const PREF_SETTINGS_SERVER = "services.settings.server";
|
2018-03-30 00:38:16 +03:00
|
|
|
const PREF_SETTINGS_DEFAULT_BUCKET = "services.settings.default_bucket";
|
|
|
|
const PREF_SETTINGS_DEFAULT_SIGNER = "services.settings.default_signer";
|
2018-03-13 18:23:57 +03:00
|
|
|
const PREF_SETTINGS_VERIFY_SIGNATURE = "services.settings.verify_signature";
|
|
|
|
const PREF_SETTINGS_SERVER_BACKOFF = "services.settings.server.backoff";
|
|
|
|
const PREF_SETTINGS_CHANGES_PATH = "services.settings.changes.path";
|
|
|
|
const PREF_SETTINGS_LAST_UPDATE = "services.settings.last_update_seconds";
|
|
|
|
const PREF_SETTINGS_LAST_ETAG = "services.settings.last_etag";
|
|
|
|
const PREF_SETTINGS_CLOCK_SKEW_SECONDS = "services.settings.clock_skew_seconds";
|
|
|
|
const PREF_SETTINGS_LOAD_DUMP = "services.settings.load_dump";
|
|
|
|
|
|
|
|
// Telemetry update source identifier.
|
|
|
|
const TELEMETRY_HISTOGRAM_KEY = "settings-changes-monitoring";
|
|
|
|
|
|
|
|
const INVALID_SIGNATURE = "Invalid content/signature";
|
2018-05-08 19:41:50 +03:00
|
|
|
const MISSING_SIGNATURE = "Missing signature";
|
2018-03-13 18:23:57 +03:00
|
|
|
|
2018-05-09 23:53:04 +03:00
|
|
|
/**
|
|
|
|
* cacheProxy returns an object Proxy that will memoize properties of the target.
|
|
|
|
*/
|
|
|
|
/**
 * Return a Proxy around `target` that memoizes property reads: each property
 * is read from the target at most once, after which the cached value is
 * returned for every subsequent access.
 *
 * @param {Object} target the object whose property reads should be memoized.
 * @return {Proxy} a caching wrapper around `target`.
 */
function cacheProxy(target) {
  const memo = new Map();
  const handler = {
    get(obj, key, receiver) {
      if (!memo.has(key)) {
        memo.set(key, obj[key]);
      }
      return memo.get(key);
    }
  };
  return new Proxy(target, handler);
}
|
|
|
|
|
|
|
|
/**
 * Environment exposed to the entry filter functions, extending the shared
 * base client environment with Gecko appinfo attributes.
 */
class ClientEnvironment extends ClientEnvironmentBase {
  // Application ID read from appinfo.
  static get appID() {
    // eg. Firefox is "{ec8030f7-c20a-464f-9b0e-13a3a9e97384}".
    Services.appinfo.QueryInterface(Ci.nsIXULAppInfo);
    return Services.appinfo.ID;
  }

  // Platform (toolkit) version string read from appinfo.
  static get toolkitVersion() {
    Services.appinfo.QueryInterface(Ci.nsIPlatformInfo);
    return Services.appinfo.platformVersion;
  }
}
|
|
|
|
|
2018-05-11 18:09:44 +03:00
|
|
|
/**
 * Default entry filtering function: exclude remote settings entries whose
 * JEXL `filter_expression` evaluates to a falsy value.
 *
 * @param {Object} entry the remote settings record to inspect.
 * @param {Object} environment the evaluation context for the expression.
 * @return {Promise<Object|null>} the entry if it applies, `null` otherwise.
 */
async function jexlFilterFunc(entry, environment) {
  const { filter_expression } = entry;
  if (!filter_expression) {
    // Entries without an expression always apply.
    return entry;
  }
  let evaluation;
  try {
    evaluation = await FilterExpressions.eval(filter_expression, { environment });
  } catch (e) {
    // A broken expression must not break the whole sync: report it and let
    // the entry be filtered out (evaluation stays undefined).
    Cu.reportError(e);
  }
  return evaluation ? entry : null;
}
|
|
|
|
|
2018-03-13 18:23:57 +03:00
|
|
|
|
|
|
|
/**
 * Merge the local records with the changes received from the server.
 * Server versions replace local ones, tombstones are dropped, and the
 * result is sorted by record id (the canonical order for signing).
 *
 * @param {Object} collection Kinto collection (used to strip local-only fields).
 * @param {Array<Object>} localRecords records currently stored locally.
 * @param {Array<Object>} changes records received from the server.
 * @return {Array<Object>} merged records, sorted by id.
 */
function mergeChanges(collection, localRecords, changes) {
  const byId = new Map();
  // Local records by id (without local-only fields).
  for (const record of localRecords) {
    byId.set(record.id, collection.cleanLocalFields(record));
  }
  // All existing records are replaced by the version from the server.
  for (const record of changes) {
    byId.set(record.id, record);
  }

  // Filter out deleted records, then sort the list by record id.
  const merged = [...byId.values()].filter((record) => !record.deleted);
  merged.sort((a, b) => {
    if (a.id < b.id) {
      return -1;
    }
    return a.id > b.id ? 1 : 0;
  });
  return merged;
}
|
|
|
|
|
|
|
|
|
2018-05-08 19:41:50 +03:00
|
|
|
/**
 * Fetch the signature metadata of a collection from the Kinto server.
 *
 * @param {String} remote base URL of the Kinto server.
 * @param {Object} collection Kinto collection (provides bucket/name).
 * @return {Promise<Object>} the `signature` attribute of the collection data.
 */
async function fetchCollectionMetadata(remote, collection) {
  const metadata = await new KintoHttpClient(remote)
    .bucket(collection.bucket)
    .collection(collection.name)
    .getData();
  return metadata.signature;
}
|
|
|
|
|
2018-05-08 19:41:50 +03:00
|
|
|
/**
 * Fetch the full list of records of a collection from the Kinto server,
 * sorted by record id (the canonical order for signature verification).
 *
 * @param {String} remote base URL of the Kinto server.
 * @param {Object} collection Kinto collection (provides bucket/name).
 * @return {Promise<Object>} the `listRecords()` response payload.
 */
async function fetchRemoteCollection(remote, collection) {
  const httpClient = new KintoHttpClient(remote);
  const endpoint = httpClient
    .bucket(collection.bucket)
    .collection(collection.name);
  return endpoint.listRecords({ sort: "id" });
}
|
|
|
|
|
|
|
|
/**
 * Fetch the list of changed collections from the server's changes endpoint,
 * using the last known ETag for cache validation.
 *
 * @param {String} url full URL of the changes endpoint.
 * @param {String} lastEtag ETag obtained from the previous poll (optional).
 * @return {Promise<Object>} `{changes, currentEtag, serverTimeMillis, backoffSeconds}`.
 * @throws {Error} "Server error ..." when the response body has no `data` list.
 */
async function fetchLatestChanges(url, lastEtag) {
  //
  // Fetch the list of changes objects from the server that looks like:
  // {"data":[
  //  {
  //    "host":"kinto-ota.dev.mozaws.net",
  //    "last_modified":1450717104423,
  //    "bucket":"blocklists",
  //    "collection":"certificates"
  //   }]}

  // Use ETag to obtain a `304 Not modified` when no change occurred,
  // and `?_since` parameter to only keep entries that weren't processed yet.
  const headers = {};
  if (lastEtag) {
    headers["If-None-Match"] = lastEtag;
    url += `?_since=${lastEtag}`;
  }
  const response = await fetch(url, {headers});

  let changes = [];
  // If no changes since last time, go on with empty list of changes.
  if (response.status != 304) {
    let payload;
    try {
      payload = await response.json();
    } catch (e) {
      // Non-JSON body (eg. HTML error page): keep the parse error message so
      // it shows up in the "Server error" thrown just below.
      payload = e.message;
    }
    if (!payload.hasOwnProperty("data")) {
      // If the server is failing, the JSON response might not contain the
      // expected data (e.g. error response - Bug 1259145)
      throw new Error(`Server error ${response.status} ${response.statusText}: ${JSON.stringify(payload)}`);
    }
    changes = payload.data;
  }
  // The server should always return ETag. But we've had situations where the CDN
  // was interfering.
  const currentEtag = response.headers.has("ETag") ? response.headers.get("ETag") : undefined;
  // NOTE(review): Date.parse() yields NaN when the Date header is absent —
  // presumably callers tolerate a NaN serverTimeMillis; confirm.
  let serverTimeMillis = Date.parse(response.headers.get("Date"));
  // Since the response is served via a CDN, the Date header value could have been cached.
  const ageSeconds = response.headers.has("Age") ? parseInt(response.headers.get("Age"), 10) : 0;
  serverTimeMillis += ageSeconds * 1000;

  // Check if the server asked the clients to back off.
  let backoffSeconds;
  if (response.headers.has("Backoff")) {
    const value = parseInt(response.headers.get("Backoff"), 10);
    if (!isNaN(value)) {
      backoffSeconds = value;
    }
  }

  return {changes, currentEtag, serverTimeMillis, backoffSeconds};
}
|
|
|
|
|
2018-05-25 00:55:23 +03:00
|
|
|
/**
 * Load the JSON file distributed with the release for this collection.
 *
 * @param {String} bucket
 * @param {String} collection
 * @return {Promise<Object>} the parsed content of the packaged dump.
 * @throws {Error} when the file cannot be read, or rejects when it is not valid JSON.
 */
async function loadDumpFile(bucket, collection) {
  const fileURI = `resource://app/defaults/settings/${bucket}/${collection}.json`;
  const response = await fetch(fileURI);
  if (response.ok) {
    // Rejects if the file content is not valid JSON.
    return response.json();
  }
  throw new Error(`Could not read from '${fileURI}'`);
}
|
|
|
|
|
2018-03-13 18:23:57 +03:00
|
|
|
|
|
|
|
class RemoteSettingsClient {
|
|
|
|
|
2018-05-11 18:09:44 +03:00
|
|
|
/**
 * @param {String} collectionName the name of the collection on the server.
 * @param {Object} options
 * @param {String} options.bucketName the bucket where the collection lives.
 * @param {String} options.signerName the signer id used to verify content signatures.
 * @param {Function} options.filterFunc entry filter (defaults to JEXL filtering).
 * @param {String} options.lastCheckTimePref custom pref storing the last check time
 *                 (defaults to a per-bucket/collection pref, see `lastCheckTimePref`).
 */
constructor(collectionName, { bucketName, signerName, filterFunc = jexlFilterFunc, lastCheckTimePref }) {
  this.collectionName = collectionName;
  this.bucketName = bucketName;
  this.signerName = signerName;
  this.filterFunc = filterFunc;
  this._lastCheckTimePref = lastCheckTimePref;

  // Listeners per event name; "sync" is the only event declared.
  this._listeners = new Map();
  this._listeners.set("sync", []);

  // Kinto is instantiated lazily, on first `openCollection()` call.
  this._kinto = null;
}
|
|
|
|
|
|
|
|
get identifier() {
|
|
|
|
return `${this.bucketName}/${this.collectionName}`;
|
|
|
|
}
|
|
|
|
|
2018-05-09 15:19:02 +03:00
|
|
|
get lastCheckTimePref() {
|
|
|
|
return this._lastCheckTimePref || `services.settings.${this.bucketName}.${this.collectionName}.last_check`;
|
|
|
|
}
|
|
|
|
|
2018-05-28 17:10:27 +03:00
|
|
|
/**
|
|
|
|
* Event emitter: will execute the registered listeners in the order and
|
|
|
|
* sequentially.
|
|
|
|
*
|
|
|
|
* Note: we don't use `toolkit/modules/EventEmitter` because we want to throw
|
|
|
|
* an error when a listener fails to execute.
|
|
|
|
*
|
|
|
|
* @param {string} event the event name
|
|
|
|
* @param {Object} payload the event payload to call the listeners with
|
|
|
|
*/
|
|
|
|
async emit(event, payload) {
|
|
|
|
const callbacks = this._listeners.get("sync");
|
|
|
|
let firstError;
|
|
|
|
for (const cb of callbacks) {
|
|
|
|
try {
|
|
|
|
await cb(payload);
|
|
|
|
} catch (e) {
|
|
|
|
firstError = e;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (firstError) {
|
|
|
|
throw firstError;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-13 18:23:57 +03:00
|
|
|
on(event, callback) {
|
2018-05-28 17:10:27 +03:00
|
|
|
if (!this._listeners.has(event)) {
|
2018-03-13 18:23:57 +03:00
|
|
|
throw new Error(`Unknown event type ${event}`);
|
|
|
|
}
|
2018-05-28 17:10:27 +03:00
|
|
|
this._listeners.get(event).push(callback);
|
2018-03-13 18:23:57 +03:00
|
|
|
}
|
|
|
|
|
2018-07-27 19:55:37 +03:00
|
|
|
off(event, callback) {
|
|
|
|
if (!this._listeners.has(event)) {
|
|
|
|
throw new Error(`Unknown event type ${event}`);
|
|
|
|
}
|
|
|
|
const callbacks = this._listeners.get(event);
|
|
|
|
const i = callbacks.indexOf(callback);
|
|
|
|
if (i < 0) {
|
|
|
|
throw new Error(`Unknown callback`);
|
|
|
|
} else {
|
|
|
|
callbacks.splice(i, 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-13 18:23:57 +03:00
|
|
|
/**
|
|
|
|
* Open the underlying Kinto collection, using the appropriate adapter and
|
|
|
|
* options. This acts as a context manager where the connection is closed
|
|
|
|
* once the specified `callback` has finished.
|
|
|
|
*
|
|
|
|
* @param {callback} function the async function to execute with the open SQlite connection.
|
|
|
|
* @param {Object} options additional advanced options.
|
|
|
|
* @param {string} options.hooks hooks to execute on synchronization (see Kinto.js docs)
|
|
|
|
*/
|
2018-03-22 13:39:15 +03:00
|
|
|
async openCollection(options = {}) {
|
2018-03-13 18:23:57 +03:00
|
|
|
if (!this._kinto) {
|
2018-03-22 13:39:15 +03:00
|
|
|
this._kinto = new Kinto({ bucket: this.bucketName, adapter: Kinto.adapters.IDB });
|
2018-03-13 18:23:57 +03:00
|
|
|
}
|
2018-03-22 13:39:15 +03:00
|
|
|
return this._kinto.collection(this.collectionName, options);
|
2018-03-13 18:23:57 +03:00
|
|
|
}
|
|
|
|
|
2018-03-30 00:38:16 +03:00
|
|
|
/**
|
|
|
|
* Lists settings.
|
|
|
|
*
|
|
|
|
* @param {Object} options The options object.
|
|
|
|
* @param {Object} options.filters Filter the results (default: `{}`).
|
|
|
|
* @param {Object} options.order The order to apply (default: `-last_modified`).
|
|
|
|
* @return {Promise}
|
|
|
|
*/
|
|
|
|
async get(options = {}) {
|
|
|
|
// In Bug 1451031, we will do some jexl filtering to limit the list items
|
|
|
|
// whose target is matched.
|
2018-03-22 13:39:15 +03:00
|
|
|
const { filters = {}, order } = options;
|
|
|
|
const c = await this.openCollection();
|
2018-05-10 16:27:21 +03:00
|
|
|
|
|
|
|
const timestamp = await c.db.getLastModified();
|
|
|
|
// If the local database was never synchronized, then we attempt to load
|
|
|
|
// a packaged JSON dump.
|
|
|
|
if (timestamp == null) {
|
|
|
|
try {
|
2018-08-06 18:34:28 +03:00
|
|
|
const { data } = await loadDumpFile(this.bucketName, this.collectionName);
|
2018-05-25 00:55:23 +03:00
|
|
|
await c.loadDump(data);
|
2018-05-10 16:27:21 +03:00
|
|
|
} catch (e) {
|
|
|
|
// Report but return an empty list since there will be no data anyway.
|
|
|
|
Cu.reportError(e);
|
|
|
|
return [];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-22 13:39:15 +03:00
|
|
|
const { data } = await c.list({ filters, order });
|
2018-05-09 23:53:04 +03:00
|
|
|
return this._filterEntries(data);
|
2018-03-30 00:38:16 +03:00
|
|
|
}
|
|
|
|
|
2018-03-13 18:23:57 +03:00
|
|
|
/**
 * Synchronize from Kinto server, if necessary.
 *
 * @param {int} lastModified the lastModified date (on the server) for
 *                           the remote collection.
 * @param {Date} serverTime the current date return by the server.
 * @param {Object} options additional advanced options.
 * @param {bool} options.loadDump load initial dump from disk on first sync (default: true)
 * @return {Promise} which rejects on sync or process failure.
 */
async maybeSync(lastModified, serverTime, options = { loadDump: true }) {
  const {loadDump} = options;
  const remote = Services.prefs.getCharPref(PREF_SETTINGS_SERVER);
  const verifySignature = Services.prefs.getBoolPref(PREF_SETTINGS_VERIFY_SIGNATURE, true);

  // if there is a signerName and collection signing is enforced, add a
  // hook for incoming changes that validates the signature
  const colOptions = {};
  if (this.signerName && verifySignature) {
    colOptions.hooks = {
      "incoming-changes": [(payload, collection) => {
        return this._validateCollectionSignature(remote, payload, collection);
      }]
    };
  }

  // Telemetry status to report in the `finally` block below.
  // It remains null until a specific outcome (success or failure) is known.
  let reportStatus = null;
  try {
    const collection = await this.openCollection(colOptions);
    // Synchronize remote data into a local Sqlite DB.
    let collectionLastModified = await collection.db.getLastModified();

    // If there is no data currently in the collection, attempt to import
    // initial data from the application defaults.
    // This allows to avoid synchronizing the whole collection content on
    // cold start.
    if (!collectionLastModified && loadDump) {
      try {
        const initialData = await loadDumpFile(this.bucketName, this.collectionName);
        await collection.loadDump(initialData.data);
        collectionLastModified = await collection.db.getLastModified();
      } catch (e) {
        // Report but go-on (sync will fetch everything instead).
        Cu.reportError(e);
      }
    }

    // If the data is up to date, there's no need to sync. We still need
    // to record the fact that a check happened.
    if (lastModified <= collectionLastModified) {
      this._updateLastCheck(serverTime);
      reportStatus = UptakeTelemetry.STATUS.UP_TO_DATE;
      return;
    }

    // Fetch changes from server.
    let syncResult;
    try {
      // Server changes have priority during synchronization.
      const strategy = Kinto.syncStrategy.SERVER_WINS;
      syncResult = await collection.sync({remote, strategy});
      const { ok } = syncResult;
      if (!ok) {
        // Some synchronization conflicts occurred.
        reportStatus = UptakeTelemetry.STATUS.CONFLICT_ERROR;
        throw new Error("Sync failed");
      }
    } catch (e) {
      if (e.message == INVALID_SIGNATURE) {
        // Signature verification failed during synchronization.
        reportStatus = UptakeTelemetry.STATUS.SIGNATURE_ERROR;
        // if sync fails with a signature error, it's likely that our
        // local data has been modified in some way.
        // We will attempt to fix this by retrieving the whole
        // remote collection.
        const payload = await fetchRemoteCollection(remote, collection);
        try {
          await this._validateCollectionSignature(remote, payload, collection, {ignoreLocal: true});
        } catch (e) {
          // NOTE: this inner `e` shadows the outer catch variable.
          reportStatus = UptakeTelemetry.STATUS.SIGNATURE_RETRY_ERROR;
          throw e;
        }

        // The signature is good (we haven't thrown).
        // Now we will inspect what we had locally.
        const { data: oldData } = await collection.list();

        // We build a sync result as if a diff-based sync was performed.
        syncResult = { created: [], updated: [], deleted: [] };

        // If the remote last_modified is newer than the local last_modified,
        // replace the local data
        const localLastModified = await collection.db.getLastModified();
        if (payload.last_modified >= localLastModified) {
          const { data: newData } = payload;
          await collection.clear();
          await collection.loadDump(newData);

          // Compare local and remote to populate the sync result
          const oldById = new Map(oldData.map(e => [e.id, e]));
          for (const r of newData) {
            const old = oldById.get(r.id);
            if (old) {
              if (old.last_modified != r.last_modified) {
                syncResult.updated.push({ old, new: r });
              }
              oldById.delete(r.id);
            } else {
              syncResult.created.push(r);
            }
          }
          // Records that remain in our map now are those missing from remote
          syncResult.deleted = Array.from(oldById.values());
        }

      } else {
        // The sync has thrown, it can be related to metadata, network or a general error.
        if (e.message == MISSING_SIGNATURE) {
          // Collection metadata has no signature info, no need to retry.
          reportStatus = UptakeTelemetry.STATUS.SIGNATURE_ERROR;
        } else if (/NetworkError/.test(e.message)) {
          reportStatus = UptakeTelemetry.STATUS.NETWORK_ERROR;
        } else if (/Backoff/.test(e.message)) {
          reportStatus = UptakeTelemetry.STATUS.BACKOFF;
        } else {
          reportStatus = UptakeTelemetry.STATUS.SYNC_ERROR;
        }
        throw e;
      }
    }

    // Handle the obtained records (ie. apply locally through events).
    // Build the event data list. It should be filtered (ie. by application target)
    const { created: allCreated, updated: allUpdated, deleted: allDeleted } = syncResult;
    const [created, deleted, updatedFiltered] = await Promise.all(
      [allCreated, allDeleted, allUpdated.map(e => e.new)].map(this._filterEntries.bind(this))
    );
    // For updates, keep entries whose updated form matches the target.
    const updatedFilteredIds = new Set(updatedFiltered.map(e => e.id));
    const updated = allUpdated.filter(({ new: { id } }) => updatedFilteredIds.has(id));

    // If every changed entry is filtered, we don't even fire the event.
    if (created.length || updated.length || deleted.length) {
      // Read local collection of records (also filtered).
      const { data: allData } = await collection.list();
      const current = await this._filterEntries(allData);
      const payload = { data: { current, created, updated, deleted } };
      try {
        await this.emit("sync", payload);
      } catch (e) {
        reportStatus = UptakeTelemetry.STATUS.APPLY_ERROR;
        throw e;
      }
    }

    // Track last update.
    this._updateLastCheck(serverTime);

  } catch (e) {
    // No specific error was tracked, mark it as unknown.
    if (reportStatus === null) {
      reportStatus = UptakeTelemetry.STATUS.UNKNOWN_ERROR;
    }
    throw e;
  } finally {
    // No error was reported, this is a success!
    if (reportStatus === null) {
      reportStatus = UptakeTelemetry.STATUS.SUCCESS;
    }
    // Report success/error status to Telemetry.
    UptakeTelemetry.report(this.identifier, reportStatus);
  }
}
|
|
|
|
|
|
|
|
/**
 * Verify the content signature of the collection data.
 *
 * @param {String} remote base URL of the Kinto server.
 * @param {Object} payload the data to validate (shape depends on the caller,
 *                 see note below).
 * @param {Object} collection the Kinto collection handle.
 * @param {Object} options
 * @param {bool} options.ignoreLocal serialize `payload` as-is instead of
 *               merging it with the local records.
 * @return {Promise<Object>} the `payload` when the signature is valid.
 * @throws {Error} MISSING_SIGNATURE / INVALID_SIGNATURE.
 */
async _validateCollectionSignature(remote, payload, collection, options = {}) {
  const {ignoreLocal} = options;
  // this is a content-signature field from an autograph response.
  const signaturePayload = await fetchCollectionMetadata(remote, collection);
  if (!signaturePayload) {
    throw new Error(MISSING_SIGNATURE);
  }
  const {x5u, signature} = signaturePayload;
  // Fetch the certificate chain referenced by the signature metadata.
  const certChainResponse = await fetch(x5u);
  const certChain = await certChainResponse.text();

  const verifier = Cc["@mozilla.org/security/contentsignatureverifier;1"]
                     .createInstance(Ci.nsIContentSignatureVerifier);

  // NOTE(review): the two branches read differently-cased timestamp fields
  // (`last_modified` vs `lastModified`) — presumably because the payload
  // shape differs by caller (full fetch vs. sync hook); confirm.
  let toSerialize;
  if (ignoreLocal) {
    toSerialize = {
      last_modified: `${payload.last_modified}`,
      data: payload.data
    };
  } else {
    const {data: localRecords} = await collection.list();
    const records = mergeChanges(collection, localRecords, payload.changes);
    toSerialize = {
      last_modified: `${payload.lastModified}`,
      data: records
    };
  }

  // Canonical JSON serialization is required for a stable signature input.
  const serialized = CanonicalJSON.stringify(toSerialize);

  if (verifier.verifyContentSignature(serialized, "p384ecdsa=" + signature,
                                      certChain,
                                      this.signerName)) {
    // In case the hash is valid, apply the changes locally.
    return payload;
  }
  throw new Error(INVALID_SIGNATURE);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Save last time server was checked in users prefs.
|
|
|
|
*
|
|
|
|
* @param {Date} serverTime the current date return by server.
|
|
|
|
*/
|
|
|
|
_updateLastCheck(serverTime) {
|
|
|
|
const checkedServerTimeInSeconds = Math.round(serverTime / 1000);
|
|
|
|
Services.prefs.setIntPref(this.lastCheckTimePref, checkedServerTimeInSeconds);
|
|
|
|
}
|
2018-05-09 23:53:04 +03:00
|
|
|
|
|
|
|
async _filterEntries(data) {
|
|
|
|
// Filter entries for which calls to `this.filterFunc` returns null.
|
|
|
|
if (!this.filterFunc) {
|
|
|
|
return data;
|
|
|
|
}
|
|
|
|
const environment = cacheProxy(ClientEnvironment);
|
|
|
|
const dataPromises = data.map(e => this.filterFunc(e, environment));
|
|
|
|
const results = await Promise.all(dataPromises);
|
|
|
|
return results.filter(v => !!v);
|
|
|
|
}
|
2018-03-13 18:23:57 +03:00
|
|
|
}
|
|
|
|
|
2018-05-25 00:55:23 +03:00
|
|
|
/**
 * Check if an IndexedDB database exists for the specified bucket and collection.
 *
 * @param {String} bucket
 * @param {String} collection
 * @return {bool} Whether it exists or not.
 */
async function databaseExists(bucket, collection) {
  // The dbname is chosen by kinto.js from the bucket and collection names.
  // https://github.com/Kinto/kinto.js/blob/41aa1526e/src/collection.js#L231
  const dbname = `${bucket}/${collection}`;
  try {
    await new Promise((resolve, reject) => {
      const openRequest = indexedDB.open(dbname, 1);
      // The upgrade callback fires only when the database did not exist:
      // abort so nothing gets created, and treat it as "not found".
      openRequest.onupgradeneeded = event => {
        event.target.transaction.abort();
        reject(event.target.error);
      };
      openRequest.onerror = event => reject(event.target.error);
      openRequest.onsuccess = event => resolve(event.target.result);
    });
    return true;
  } catch (e) {
    return false;
  }
}
|
|
|
|
|
|
|
|
/**
 * Check if we ship a JSON dump for the specified bucket and collection.
 *
 * @param {String} bucket
 * @param {String} collection
 * @return {bool} Whether it is present or not.
 */
async function hasLocalDump(bucket, collection) {
  // Success/failure of loading the dump is the existence signal.
  return loadDumpFile(bucket, collection).then(() => true, () => false);
}
|
|
|
|
|
2018-03-30 00:38:16 +03:00
|
|
|
|
|
|
|
function remoteSettingsFunction() {
|
|
|
|
const _clients = new Map();
|
|
|
|
|
|
|
|
// If not explicitly specified, use the default bucket name and signer.
|
|
|
|
const mainBucket = Services.prefs.getCharPref(PREF_SETTINGS_DEFAULT_BUCKET);
|
|
|
|
const defaultSigner = Services.prefs.getCharPref(PREF_SETTINGS_DEFAULT_SIGNER);
|
|
|
|
|
2018-06-08 18:08:33 +03:00
|
|
|
/**
|
|
|
|
* RemoteSettings constructor.
|
|
|
|
*
|
|
|
|
* @param {String} collectionName The remote settings identifier
|
|
|
|
* @param {Object} options Advanced options
|
|
|
|
* @returns {RemoteSettingsClient} An instance of a Remote Settings client.
|
|
|
|
*/
|
2018-03-30 00:38:16 +03:00
|
|
|
const remoteSettings = function(collectionName, options) {
|
|
|
|
// Get or instantiate a remote settings client.
|
|
|
|
const rsOptions = {
|
|
|
|
bucketName: mainBucket,
|
|
|
|
signerName: defaultSigner,
|
|
|
|
...options
|
|
|
|
};
|
|
|
|
const { bucketName } = rsOptions;
|
|
|
|
const key = `${bucketName}/${collectionName}`;
|
|
|
|
if (!_clients.has(key)) {
|
2018-07-25 18:43:08 +03:00
|
|
|
// Register a new client!
|
2018-03-30 00:38:16 +03:00
|
|
|
const c = new RemoteSettingsClient(collectionName, rsOptions);
|
|
|
|
_clients.set(key, c);
|
2018-07-25 18:43:08 +03:00
|
|
|
// Invalidate the polling status, since we want the new collection to
|
|
|
|
// be taken into account.
|
|
|
|
Services.prefs.clearUserPref(PREF_SETTINGS_LAST_ETAG);
|
2018-03-30 00:38:16 +03:00
|
|
|
}
|
|
|
|
return _clients.get(key);
|
|
|
|
};
|
|
|
|
|
2018-06-08 18:08:33 +03:00
|
|
|
Object.defineProperty(remoteSettings, "pollingEndpoint", {
|
|
|
|
get() {
|
|
|
|
const kintoServer = Services.prefs.getCharPref(PREF_SETTINGS_SERVER);
|
|
|
|
const changesPath = Services.prefs.getCharPref(PREF_SETTINGS_CHANGES_PATH);
|
|
|
|
return kintoServer + changesPath;
|
|
|
|
}
|
|
|
|
});
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Internal helper to retrieve existing instances of clients or new instances
|
|
|
|
* with default options if possible, or `null` if bucket/collection are unknown.
|
|
|
|
*/
|
|
|
|
async function _client(bucketName, collectionName) {
|
|
|
|
// Check if a client was registered for this bucket/collection. Potentially
|
|
|
|
// with some specific options like signer, filter function etc.
|
|
|
|
const key = `${bucketName}/${collectionName}`;
|
|
|
|
const client = _clients.get(key);
|
|
|
|
if (client) {
|
|
|
|
// If the bucket name was changed manually on the client instance and does not
|
|
|
|
// match, don't return it.
|
|
|
|
if (client.bucketName == bucketName) {
|
|
|
|
return client;
|
|
|
|
}
|
|
|
|
|
|
|
|
// There was no client registered for this bucket/collection, but it's the main bucket,
|
|
|
|
// therefore we can instantiate a client with the default options.
|
|
|
|
// So if we have a local database or if we ship a JSON dump, then it means that
|
|
|
|
// this client is known but it was not registered yet (eg. calling module not "imported" yet).
|
|
|
|
} else if (bucketName == mainBucket) {
|
|
|
|
const [dbExists, localDump] = await Promise.all([
|
|
|
|
databaseExists(bucketName, collectionName),
|
|
|
|
hasLocalDump(bucketName, collectionName)
|
|
|
|
]);
|
|
|
|
if (dbExists || localDump) {
|
|
|
|
return new RemoteSettingsClient(collectionName, { bucketName, signerName: defaultSigner });
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Else, we cannot return a client insttance because we are not able to synchronize data in specific buckets.
|
|
|
|
// Mainly because we cannot guess which `signerName` has to be used for example.
|
|
|
|
// And we don't want to synchronize data for collections in the main bucket that are
|
|
|
|
// completely unknown (ie. no database and no JSON dump).
|
|
|
|
return null;
|
|
|
|
}
|
|
|
|
|
|
|
|
  /**
   * Main polling method, called by the ping mechanism.
   *
   * Fetches the list of collections modified on the server since the last
   * poll, reports the outcome to Uptake Telemetry, honours server backoff
   * instructions, and triggers a synchronization on every known client
   * whose collection changed.
   *
   * @returns {Promise} or throws error if something goes wrong.
   */
  remoteSettings.pollChanges = async () => {
    // Check if the server backoff time is elapsed.
    if (Services.prefs.prefHasUserValue(PREF_SETTINGS_SERVER_BACKOFF)) {
      const backoffReleaseTime = Services.prefs.getCharPref(PREF_SETTINGS_SERVER_BACKOFF);
      const remainingMilliseconds = parseInt(backoffReleaseTime, 10) - Date.now();
      if (remainingMilliseconds > 0) {
        // Backoff time has not elapsed yet.
        UptakeTelemetry.report(TELEMETRY_HISTOGRAM_KEY,
                              UptakeTelemetry.STATUS.BACKOFF);
        throw new Error(`Server is asking clients to back off; retry in ${Math.ceil(remainingMilliseconds / 1000)}s.`);
      } else {
        Services.prefs.clearUserPref(PREF_SETTINGS_SERVER_BACKOFF);
      }
    }

    // The ETag of the previous poll lets the server return only the entries
    // that changed since then; left `undefined` on the very first poll.
    let lastEtag;
    if (Services.prefs.prefHasUserValue(PREF_SETTINGS_LAST_ETAG)) {
      lastEtag = Services.prefs.getCharPref(PREF_SETTINGS_LAST_ETAG);
    }

    let pollResult;
    try {
      pollResult = await fetchLatestChanges(remoteSettings.pollingEndpoint, lastEtag);
    } catch (e) {
      // Report polling error to Uptake Telemetry.
      // NOTE(review): errors are classified by matching their message text,
      // which relies on the wording produced by `fetchLatestChanges()` —
      // keep the two in sync.
      let report;
      if (/Server/.test(e.message)) {
        report = UptakeTelemetry.STATUS.SERVER_ERROR;
      } else if (/NetworkError/.test(e.message)) {
        report = UptakeTelemetry.STATUS.NETWORK_ERROR;
      } else {
        report = UptakeTelemetry.STATUS.UNKNOWN_ERROR;
      }
      UptakeTelemetry.report(TELEMETRY_HISTOGRAM_KEY, report);
      // No need to go further.
      throw new Error(`Polling for changes failed: ${e.message}.`);
    }

    const {serverTimeMillis, changes, currentEtag, backoffSeconds} = pollResult;

    // Report polling success to Uptake Telemetry.
    const report = changes.length == 0 ? UptakeTelemetry.STATUS.UP_TO_DATE
                                       : UptakeTelemetry.STATUS.SUCCESS;
    UptakeTelemetry.report(TELEMETRY_HISTOGRAM_KEY, report);

    // Check if the server asked the clients to back off (for next poll).
    if (backoffSeconds) {
      const backoffReleaseTime = Date.now() + backoffSeconds * 1000;
      Services.prefs.setCharPref(PREF_SETTINGS_SERVER_BACKOFF, backoffReleaseTime);
    }

    // Record new update time and the difference between local and server time.
    // Negative clockDifference means local time is behind server time
    // by the absolute of that value in seconds (positive means it's ahead)
    const clockDifference = Math.floor((Date.now() - serverTimeMillis) / 1000);
    Services.prefs.setIntPref(PREF_SETTINGS_CLOCK_SKEW_SECONDS, clockDifference);
    Services.prefs.setIntPref(PREF_SETTINGS_LAST_UPDATE, serverTimeMillis / 1000);

    // Pref allows disabling the loading of packaged JSON dumps during sync
    // (defaults to true).
    const loadDump = Services.prefs.getBoolPref(PREF_SETTINGS_LOAD_DUMP, true);

    // Iterate through the collections version info and initiate a synchronization
    // on the related remote settings client.
    let firstError;
    for (const change of changes) {
      const { bucket, collection, last_modified } = change;
      // `_client()` returns null for completely unknown collections
      // (no local database and no packaged dump) — skip those.
      const client = await _client(bucket, collection);
      if (!client) {
        continue;
      }
      // Start synchronization! It will be a no-op if the specified `lastModified` equals
      // the one in the local database.
      try {
        await client.maybeSync(last_modified, serverTimeMillis, {loadDump});
      } catch (e) {
        // Keep syncing the remaining collections, but remember the first
        // failure (and the `changes` entry that caused it) to rethrow below.
        if (!firstError) {
          firstError = e;
          firstError.details = change;
        }
      }
    }
    if (firstError) {
      // cause the promise to reject by throwing the first observed error
      throw firstError;
    }

    // Save current Etag for next poll.
    if (currentEtag) {
      Services.prefs.setCharPref(PREF_SETTINGS_LAST_ETAG, currentEtag);
    }

    Services.obs.notifyObservers(null, "remote-settings-changes-polled");
  };
|
2018-03-13 18:23:57 +03:00
|
|
|
|
2018-06-08 18:08:33 +03:00
|
|
|
/**
|
|
|
|
* Returns an object with polling status information and the list of
|
|
|
|
* known remote settings collections.
|
|
|
|
*/
|
|
|
|
remoteSettings.inspect = async () => {
|
|
|
|
const { changes, currentEtag: serverTimestamp } = await fetchLatestChanges(remoteSettings.pollingEndpoint);
|
|
|
|
|
|
|
|
const collections = await Promise.all(changes.map(async (change) => {
|
|
|
|
const { bucket, collection, last_modified: serverTimestamp } = change;
|
|
|
|
const client = await _client(bucket, collection);
|
|
|
|
if (!client) {
|
|
|
|
return null;
|
|
|
|
}
|
|
|
|
const kintoCol = await client.openCollection();
|
|
|
|
const localTimestamp = await kintoCol.db.getLastModified();
|
|
|
|
const lastCheck = Services.prefs.getIntPref(client.lastCheckTimePref, 0);
|
|
|
|
return {
|
|
|
|
bucket,
|
|
|
|
collection,
|
|
|
|
localTimestamp,
|
|
|
|
serverTimestamp,
|
|
|
|
lastCheck,
|
|
|
|
signerName: client.signerName
|
|
|
|
};
|
|
|
|
}));
|
|
|
|
|
|
|
|
return {
|
|
|
|
serverURL: Services.prefs.getCharPref(PREF_SETTINGS_SERVER),
|
|
|
|
serverTimestamp,
|
|
|
|
localTimestamp: Services.prefs.getCharPref(PREF_SETTINGS_LAST_ETAG, null),
|
|
|
|
lastCheck: Services.prefs.getIntPref(PREF_SETTINGS_LAST_UPDATE, 0),
|
|
|
|
mainBucket,
|
|
|
|
defaultSigner,
|
|
|
|
collections: collections.filter(c => !!c)
|
|
|
|
};
|
|
|
|
};
|
|
|
|
|
2018-06-14 21:54:19 +03:00
|
|
|
/**
|
|
|
|
* Startup function called from nsBrowserGlue.
|
|
|
|
*/
|
|
|
|
remoteSettings.init = () => {
|
|
|
|
// Hook the Push broadcast and RemoteSettings polling.
|
|
|
|
const broadcastID = "remote-settings/monitor_changes";
|
|
|
|
// When we start on a new profile there will be no ETag stored.
|
|
|
|
// Use an arbitrary ETag that is guaranteed not to occur.
|
|
|
|
// This will trigger a broadcast message but that's fine because we
|
|
|
|
// will check the changes on each collection and retrieve only the
|
|
|
|
// changes (e.g. nothing if we have a dump with the same data).
|
|
|
|
const currentVersion = Services.prefs.getStringPref(PREF_SETTINGS_LAST_ETAG, "\"0\"");
|
|
|
|
const moduleInfo = {
|
|
|
|
moduleURI: __URI__,
|
|
|
|
symbolName: "remoteSettingsBroadcastHandler",
|
|
|
|
};
|
|
|
|
pushBroadcastService.addListener(broadcastID, currentVersion, moduleInfo);
|
2018-05-08 18:53:06 +03:00
|
|
|
};
|
|
|
|
|
2018-03-30 00:38:16 +03:00
|
|
|
return remoteSettings;
|
2018-03-13 18:23:57 +03:00
|
|
|
}
|
2018-03-30 00:38:16 +03:00
|
|
|
|
|
|
|
// Module-level singleton exported via EXPORTED_SYMBOLS; all consumers share
// the same instance (and therefore the same set of clients).
var RemoteSettings = remoteSettingsFunction();
|
2018-05-08 18:53:06 +03:00
|
|
|
|
|
|
|
// Exported handler that the push broadcast service invokes (it is registered
// by `remoteSettings.init()` via its moduleURI/symbolName descriptor).
var remoteSettingsBroadcastHandler = {
  /**
   * Poll for remote settings changes whenever a broadcast message is received.
   *
   * @param {String} data        payload of the broadcast message (unused —
   *                             presumably the new version; pollChanges()
   *                             fetches the actual changes itself).
   * @param {String} broadcastID identifier of the broadcast (unused).
   * @returns {Promise} resolves once polling and synchronization complete.
   */
  async receivedBroadcastMessage(data, broadcastID) {
    return RemoteSettings.pollChanges();
  }
};
|