diff --git a/.cargo/config.in b/.cargo/config.in index ed3b8e3289c2..538438bc9228 100644 --- a/.cargo/config.in +++ b/.cargo/config.in @@ -25,7 +25,7 @@ rev = "0dc3e6e7c5371fe21f69b847f61c65fe6d6dc317" [source."https://github.com/mozilla/application-services"] git = "https://github.com/mozilla/application-services" replace-with = "vendored-sources" -rev = "57a07e89e9ac92756b60b67c4a6ee06975b86288" +rev = "e8d7530319fa6c20d9de78d031c9398630eca3cd" [source."https://github.com/mozilla-spidermonkey/jsparagus"] git = "https://github.com/mozilla-spidermonkey/jsparagus" diff --git a/Cargo.lock b/Cargo.lock index acc6c9ef6f21..65294f9e07ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1274,7 +1274,7 @@ checksum = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3" [[package]] name = "error-support" version = "0.1.0" -source = "git+https://github.com/mozilla/application-services?rev=57a07e89e9ac92756b60b67c4a6ee06975b86288#57a07e89e9ac92756b60b67c4a6ee06975b86288" +source = "git+https://github.com/mozilla/application-services?rev=e8d7530319fa6c20d9de78d031c9398630eca3cd#e8d7530319fa6c20d9de78d031c9398630eca3cd" dependencies = [ "failure", ] @@ -1610,7 +1610,7 @@ dependencies = [ [[package]] name = "fxa-client" version = "0.1.0" -source = "git+https://github.com/mozilla/application-services?rev=57a07e89e9ac92756b60b67c4a6ee06975b86288#57a07e89e9ac92756b60b67c4a6ee06975b86288" +source = "git+https://github.com/mozilla/application-services?rev=e8d7530319fa6c20d9de78d031c9398630eca3cd#e8d7530319fa6c20d9de78d031c9398630eca3cd" dependencies = [ "base64 0.12.0", "byteorder", @@ -2300,7 +2300,7 @@ dependencies = [ [[package]] name = "interrupt-support" version = "0.1.0" -source = "git+https://github.com/mozilla/application-services?rev=57a07e89e9ac92756b60b67c4a6ee06975b86288#57a07e89e9ac92756b60b67c4a6ee06975b86288" +source = "git+https://github.com/mozilla/application-services?rev=e8d7530319fa6c20d9de78d031c9398630eca3cd#e8d7530319fa6c20d9de78d031c9398630eca3cd" [[package]] name = "intl-memoizer" @@ -3238,7 +3238,7 @@ dependencies = [ [[package]] name = "nss" version = "0.1.0" -source = "git+https://github.com/mozilla/application-services?rev=57a07e89e9ac92756b60b67c4a6ee06975b86288#57a07e89e9ac92756b60b67c4a6ee06975b86288" +source = "git+https://github.com/mozilla/application-services?rev=e8d7530319fa6c20d9de78d031c9398630eca3cd#e8d7530319fa6c20d9de78d031c9398630eca3cd" dependencies = [ "base64 0.12.0", "error-support", @@ -3252,12 +3252,12 @@ dependencies = [ [[package]] name = "nss_build_common" version = "0.1.0" -source = "git+https://github.com/mozilla/application-services?rev=57a07e89e9ac92756b60b67c4a6ee06975b86288#57a07e89e9ac92756b60b67c4a6ee06975b86288" +source = "git+https://github.com/mozilla/application-services?rev=e8d7530319fa6c20d9de78d031c9398630eca3cd#e8d7530319fa6c20d9de78d031c9398630eca3cd" [[package]] name = "nss_sys" version = "0.1.0" -source = "git+https://github.com/mozilla/application-services?rev=57a07e89e9ac92756b60b67c4a6ee06975b86288#57a07e89e9ac92756b60b67c4a6ee06975b86288" +source = "git+https://github.com/mozilla/application-services?rev=e8d7530319fa6c20d9de78d031c9398630eca3cd#e8d7530319fa6c20d9de78d031c9398630eca3cd" dependencies = [ "nss_build_common", ] @@ -3916,7 +3916,7 @@ dependencies = [ [[package]] name = "rc_crypto" version = "0.1.0" -source = "git+https://github.com/mozilla/application-services?rev=57a07e89e9ac92756b60b67c4a6ee06975b86288#57a07e89e9ac92756b60b67c4a6ee06975b86288" +source = 
"git+https://github.com/mozilla/application-services?rev=e8d7530319fa6c20d9de78d031c9398630eca3cd#e8d7530319fa6c20d9de78d031c9398630eca3cd" dependencies = [ "base64 0.12.0", "ece", @@ -4497,7 +4497,7 @@ dependencies = [ [[package]] name = "sql-support" version = "0.1.0" -source = "git+https://github.com/mozilla/application-services?rev=57a07e89e9ac92756b60b67c4a6ee06975b86288#57a07e89e9ac92756b60b67c4a6ee06975b86288" +source = "git+https://github.com/mozilla/application-services?rev=e8d7530319fa6c20d9de78d031c9398630eca3cd#e8d7530319fa6c20d9de78d031c9398630eca3cd" dependencies = [ "ffi-support", "interrupt-support", @@ -4694,7 +4694,7 @@ dependencies = [ [[package]] name = "sync-guid" version = "0.1.0" -source = "git+https://github.com/mozilla/application-services?rev=57a07e89e9ac92756b60b67c4a6ee06975b86288#57a07e89e9ac92756b60b67c4a6ee06975b86288" +source = "git+https://github.com/mozilla/application-services?rev=e8d7530319fa6c20d9de78d031c9398630eca3cd#e8d7530319fa6c20d9de78d031c9398630eca3cd" dependencies = [ "base64 0.12.0", "rand", @@ -4705,7 +4705,7 @@ dependencies = [ [[package]] name = "sync15" version = "0.1.0" -source = "git+https://github.com/mozilla/application-services?rev=57a07e89e9ac92756b60b67c4a6ee06975b86288#57a07e89e9ac92756b60b67c4a6ee06975b86288" +source = "git+https://github.com/mozilla/application-services?rev=e8d7530319fa6c20d9de78d031c9398630eca3cd#e8d7530319fa6c20d9de78d031c9398630eca3cd" dependencies = [ "base16", "base64 0.12.0", @@ -4728,7 +4728,7 @@ dependencies = [ [[package]] name = "sync15-traits" version = "0.1.0" -source = "git+https://github.com/mozilla/application-services?rev=57a07e89e9ac92756b60b67c4a6ee06975b86288#57a07e89e9ac92756b60b67c4a6ee06975b86288" +source = "git+https://github.com/mozilla/application-services?rev=e8d7530319fa6c20d9de78d031c9398630eca3cd#e8d7530319fa6c20d9de78d031c9398630eca3cd" dependencies = [ "failure", "ffi-support", @@ -5303,7 +5303,7 @@ checksum = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" [[package]] name = "viaduct" version = "0.1.0" -source = "git+https://github.com/mozilla/application-services?rev=57a07e89e9ac92756b60b67c4a6ee06975b86288#57a07e89e9ac92756b60b67c4a6ee06975b86288" +source = "git+https://github.com/mozilla/application-services?rev=e8d7530319fa6c20d9de78d031c9398630eca3cd#e8d7530319fa6c20d9de78d031c9398630eca3cd" dependencies = [ "failure", "failure_derive", @@ -5419,7 +5419,7 @@ dependencies = [ [[package]] name = "webext-storage" version = "0.1.0" -source = "git+https://github.com/mozilla/application-services?rev=57a07e89e9ac92756b60b67c4a6ee06975b86288#57a07e89e9ac92756b60b67c4a6ee06975b86288" +source = "git+https://github.com/mozilla/application-services?rev=e8d7530319fa6c20d9de78d031c9398630eca3cd#e8d7530319fa6c20d9de78d031c9398630eca3cd" dependencies = [ "error-support", "failure", diff --git a/services/fxaccounts/rust-bridge/firefox-accounts-bridge/Cargo.toml b/services/fxaccounts/rust-bridge/firefox-accounts-bridge/Cargo.toml index bd03c84ab349..4b5d7a6d9792 100644 --- a/services/fxaccounts/rust-bridge/firefox-accounts-bridge/Cargo.toml +++ b/services/fxaccounts/rust-bridge/firefox-accounts-bridge/Cargo.toml @@ -21,4 +21,4 @@ nsstring = { path = "../../../../xpcom/rust/nsstring" } xpcom = { path = "../../../../xpcom/rust/xpcom" } storage_variant = { path = "../../../../storage/variant" } thin-vec = { version = "0.1", features = ["gecko-ffi"] } -fxa-client = { git = "https://github.com/mozilla/application-services", rev = "57a07e89e9ac92756b60b67c4a6ee06975b86288", 
features = ["gecko"] } +fxa-client = { git = "https://github.com/mozilla/application-services", rev = "e8d7530319fa6c20d9de78d031c9398630eca3cd", features = ["gecko"] } diff --git a/services/sync/golden_gate/Cargo.toml b/services/sync/golden_gate/Cargo.toml index dc12de27ae98..bd57a39b410b 100644 --- a/services/sync/golden_gate/Cargo.toml +++ b/services/sync/golden_gate/Cargo.toml @@ -8,14 +8,14 @@ edition = "2018" [dependencies] atomic_refcell = "0.1" cstr = "0.1" -interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "57a07e89e9ac92756b60b67c4a6ee06975b86288" } +interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "e8d7530319fa6c20d9de78d031c9398630eca3cd" } log = "0.4" moz_task = { path = "../../../xpcom/rust/moz_task" } nserror = { path = "../../../xpcom/rust/nserror" } nsstring = { path = "../../../xpcom/rust/nsstring" } serde_json = "1" storage_variant = { path = "../../../storage/variant" } -sync15-traits = { git = "https://github.com/mozilla/application-services", rev = "57a07e89e9ac92756b60b67c4a6ee06975b86288" } +sync15-traits = { git = "https://github.com/mozilla/application-services", rev = "e8d7530319fa6c20d9de78d031c9398630eca3cd" } xpcom = { path = "../../../xpcom/rust/xpcom" } [dependencies.thin-vec] diff --git a/services/sync/modules/engines/extension-storage.js b/services/sync/modules/engines/extension-storage.js index c090cbfcb7a9..af7b9f7c8a4b 100644 --- a/services/sync/modules/engines/extension-storage.js +++ b/services/sync/modules/engines/extension-storage.js @@ -16,6 +16,7 @@ const { XPCOMUtils } = ChromeUtils.import( XPCOMUtils.defineLazyModuleGetters(this, { BridgedEngine: "resource://services-sync/bridged_engine.js", extensionStorageSync: "resource://gre/modules/ExtensionStorageSync.jsm", + Observers: "resource://services-common/observers.js", Svc: "resource://services-sync/util.js", SyncEngine: "resource://services-sync/engines.js", Tracker: "resource://services-sync/engines.js", @@ -115,6 +116,41 @@ ExtensionStorageEngineBridge.prototype = { }); }, + _takeMigrationInfo() { + return new Promise((resolve, reject) => { + this._bridge + .QueryInterface(Ci.mozIExtensionStorageArea) + .takeMigrationInfo({ + QueryInterface: ChromeUtils.generateQI([ + Ci.mozIExtensionStorageCallback, + ]), + handleSuccess: result => { + resolve(result ? JSON.parse(result) : null); + }, + handleError: (code, message) => { + this._log.warn("Error fetching migration info", message, code); + // `takeMigrationInfo` doesn't actually perform the migration, + // just reads (and clears) any data stored in the DB from the + // previous migration. + // + // Any errors here are very likely occurring a good while + // after the migration ran, so we just warn and pretend + // nothing was there. 
+ resolve(null); + }, + }); + }); + }, + + async _syncStartup() { + let result = await super._syncStartup(); + let info = await this._takeMigrationInfo(); + if (info) { + Observers.notify("weave:telemetry:migration", info, "webext-storage"); + } + return result; + }, + async _processIncoming() { await super._processIncoming(); try { diff --git a/services/sync/modules/telemetry.js b/services/sync/modules/telemetry.js index 6460b2f852ef..3a8c90ff7be0 100644 --- a/services/sync/modules/telemetry.js +++ b/services/sync/modules/telemetry.js @@ -86,6 +86,8 @@ const TOPICS = [ "weave:telemetry:event", "weave:telemetry:histogram", "fxa:telemetry:event", + + "weave:telemetry:migration", ]; const PING_FORMAT_VERSION = 1; @@ -561,6 +563,7 @@ class SyncTelemetryImpl { this.discarded = 0; this.events = []; this.histograms = {}; + this.migrations = []; this.maxEventsCount = Svc.Prefs.get("telemetry.maxEventsCount", 1000); this.maxPayloadCount = Svc.Prefs.get("telemetry.maxPayloadCount"); this.submissionInterval = @@ -684,11 +687,31 @@ class SyncTelemetryImpl { deviceID, sessionStartDate: this.sessionStartDate, events: this.events.length == 0 ? undefined : this.events, + migrations: this.migrations.length == 0 ? undefined : this.migrations, histograms: Object.keys(this.histograms).length == 0 ? undefined : this.histograms, }; } + _addMigrationRecord(type, info) { + log.debug("Saw telemetry migration info", type, info); + // Updates to this need to be documented in `sync-ping.rst` + switch (type) { + case "webext-storage": + this.migrations.push({ + type: "webext-storage", + entries: +info.entries, + entriesSuccessful: +info.entries_successful, + extensions: +info.extensions, + extensionsSuccessful: +info.extensions_successful, + openFailure: !!info.open_failure, + }); + break; + default: + throw new Error("Bug: Unknown migration record type " + type); + } + } + finish(reason) { // Note that we might be in the middle of a sync right now, and so we don't // want to touch this.current. @@ -696,6 +719,7 @@ class SyncTelemetryImpl { this.payloads = []; this.discarded = 0; this.events = []; + this.migrations = []; this.histograms = {}; this.submit(result); } @@ -720,7 +744,8 @@ class SyncTelemetryImpl { // We still call submit() with possibly illegal payloads so that tests can // know that the ping was built. We don't end up submitting them, however. let numEvents = record.events ? record.events.length : 0; - if (record.syncs.length || numEvents) { + let numMigrations = record.migrations ? 
record.migrations.length : 0; + if (record.syncs.length || numEvents || numMigrations) { log.trace( `submitting ${record.syncs.length} sync record(s) and ` + `${numEvents} event(s) to telemetry` @@ -1017,6 +1042,10 @@ class SyncTelemetryImpl { this._addHistogram(data); break; + case "weave:telemetry:migration": + this._addMigrationRecord(data, subject); + break; + default: log.warn(`unexpected observer topic ${topic}`); break; diff --git a/services/sync/tests/unit/head_helpers.js b/services/sync/tests/unit/head_helpers.js index 4b6392f5a3e9..a97e24610bab 100644 --- a/services/sync/tests/unit/head_helpers.js +++ b/services/sync/tests/unit/head_helpers.js @@ -433,7 +433,8 @@ async function sync_and_validate_telem( async function sync_engine_and_validate_telem( engine, allowErrorPings, - onError + onError, + wantFullPing = false ) { let telem = get_sync_test_telemetry(); let caughtError = null; @@ -500,6 +501,8 @@ async function sync_engine_and_validate_telem( onError(ping.syncs[0], ping); } reject(caughtError); + } else if (wantFullPing) { + resolve(ping); } else { resolve(ping.syncs[0]); } diff --git a/services/sync/tests/unit/sync_ping_schema.json b/services/sync/tests/unit/sync_ping_schema.json index 80bad34d28e3..507713cab2d7 100644 --- a/services/sync/tests/unit/sync_ping_schema.json +++ b/services/sync/tests/unit/sync_ping_schema.json @@ -35,6 +35,11 @@ "minItems": 1, "items": { "$ref": "#/definitions/event" } }, + "migrations": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#/definitions/migration" } + }, "histograms": { "type": "object", "additionalProperties": { @@ -164,6 +169,22 @@ "minItems": 4, "maxItems": 6 }, + "migration": { + "oneOf": [ + { "$ref": "#/definitions/webextMigration" } + ] + }, + "webextMigration": { + "required": ["type"], + "properties": { + "type": { "enum": ["webext-storage"] }, + "entries": { "type": "integer" }, + "entriesSuccessful": { "type": "integer" }, + "extensions": { "type": "integer" }, + "extensionsSuccessful": { "type": "integer" }, + "openFailure": { "type": "boolean" } + } + }, "error": { "oneOf": [ { "$ref": "#/definitions/httpError" }, diff --git a/services/sync/tests/unit/test_extension_storage_engine.js b/services/sync/tests/unit/test_extension_storage_engine.js index 64828dcf7d79..6183cf06f57f 100644 --- a/services/sync/tests/unit/test_extension_storage_engine.js +++ b/services/sync/tests/unit/test_extension_storage_engine.js @@ -117,6 +117,7 @@ add_task(async function test_notifyPendingChanges() { engine._bridge = { QueryInterface: ChromeUtils.generateQI([ Ci.mozIBridgedSyncEngine, + Ci.mozIExtensionStorageArea, Ci.mozISyncedExtensionStorageArea, ]), ensureCurrentSyncId(id, callback) { @@ -156,6 +157,9 @@ add_task(async function test_notifyPendingChanges() { syncFinished(callback) { callback.handleSuccess(); }, + takeMigrationInfo(callback) { + callback.handleSuccess(null); + }, }; let server = await serverForFoo(engine); diff --git a/services/sync/tests/unit/test_extension_storage_migration_telem.js b/services/sync/tests/unit/test_extension_storage_migration_telem.js new file mode 100644 index 000000000000..5977056237e5 --- /dev/null +++ b/services/sync/tests/unit/test_extension_storage_migration_telem.js @@ -0,0 +1,79 @@ +/* Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/publicdomain/zero/1.0/ */ + +// Import the rust-based and kinto-based implementations. 
Not great to grab +// these as they're somewhat private, but we want to run the pings through our +// validation machinery which is here in the sync test code. +const { extensionStorageSync: rustImpl } = ChromeUtils.import( + "resource://gre/modules/ExtensionStorageSync.jsm" +); +const { extensionStorageSync: kintoImpl } = ChromeUtils.import( + "resource://gre/modules/ExtensionStorageSyncKinto.jsm" +); + +const { Service } = ChromeUtils.import("resource://services-sync/service.js"); +const { ExtensionStorageEngineBridge } = ChromeUtils.import( + "resource://services-sync/engines/extension-storage.js" +); + +Services.prefs.setBoolPref("webextensions.storage.sync.kinto", false); +Services.prefs.setStringPref("webextensions.storage.sync.log.level", "debug"); + +// It's tricky to force error cases here (the databases are opened with +// exclusive locks) and that part of the code has coverage in the vendored +// application-services webext-storage crate. So this just tests that the +// migration data ends up in the ping, and exactly once. +add_task(async function test_sync_migration_telem() { + // Set some stuff using the kinto-based impl prior to fully setting up sync. + let e1 = { id: "test@mozilla.com" }; + let c1 = { extension: e1, callOnClose() {} }; + + let e2 = { id: "test-2@mozilla.com" }; + let c2 = { extension: e2, callOnClose() {} }; + await kintoImpl.set(e1, { foo: "bar" }, c1); + await kintoImpl.set(e1, { baz: "quux" }, c1); + await kintoImpl.set(e2, { second: "2nd" }, c2); + + Assert.deepEqual(await rustImpl.get(e1, "foo", c1), { foo: "bar" }); + Assert.deepEqual(await rustImpl.get(e1, "baz", c1), { baz: "quux" }); + Assert.deepEqual(await rustImpl.get(e2, null, c2), { second: "2nd" }); + + // Explicitly unregister first. It's very possible this isn't needed for this + // case, however it's fairly harmless, we hope to uplift this patch to beta, + // and earlier today we had beta-only problems caused by this (bug 1629116) + await Service.engineManager.unregister("extension-storage"); + await Service.engineManager.register(ExtensionStorageEngineBridge); + let engine = Service.engineManager.get("extension-storage"); + let server = await serverForFoo(engine, undefined); + try { + await SyncTestingInfrastructure(server); + await Service.engineManager.switchAlternatives(); + + _("First sync"); + let ping = await sync_engine_and_validate_telem(engine, false, null, true); + Assert.deepEqual(ping.migrations, [ + { + type: "webext-storage", + entries: 3, + entriesSuccessful: 3, + extensions: 2, + extensionsSuccessful: 2, + openFailure: false, + }, + ]); + + // force another sync + await engine.setLastSync(0); + _("Second sync"); + + ping = await sync_engine_and_validate_telem(engine, false, null, true); + Assert.deepEqual(ping.migrations, undefined); + } finally { + await kintoImpl.clear(e1, c1); + await kintoImpl.clear(e2, c2); + await rustImpl.clear(e1, c1); + await rustImpl.clear(e2, c2); + await promiseStopServer(server); + await engine.finalize(); + } +}); diff --git a/services/sync/tests/unit/xpcshell.ini b/services/sync/tests/unit/xpcshell.ini index 5bdc81c571dd..50a03e507e1b 100644 --- a/services/sync/tests/unit/xpcshell.ini +++ b/services/sync/tests/unit/xpcshell.ini @@ -147,8 +147,13 @@ requesttimeoutfactor = 4 run-sequentially = Frequent timeouts, bug 1395148 [test_clients_escape.js] [test_doctor.js] +run-sequentially = extension-storage migration happens only once, and must be tested first. 
+[test_extension_storage_migration_telem.js] +run-sequentially = extension-storage migration happens only once, and must be tested first. [test_extension_storage_engine.js] +run-sequentially = extension-storage migration happens only once, and must be tested first. [test_extension_storage_engine_kinto.js] +run-sequentially = extension-storage migration happens only once, and must be tested first. [test_extension_storage_tracker_kinto.js] [test_forms_store.js] [test_forms_tracker.js] diff --git a/third_party/rust/webext-storage/.cargo-checksum.json b/third_party/rust/webext-storage/.cargo-checksum.json index d5598978cdd1..84893291e9d1 100644 --- a/third_party/rust/webext-storage/.cargo-checksum.json +++ b/third_party/rust/webext-storage/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"ffce3f5619bd479ed23e09780ed814f7e2b16a7b82e21cc00b2b91373e205d44","README.md":"1fd617294339930ee1ad5172377648b268cce0216fc3971facbfe7c6839e9ab1","build.rs":"2b827a62155a3d724cdb4c198270ea467439e537403f82fa873321ac55a69a63","sql/create_schema.sql":"cbb6d432e578c69614199f9e82f8103da5c1f6df5d7af4f77ea1be5869000b26","sql/create_sync_temp_tables.sql":"76517b1ec8022ca2ef53cf8c98b2ad0c289e7f2e2eb2a4b7abb7ad441fe27c6c","src/api.rs":"363674be0123604416fcdde1651c54cae5f8d7869408c12901d212814aa2293e","src/db.rs":"e04f19ab2e3da4423ce49d43b6c9a6d86be4e573f54421061bea04ef875afb2a","src/error.rs":"c956152633ad6c787f8b7322b619f807354d4c3cb2ecc35c549c3b4bcd98079e","src/lib.rs":"9dd35c00948358f495ba04953a22fbce5e4b663346747d531b3bbd553f4c2816","src/migration.rs":"a0e41c40c3b636f642f606655d8fcd36e65be13218c71beea0c16394e71f0f44","src/schema.rs":"cd5a03c2d2dc1eebdea30054c6f6a7a6b302184e9ad1f40de659f6b972c481cf","src/store.rs":"37b0bc71944255b394b0b7d287e1124e8b872d6638bc77250db0290876c03b26","src/sync/bridge.rs":"e60fec0f8f167f893bb2055f4623c9cc4dc6921acafd13dcc4e6cfd228b3cb42","src/sync/incoming.rs":"0bcbe042340a9989ce9d18dde5eb38e81580d824d9496c34d890ff8a18f92730","src/sync/mod.rs":"7255fa036635de1ab9223ccdb34fc6164e6978024ae480a9268743a203860448","src/sync/outgoing.rs":"2e0434359ba5005d730aebdac2e29980005f56e62003d89e7def78fcf8d13c5a","src/sync/sync_tests.rs":"e2c665046f3ad2a665eee2b5b33a89ae8804af42bf93fe7b2694280eb5b2a9cc"},"package":null} \ No newline at end of file 
+{"files":{"Cargo.toml":"ffce3f5619bd479ed23e09780ed814f7e2b16a7b82e21cc00b2b91373e205d44","README.md":"1fd617294339930ee1ad5172377648b268cce0216fc3971facbfe7c6839e9ab1","build.rs":"2b827a62155a3d724cdb4c198270ea467439e537403f82fa873321ac55a69a63","sql/create_schema.sql":"cbb6d432e578c69614199f9e82f8103da5c1f6df5d7af4f77ea1be5869000b26","sql/create_sync_temp_tables.sql":"76517b1ec8022ca2ef53cf8c98b2ad0c289e7f2e2eb2a4b7abb7ad441fe27c6c","src/api.rs":"363674be0123604416fcdde1651c54cae5f8d7869408c12901d212814aa2293e","src/db.rs":"e04f19ab2e3da4423ce49d43b6c9a6d86be4e573f54421061bea04ef875afb2a","src/error.rs":"c956152633ad6c787f8b7322b619f807354d4c3cb2ecc35c549c3b4bcd98079e","src/lib.rs":"373734d481e7bbd6a49a769d1212686c6f4105ba1eba985624d48208df91d174","src/migration.rs":"ca655d8e556c90e2aebb672bc3e2ad86192854459e4c1a1cfc89cc28156e7abb","src/schema.rs":"cd5a03c2d2dc1eebdea30054c6f6a7a6b302184e9ad1f40de659f6b972c481cf","src/store.rs":"f73c0eac2353d2aa9b64f779998f2d7a1b4576b331a55c2c1acedc32833e5478","src/sync/bridge.rs":"e60fec0f8f167f893bb2055f4623c9cc4dc6921acafd13dcc4e6cfd228b3cb42","src/sync/incoming.rs":"0bcbe042340a9989ce9d18dde5eb38e81580d824d9496c34d890ff8a18f92730","src/sync/mod.rs":"7255fa036635de1ab9223ccdb34fc6164e6978024ae480a9268743a203860448","src/sync/outgoing.rs":"2e0434359ba5005d730aebdac2e29980005f56e62003d89e7def78fcf8d13c5a","src/sync/sync_tests.rs":"e2c665046f3ad2a665eee2b5b33a89ae8804af42bf93fe7b2694280eb5b2a9cc"},"package":null} \ No newline at end of file diff --git a/third_party/rust/webext-storage/src/lib.rs b/third_party/rust/webext-storage/src/lib.rs index 9fbae1cfaa27..60f0b93e3fb7 100644 --- a/third_party/rust/webext-storage/src/lib.rs +++ b/third_party/rust/webext-storage/src/lib.rs @@ -13,6 +13,8 @@ mod schema; pub mod store; mod sync; +pub use migration::MigrationInfo; + // We publish some constants from non-public modules. pub use sync::STORAGE_VERSION; diff --git a/third_party/rust/webext-storage/src/migration.rs b/third_party/rust/webext-storage/src/migration.rs index 16b06eb03918..5c34b9659445 100644 --- a/third_party/rust/webext-storage/src/migration.rs +++ b/third_party/rust/webext-storage/src/migration.rs @@ -3,9 +3,10 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use crate::error::*; -use rusqlite::{Connection, OpenFlags, Transaction, NO_PARAMS}; +use rusqlite::{named_params, Connection, OpenFlags, Transaction, NO_PARAMS}; use serde_json::{Map, Value}; use sql_support::ConnExt; +use std::collections::HashSet; use std::path::Path; // Simple migration from the "old" kinto-with-sqlite-backing implementation @@ -107,12 +108,12 @@ struct Parsed<'a> { data: serde_json::Value, } -pub fn migrate(tx: &Transaction<'_>, filename: &Path) -> Result { +pub fn migrate(tx: &Transaction<'_>, filename: &Path) -> Result { // We do the grouping manually, collecting string values as we go. let mut last_ext_id = "".to_string(); let mut curr_values: Vec<(String, serde_json::Value)> = Vec::new(); - let mut num_extensions = 0; - for row in read_rows(filename) { + let (rows, mut mi) = read_rows(filename); + for row in rows { log::trace!("processing '{}' - '{}'", row.col_name, row.record); let parsed = match row.parse() { Some(p) => p, @@ -122,8 +123,9 @@ pub fn migrate(tx: &Transaction<'_>, filename: &Path) -> Result { if parsed.ext_id != last_ext_id { if last_ext_id != "" && !curr_values.is_empty() { // a different extension id - write what we have to the DB. 
- do_insert(tx, &last_ext_id, curr_values)?; - num_extensions += 1; + let entries = do_insert(tx, &last_ext_id, curr_values)?; + mi.extensions_successful += 1; + mi.entries_successful += entries; } last_ext_id = parsed.ext_id.to_string(); curr_values = Vec::new(); @@ -141,32 +143,34 @@ pub fn migrate(tx: &Transaction<'_>, filename: &Path) -> Result { // and the last one if last_ext_id != "" && !curr_values.is_empty() { // a different extension id - write what we have to the DB. - do_insert(tx, &last_ext_id, curr_values)?; - num_extensions += 1; + let entries = do_insert(tx, &last_ext_id, curr_values)?; + mi.extensions_successful += 1; + mi.entries_successful += entries; } - log::info!("migrated {} extensions", num_extensions); - Ok(num_extensions) + log::info!("migrated {} extensions: {:?}", mi.extensions_successful, mi); + Ok(mi) } -fn read_rows(filename: &Path) -> Vec { +fn read_rows(filename: &Path) -> (Vec, MigrationInfo) { let flags = OpenFlags::SQLITE_OPEN_NO_MUTEX | OpenFlags::SQLITE_OPEN_READ_ONLY; let src_conn = match Connection::open_with_flags(&filename, flags) { Ok(conn) => conn, Err(e) => { log::warn!("Failed to open the source DB: {}", e); - return Vec::new(); + return (Vec::new(), MigrationInfo::open_failure()); } }; // Failure to prepare the statement probably just means the source DB is // damaged. let mut stmt = match src_conn.prepare( "SELECT collection_name, record FROM collection_data + WHERE collection_name != 'default/storage-sync-crypto' ORDER BY collection_name", ) { Ok(stmt) => stmt, Err(e) => { log::warn!("Failed to prepare the statement: {}", e); - return Vec::new(); + return (Vec::new(), MigrationInfo::open_failure()); } }; let rows = match stmt.query_and_then(NO_PARAMS, |row| -> Result { @@ -178,18 +182,37 @@ fn read_rows(filename: &Path) -> Vec { Ok(r) => r, Err(e) => { log::warn!("Failed to read any rows from the source DB: {}", e); - return Vec::new(); + return (Vec::new(), MigrationInfo::open_failure()); } }; + let all_rows: Vec> = rows.collect(); + let entries = all_rows.len(); + let successful_rows: Vec = all_rows.into_iter().filter_map(Result::ok).collect(); + let distinct_extensions: HashSet<_> = successful_rows.iter().map(|c| &c.col_name).collect(); - rows.filter_map(Result::ok).collect() + let mi = MigrationInfo { + entries, + extensions: distinct_extensions.len(), + // Populated later. + extensions_successful: 0, + entries_successful: 0, + open_failure: false, + }; + + (successful_rows, mi) } -fn do_insert(tx: &Transaction<'_>, ext_id: &str, vals: Vec<(String, Value)>) -> Result<()> { +/// Insert the extension and values. If there are multiple values with the same +/// key (which shouldn't be possible but who knows, database corruption causes +/// strange things), chooses an arbitrary one. Returns the number of entries +/// inserted, which could be different from `vals.len()` if multiple entries in +/// `vals` have the same key. 
+fn do_insert(tx: &Transaction<'_>, ext_id: &str, vals: Vec<(String, Value)>) -> Result<usize> { let mut map = Map::with_capacity(vals.len()); for (key, val) in vals { map.insert(key, val); } + let num_entries = map.len(); tx.execute_named_cached( "INSERT OR REPLACE INTO storage_sync_data(ext_id, data, sync_change_counter) VALUES (:ext_id, :data, 1)", @@ -198,9 +221,89 @@ fn do_insert(tx: &Transaction<'_>, ext_id: &str, vals: Vec<(String, Value)>) -> ":data": &Value::Object(map), }, )?; - Ok(()) + Ok(num_entries) } +#[derive(Debug, Clone, Default, PartialEq, serde::Serialize, serde::Deserialize)] +pub struct MigrationInfo { + /// The number of entries (rows in the original table) we attempted to + /// migrate. Zero if there was some error in computing this number. + /// + /// Note that for the original table, a single row stores a single + /// preference for one extension. That is, if you view the set of + /// preferences for a given extension as a HashMap (as we do), it would be a + /// single entry/key-value-pair in the map. + pub entries: usize, + /// The number of records we successfully migrated (equal to `entries` for + /// entirely successful migrations). + pub entries_successful: usize, + /// The number of extensions (distinct extension ids) in the original + /// table. + pub extensions: usize, + /// The number of extensions we successfully migrated + pub extensions_successful: usize, + /// True iff we failed to open the source DB at all. + pub open_failure: bool, +} + +impl MigrationInfo { + /// Returns a MigrationInfo indicating that we failed to read any rows due + /// to some error case (e.g. the database open failed, or some other very + /// early read error). + fn open_failure() -> Self { + Self { + open_failure: true, + ..Self::default() + } + } + + const META_KEY: &'static str = "migration_info"; + + /// Store `self` in the provided database under `Self::META_KEY`. + pub(crate) fn store(&self, conn: &Connection) -> Result<()> { + let json = serde_json::to_string(self)?; + conn.execute_named( + "INSERT OR REPLACE INTO meta(key, value) VALUES (:k, :v)", + named_params! { + ":k": Self::META_KEY, + ":v": &json + }, + )?; + Ok(()) + } + + /// Get the MigrationInfo stored under `Self::META_KEY` (if any) out of the + /// DB, and delete it. + pub(crate) fn take(tx: &Transaction<'_>) -> Result<Option<Self>> { + let s = tx.try_query_one::<String>( + "SELECT value FROM meta WHERE key = :k", + named_params! { + ":k": Self::META_KEY, + }, + false, + )?; + tx.execute_named( + "DELETE FROM meta WHERE key = :k", + named_params! { + ":k": Self::META_KEY, + }, + )?; + if let Some(s) = s { + match serde_json::from_str(&s) { + Ok(v) => Ok(Some(v)), + Err(e) => { + // Force test failure, but just log an error otherwise so that + // we still commit the transaction that deletes the stored info. + debug_assert!(false, "Failed to read migration JSON: {:?}", e); + log::error!("Failed to read migration JSON: {}", e); + Ok(None) + } + } + } else { + Ok(None) + } + } +} #[cfg(test)] mod tests { use super::*; @@ -209,14 +312,7 @@ mod tests { use serde_json::json; use tempfile::tempdir; - // Create a test database, populate it via the callback, migrate it, and - // return a connection to the new, migrated DB for further checking.
- fn do_migrate(num_expected: usize, f: F) -> StorageDb - where - F: FnOnce(&Connection), - { - let tmpdir = tempdir().unwrap(); - let path = tmpdir.path().join("source.db"); + fn init_source_db(path: impl AsRef, f: impl FnOnce(&Connection)) { let flags = OpenFlags::SQLITE_OPEN_NO_MUTEX | OpenFlags::SQLITE_OPEN_CREATE | OpenFlags::SQLITE_OPEN_READ_WRITE; @@ -233,14 +329,25 @@ mod tests { f(&tx); tx.commit().expect("should commit"); conn.close().expect("close should work"); + } + + // Create a test database, populate it via the callback, migrate it, and + // return a connection to the new, migrated DB for further checking. + fn do_migrate(expect_mi: MigrationInfo, f: F) -> StorageDb + where + F: FnOnce(&Connection), + { + let tmpdir = tempdir().unwrap(); + let path = tmpdir.path().join("source.db"); + init_source_db(&path, f); // now migrate let mut db = new_mem_db(); let tx = db.transaction().expect("tx should work"); - let num = migrate(&tx, &tmpdir.path().join("source.db")).expect("migrate should work"); + let mi = migrate(&tx, &tmpdir.path().join("source.db")).expect("migrate should work"); tx.commit().expect("should work"); - assert_eq!(num, num_expected); + assert_eq!(mi, expect_mi); db } @@ -251,22 +358,30 @@ mod tests { ); } + const HAPPY_PATH_SQL: &str = r#" + INSERT INTO collection_data(collection_name, record) + VALUES + ('default/{e7fefcf3-b39c-4f17-5215-ebfe120a7031}', '{"id":"key-userWelcomed","key":"userWelcomed","data":1570659224457,"_status":"synced","last_modified":1579755940527}'), + ('default/{e7fefcf3-b39c-4f17-5215-ebfe120a7031}', '{"id":"key-isWho","key":"isWho","data":"4ec8109f","_status":"synced","last_modified":1579755940497}'), + ('default/storage-sync-crypto', '{"id":"keys","keys":{"default":["rQ=","lR="],"collections":{"extension@redux.devtools":["Bd=","ju="]}}}'), + ('default/https-everywhere@eff.org', '{"id":"key-userRules","key":"userRules","data":[],"_status":"synced","last_modified":1570079920045}'), + ('default/https-everywhere@eff.org', '{"id":"key-ruleActiveStates","key":"ruleActiveStates","data":{},"_status":"synced","last_modified":1570079919993}'), + ('default/https-everywhere@eff.org', '{"id":"key-migration_5F_version","key":"migration_version","data":2,"_status":"synced","last_modified":1570079919966}') + "#; + const HAPPY_PATH_MIGRATION_INFO: MigrationInfo = MigrationInfo { + entries: 5, + entries_successful: 5, + extensions: 2, + extensions_successful: 2, + open_failure: false, + }; + #[allow(clippy::unreadable_literal)] #[test] fn test_happy_paths() { // some real data. 
- let conn = do_migrate(2, |c| { - c.execute_batch( - r#"INSERT INTO collection_data(collection_name, record) - VALUES - ('default/{e7fefcf3-b39c-4f17-5215-ebfe120a7031}', '{"id":"key-userWelcomed","key":"userWelcomed","data":1570659224457,"_status":"synced","last_modified":1579755940527}'), - ('default/{e7fefcf3-b39c-4f17-5215-ebfe120a7031}', '{"id":"key-isWho","key":"isWho","data":"4ec8109f","_status":"synced","last_modified":1579755940497}'), - ('default/storage-sync-crypto', '{"id":"keys","keys":{"default":["rQ=","lR="],"collections":{"extension@redux.devtools":["Bd=","ju="]}}}'), - ('default/https-everywhere@eff.org', '{"id":"key-userRules","key":"userRules","data":[],"_status":"synced","last_modified":1570079920045}'), - ('default/https-everywhere@eff.org', '{"id":"key-ruleActiveStates","key":"ruleActiveStates","data":{},"_status":"synced","last_modified":1570079919993}'), - ('default/https-everywhere@eff.org', '{"id":"key-migration_5F_version","key":"migration_version","data":2,"_status":"synced","last_modified":1570079919966}') - "#, - ).expect("should popuplate") + let conn = do_migrate(HAPPY_PATH_MIGRATION_INFO, |c| { + c.execute_batch(HAPPY_PATH_SQL).expect("should populate") }); assert_has( @@ -283,9 +398,17 @@ mod tests { #[test] fn test_sad_paths() { - do_migrate(0, |c| { - c.execute_batch( - r#"INSERT INTO collection_data(collection_name, record) + do_migrate( + MigrationInfo { + entries: 10, + entries_successful: 0, + extensions: 6, + extensions_successful: 0, + open_failure: false, + }, + |c| { + c.execute_batch( + r#"INSERT INTO collection_data(collection_name, record) VALUES ('default/test', '{"key":2,"data":1}'), -- key not a string ('default/test', '{"key":"","data":1}'), -- key empty string @@ -298,8 +421,30 @@ mod tests { ('defaultx/test', '{"key":"k","data":1}'), -- bad key format 4 ('', '') -- empty strings "#, - ) - .expect("should populate"); + ) + .expect("should populate"); + }, + ); + } + + #[test] + fn test_migration_info_storage() { + let tmpdir = tempdir().unwrap(); + let path = tmpdir.path().join("source.db"); + init_source_db(&path, |c| { + c.execute_batch(HAPPY_PATH_SQL).expect("should populate") }); + + // now migrate + let db = crate::store::test::new_mem_store(); + db.migrate(&path).expect("migration should work"); + let mi = db + .take_migration_info() + .expect("take failed with info present"); + assert_eq!(mi, Some(HAPPY_PATH_MIGRATION_INFO)); + let mi2 = db + .take_migration_info() + .expect("take failed with info missing"); + assert_eq!(mi2, None); } } diff --git a/third_party/rust/webext-storage/src/store.rs b/third_party/rust/webext-storage/src/store.rs index 52e851e3a085..b7d6e006c3d0 100644 --- a/third_party/rust/webext-storage/src/store.rs +++ b/third_party/rust/webext-storage/src/store.rs @@ -5,7 +5,7 @@ use crate::api::{self, StorageChanges}; use crate::db::StorageDb; use crate::error::*; -use crate::migration::migrate; +use crate::migration::{migrate, MigrationInfo}; use crate::sync; use std::path::Path; use std::result; @@ -128,19 +128,34 @@ impl Store { } /// Migrates data from a database in the format of the "old" kinto - /// implementation. Returns the count of webextensions for whom data was - /// migrated. + /// implementation. Information about how the migration went is stored in + /// the database, and can be read using `Self::take_migration_info`. + /// /// Note that `filename` isn't normalized or canonicalized. 
- pub fn migrate(&self, filename: impl AsRef) -> Result { + pub fn migrate(&self, filename: impl AsRef) -> Result<()> { let tx = self.db.unchecked_transaction()?; let result = migrate(&tx, filename.as_ref())?; tx.commit()?; + // Failing to store this information should not cause migration failure. + if let Err(e) = result.store(&self.db) { + debug_assert!(false, "Migration error: {:?}", e); + log::warn!("Failed to record migration telmetry: {}", e); + } + Ok(()) + } + + /// Read-and-delete (e.g. `take` in rust parlance, see Option::take) + /// operation for any MigrationInfo stored in this database. + pub fn take_migration_info(&self) -> Result> { + let tx = self.db.unchecked_transaction()?; + let result = MigrationInfo::take(&tx)?; + tx.commit()?; Ok(result) } } #[cfg(test)] -mod test { +pub mod test { use super::*; #[test] fn test_send() { @@ -148,4 +163,10 @@ mod test { // Compile will fail if not send. ensure_send::(); } + + pub fn new_mem_store() -> Store { + Store { + db: crate::db::test::new_mem_db(), + } + } } diff --git a/toolkit/components/extensions/storage/mozIExtensionStorageArea.idl b/toolkit/components/extensions/storage/mozIExtensionStorageArea.idl index c2e63c61613c..b3dcaa247941 100644 --- a/toolkit/components/extensions/storage/mozIExtensionStorageArea.idl +++ b/toolkit/components/extensions/storage/mozIExtensionStorageArea.idl @@ -59,6 +59,12 @@ interface mozIExtensionStorageArea : nsISupports { void getBytesInUse(in AUTF8String extensionId, in AUTF8String keys, in mozIExtensionStorageCallback callback); + + // Gets and clears the information about the migration from the kinto + // database into the rust one. As "and clears" indicates, this will + // only produce a non-empty the first time it's called after a + // migration (which, hopefully, should only happen once). + void takeMigrationInfo(in mozIExtensionStorageCallback callback); }; // Implements additional methods for setting up and tearing down the underlying diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/Cargo.toml b/toolkit/components/extensions/storage/webext_storage_bridge/Cargo.toml index 284d9aed5d66..ff825fd937eb 100644 --- a/toolkit/components/extensions/storage/webext_storage_bridge/Cargo.toml +++ b/toolkit/components/extensions/storage/webext_storage_bridge/Cargo.toml @@ -18,5 +18,5 @@ xpcom = { path = "../../../../../xpcom/rust/xpcom" } serde = "1" serde_json = "1" storage_variant = { path = "../../../../../storage/variant" } -sql-support = { git = "https://github.com/mozilla/application-services", rev = "57a07e89e9ac92756b60b67c4a6ee06975b86288" } -webext-storage = { git = "https://github.com/mozilla/application-services", rev = "57a07e89e9ac92756b60b67c4a6ee06975b86288" } +sql-support = { git = "https://github.com/mozilla/application-services", rev = "e8d7530319fa6c20d9de78d031c9398630eca3cd" } +webext-storage = { git = "https://github.com/mozilla/application-services", rev = "e8d7530319fa6c20d9de78d031c9398630eca3cd" } diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/src/area.rs b/toolkit/components/extensions/storage/webext_storage_bridge/src/area.rs index dacb884423b0..909e75dc13e5 100644 --- a/toolkit/components/extensions/storage/webext_storage_bridge/src/area.rs +++ b/toolkit/components/extensions/storage/webext_storage_bridge/src/area.rs @@ -261,6 +261,14 @@ impl StorageSyncArea { } Ok(()) } + + xpcom_method!(takeMigrationInfo => TakeMigrationInfo(callback: *const mozIExtensionStorageCallback)); + + /// Fetch-and-delete (e.g. 
`take`) information about the migration from the + /// kinto-based extension-storage to the rust-based storage. + fn takeMigrationInfo(&self, callback: &mozIExtensionStorageCallback) -> Result<()> { + self.dispatch(Punt::TakeMigrationInfo, callback) + } } fn teardown( diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs b/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs index 94e8523494fe..61feec71def8 100644 --- a/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs +++ b/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs @@ -40,6 +40,12 @@ pub enum Punt { /// Fetches all pending Sync change notifications to pass to /// `storage.onChanged` listeners. FetchPendingSyncChanges, + /// Fetch-and-delete (e.g. `take`) information about the migration from the + /// kinto-based extension-storage to the rust-based storage. + /// + /// This data is stored in the database instead of just being returned by + /// the call to `migrate`, as we may migrate prior to telemetry being ready. + TakeMigrationInfo, } impl Punt { @@ -53,6 +59,7 @@ impl Punt { Punt::Clear { .. } => "webext_storage::clear", Punt::GetBytesInUse { .. } => "webext_storage::get_bytes_in_use", Punt::FetchPendingSyncChanges => "webext_storage::fetch_pending_sync_changes", + Punt::TakeMigrationInfo => "webext_storage::take_migration_info", } } } @@ -184,6 +191,9 @@ impl PuntTask { }) .collect(), ), + Punt::TakeMigrationInfo => { + PuntResult::with_value(self.store()?.get()?.take_migration_info()?)? + } }) } } diff --git a/toolkit/components/extensions/storage/webext_storage_bridge/src/store.rs b/toolkit/components/extensions/storage/webext_storage_bridge/src/store.rs index c4dc60d08c59..5df295d89d65 100644 --- a/toolkit/components/extensions/storage/webext_storage_bridge/src/store.rs +++ b/toolkit/components/extensions/storage/webext_storage_bridge/src/store.rs @@ -212,9 +212,13 @@ fn init_store(config: &LazyStoreConfig) -> Result { let store = Store::new(&config.path)?; if should_migrate { match store.migrate(&config.kinto_path) { - Ok(num) => { + // It's likely to be too early for us to stick the MigrationInfo + // into the sync telemetry, a separate call to `take_migration_info` + // must be made to the store (this is done by telemetry after it's + // ready to submit the data). + Ok(()) => { // need logging, but for now let's print to stdout. 
- println!("extension-storage: migrated {} records", num); + println!("extension-storage: migration complete"); Ok(store) } Err(e) => { diff --git a/toolkit/components/glean/Cargo.toml b/toolkit/components/glean/Cargo.toml index df5b11fe1638..94ad24fe92c2 100644 --- a/toolkit/components/glean/Cargo.toml +++ b/toolkit/components/glean/Cargo.toml @@ -15,5 +15,5 @@ xpcom = { path = "../../../xpcom/rust/xpcom" } once_cell = "1.2.0" glean = { path = "./api" } cstr = "0.1" -viaduct = { git = "https://github.com/mozilla/application-services", rev = "57a07e89e9ac92756b60b67c4a6ee06975b86288" } # Copied from toolkit/library/rust/shared/Cargo.toml +viaduct = { git = "https://github.com/mozilla/application-services", rev = "e8d7530319fa6c20d9de78d031c9398630eca3cd" } # Copied from toolkit/library/rust/shared/Cargo.toml url = "2.1" # Copied from viaduct's deps, see https://github.com/mozilla/application-services/issues/3062 diff --git a/toolkit/components/telemetry/docs/data/sync-ping.rst b/toolkit/components/telemetry/docs/data/sync-ping.rst index b75393717edd..ed74e6c45b24 100644 --- a/toolkit/components/telemetry/docs/data/sync-ping.rst +++ b/toolkit/components/telemetry/docs/data/sync-ping.rst @@ -117,6 +117,14 @@ Structure: failureReason: { ... }, } } + ], + // Information about any storage migrations that have occurred. Omitted if it would be empty. + migrations: [ + // See the section on the `migrations` array for detailed documentation on what may appear here. + { + type: , + // per-type data + } ] }], // The "node type" as reported by the token server. This will not change @@ -211,7 +219,6 @@ syncs.devices The list of remote devices associated with this account, as reported by the clients collection. The ID of each device is hashed using the same algorithm as the local id. - Events in the "sync" ping ------------------------- @@ -282,3 +289,54 @@ client. This is logically the "other end" of ``sendcommand``. for this GUID will be the same as the flowID sent to the client via ``sendcommand``. - serverTime: (optional) Most recent server timestamp, as described above. + +The ``migrations`` Array +------------------------ + +The application-services developers are in the process of oxidizing parts of firefox sync and the related data storage code, which typically requires migrating the old storage into a new database and/or format. + +When a migration like this occurs, a record is reported in this list the next time the sync ping is submitted. + +Because the format of each data store may be drastically different, we are not attempting to come up with a generic representation here, and currently planning on allowing each migration record to vary independently (at least for now). These records will be distinctly identified by their ``"type"`` field. + +They should only appear once per migration (that is, we'd rather fail to report a record than report them multiple times). + +migrations.type: ``"webext-storage"`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This indicates a migration was performed from the legacy kinto-based extension-storage database into the new webext-storage rust implementation. + +It contains the following fields: + +- ``type``: Always the string ``"webext-storage"``. + +- ``entries``: The number of entries/preferences in the source (legacy) database, including ones we failed to read. See below for information on the distinction between ``entries`` and ``extensions`` in this record. 
+ +- ``entriesSuccessful``: The number of entries/preferences (see below) which we have successfully migrated into the destination database. + +- ``extensions``: The number of distinct extensions which have at least one preference in the source (legacy) database. + +- ``extensionsSuccessful``: The number of distinct extensions which have at least one preference in the destination (migrated) database. + +- ``openFailure``: A boolean flag that is true if we hit a read error before any entries could be read from the source (legacy) database (for example, because it could not be opened at all). This likely indicates complete corruption, or a bug in an underlying library like rusqlite. + + +Note: "entries" vs "extensions" +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``webext-storage`` migration record detailed above contains counts for both: + +- The number of "entries" detected vs successfully migrated. +- The number of "extensions" detected vs successfully migrated. + +This may seem redundant, but these refer to different (but related) things. The distinction here has to do with the way the two databases store extension-storage data: + +* The legacy database stores one row for each (``extension_id``, ``preference_name``, ``preference_value``) triple. These are referred to as ``entries``. + +* Conversely, the new database stores one row per extension: a pair containing the ``extension_id`` and a dictionary holding all of that extension's preference data, so its rows are equivalent to ``extensions``. + +(The description above is a somewhat simplified view of things, as it ignores a number of values each database stores which are irrelevant for migration.) + +That is, ``entries`` represent each individual preference setting, and ``extensions`` represent the collected set of preferences for a given extension. + +Counts for *both* of these are present because a disparity in either count would likely point to a different kind of issue with the migration code. diff --git a/toolkit/library/rust/shared/Cargo.toml b/toolkit/library/rust/shared/Cargo.toml index 4b8940c2b6d7..d81e97433b97 100644 --- a/toolkit/library/rust/shared/Cargo.toml +++ b/toolkit/library/rust/shared/Cargo.toml @@ -66,7 +66,7 @@ fluent-ffi = { path = "../../../../intl/l10n/rust/fluent-ffi" } firefox-accounts-bridge = { path = "../../../../services/fxaccounts/rust-bridge/firefox-accounts-bridge", optional=true } [target.'cfg(not(target_os = "android"))'.dependencies] -viaduct = { git = "https://github.com/mozilla/application-services", rev = "57a07e89e9ac92756b60b67c4a6ee06975b86288"} +viaduct = { git = "https://github.com/mozilla/application-services", rev = "e8d7530319fa6c20d9de78d031c9398630eca3cd"} webext_storage_bridge = { path = "../../../components/extensions/storage/webext_storage_bridge" } [build-dependencies]
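Note for reviewers (not part of the patch itself): the sketch below illustrates how the migration info added by this change flows into the sync ping. `MigrationInfo` is serialized by serde with its snake_case field names and stored as JSON under the `migration_info` key in the `meta` table; `mozIExtensionStorageArea.takeMigrationInfo()` later hands that JSON string to `_takeMigrationInfo()` in extension-storage.js, and `_addMigrationRecord()` in telemetry.js renames the fields to camelCase before they land in the ping's `migrations` array. The struct fields are copied from migration.rs; the standalone `main()` wrapper and the serde/serde_json dependencies are assumptions made only to keep the sketch self-contained and runnable.

// Illustrative sketch only; assumes serde = { version = "1", features = ["derive"] }
// and serde_json = "1" are available.
use serde::{Deserialize, Serialize};

// Field names and types copied from the `MigrationInfo` struct added in
// third_party/rust/webext-storage/src/migration.rs (doc comments omitted).
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct MigrationInfo {
    pub entries: usize,
    pub entries_successful: usize,
    pub extensions: usize,
    pub extensions_successful: usize,
    pub open_failure: bool,
}

fn main() {
    // The "happy path" values from the vendored migration tests
    // (HAPPY_PATH_MIGRATION_INFO).
    let mi = MigrationInfo {
        entries: 5,
        entries_successful: 5,
        extensions: 2,
        extensions_successful: 2,
        open_failure: false,
    };
    // This is the JSON string that MigrationInfo::store() writes to the meta
    // table and that takeMigrationInfo() eventually returns to JS, where
    // _takeMigrationInfo() runs it through JSON.parse().
    let json = serde_json::to_string(&mi).unwrap();
    println!("{}", json);
    // {"entries":5,"entries_successful":5,"extensions":2,
    //  "extensions_successful":2,"open_failure":false}
    //
    // telemetry.js then records it in the ping as:
    //   { type: "webext-storage", entries: 5, entriesSuccessful: 5,
    //     extensions: 2, extensionsSuccessful: 2, openFailure: false }
}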