зеркало из https://github.com/mozilla/gecko-dev.git
Bug 1122061 - Move TelemetrySession tests out of test_telemetryPing.js. r=gfritzsche
This commit is contained in:
Родитель
32776a14b3
Коммит
93f8ea689f
|
@ -312,5 +312,5 @@ add_task(function* test_times() {
|
||||||
});
|
});
|
||||||
|
|
||||||
add_task(function* test_shutdown() {
|
add_task(function* test_shutdown() {
|
||||||
yield TelemetrySession.shutdown();
|
yield TelemetrySession.shutdown(false);
|
||||||
});
|
});
|
||||||
|
|
|
@ -211,7 +211,7 @@ this.TelemetryPing = Object.freeze({
|
||||||
},
|
},
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Only used for testing. Saves a ping to disk with a specific file name and path.
|
* Only used for testing. Saves a ping to disk and return the ping id once done.
|
||||||
*
|
*
|
||||||
* @param {String} aType The type of the ping.
|
* @param {String} aType The type of the ping.
|
||||||
* @param {Object} aPayload The actual data payload for the ping.
|
* @param {Object} aPayload The actual data payload for the ping.
|
||||||
|
@ -224,9 +224,11 @@ this.TelemetryPing = Object.freeze({
|
||||||
* environment data.
|
* environment data.
|
||||||
* @param {Boolean} [aOptions.overwrite=false] true overwrites a ping with the same name,
|
* @param {Boolean} [aOptions.overwrite=false] true overwrites a ping with the same name,
|
||||||
* if found.
|
* if found.
|
||||||
* @param {String} aOptions.filePath The path to save the ping to.
|
* @param {String} [aOptions.filePath] The path to save the ping to. Will save to default
|
||||||
|
* ping location if not provided.
|
||||||
*
|
*
|
||||||
* @returns {Promise} A promise that resolves when the ping is saved to disk.
|
* @returns {Promise<Integer>} A promise that resolves with the ping id when the ping is
|
||||||
|
* saved to disk.
|
||||||
*/
|
*/
|
||||||
testSavePingToFile: function(aType, aPayload, aOptions = {}) {
|
testSavePingToFile: function(aType, aPayload, aOptions = {}) {
|
||||||
let options = aOptions;
|
let options = aOptions;
|
||||||
|
@ -450,7 +452,7 @@ let Impl = {
|
||||||
},
|
},
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Save a ping to disk with a specific file name.
|
* Save a ping to disk and return the ping id when done.
|
||||||
*
|
*
|
||||||
* @param {String} aType The type of the ping.
|
* @param {String} aType The type of the ping.
|
||||||
* @param {Object} aPayload The actual data payload for the ping.
|
* @param {Object} aPayload The actual data payload for the ping.
|
||||||
|
@ -462,18 +464,25 @@ let Impl = {
|
||||||
* @param {Boolean} aOptions.addEnvironment true if the ping should contain the
|
* @param {Boolean} aOptions.addEnvironment true if the ping should contain the
|
||||||
* environment data.
|
* environment data.
|
||||||
* @param {Boolean} aOptions.overwrite true overwrites a ping with the same name, if found.
|
* @param {Boolean} aOptions.overwrite true overwrites a ping with the same name, if found.
|
||||||
* @param {String} aOptions.filePath The path to save the ping to.
|
* @param {String} [aOptions.filePath] The path to save the ping to. Will save to default
|
||||||
|
* ping location if not provided.
|
||||||
*
|
*
|
||||||
* @returns {Promise} A promise that resolves when the ping is saved to disk.
|
* @returns {Promise} A promise that resolves with the ping id when the ping is saved to
|
||||||
|
* disk.
|
||||||
*/
|
*/
|
||||||
testSavePingToFile: function testSavePingToFile(aType, aPayload, aOptions) {
|
testSavePingToFile: function testSavePingToFile(aType, aPayload, aOptions) {
|
||||||
this._log.trace("testSavePingToFile - Type " + aType + ", Server " + this._server +
|
this._log.trace("testSavePingToFile - Type " + aType + ", Server " + this._server +
|
||||||
", aOptions " + JSON.stringify(aOptions));
|
", aOptions " + JSON.stringify(aOptions));
|
||||||
|
|
||||||
return this.assemblePing(aType, aPayload, aOptions)
|
return this.assemblePing(aType, aPayload, aOptions)
|
||||||
.then(pingData => TelemetryFile.savePingToFile(pingData, aOptions.filePath,
|
.then(pingData => {
|
||||||
aOptions.overwrite),
|
if (aOptions.filePath) {
|
||||||
error => this._log.error("testSavePingToFile - Rejection", error));
|
return TelemetryFile.savePingToFile(pingData, aOptions.filePath, aOptions.overwrite)
|
||||||
|
.then(() => { return pingData.id; });
|
||||||
|
} else {
|
||||||
|
return TelemetryFile.savePing(pingData, aOptions.overwrite)
|
||||||
|
.then(() => { return pingData.id; });
|
||||||
|
}
|
||||||
|
}, error => this._log.error("testSavePing - Rejection", error));
|
||||||
},
|
},
|
||||||
|
|
||||||
finishPingRequest: function finishPingRequest(success, startTime, ping, isPersisted) {
|
finishPingRequest: function finishPingRequest(success, startTime, ping, isPersisted) {
|
||||||
|
|
|
@ -217,9 +217,11 @@ this.TelemetrySession = Object.freeze({
|
||||||
},
|
},
|
||||||
/**
|
/**
|
||||||
* Used only for testing purposes.
|
* Used only for testing purposes.
|
||||||
|
* @param {Boolean} [aForceSavePending=true] If true, always saves the ping whether Telemetry
|
||||||
|
* can send pings or not, which is used for testing.
|
||||||
*/
|
*/
|
||||||
shutdown: function() {
|
shutdown: function(aForceSavePending = true) {
|
||||||
return Impl.shutdown(true);
|
return Impl.shutdown(aForceSavePending);
|
||||||
},
|
},
|
||||||
/**
|
/**
|
||||||
* Used only for testing purposes.
|
* Used only for testing purposes.
|
||||||
|
@ -1163,5 +1165,6 @@ let Impl = {
|
||||||
if (Telemetry.canSend || testing) {
|
if (Telemetry.canSend || testing) {
|
||||||
return this.savePendingPings();
|
return this.savePendingPings();
|
||||||
}
|
}
|
||||||
|
return Promise.resolve();
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
|
|
|
@ -15,34 +15,20 @@ const Cr = Components.results;
|
||||||
|
|
||||||
Cu.import("resource://testing-common/httpd.js", this);
|
Cu.import("resource://testing-common/httpd.js", this);
|
||||||
Cu.import("resource://gre/modules/Services.jsm");
|
Cu.import("resource://gre/modules/Services.jsm");
|
||||||
Cu.import("resource://gre/modules/LightweightThemeManager.jsm", this);
|
|
||||||
Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
|
Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
|
||||||
Cu.import("resource://gre/modules/TelemetryPing.jsm", this);
|
Cu.import("resource://gre/modules/TelemetryPing.jsm", this);
|
||||||
Cu.import("resource://gre/modules/TelemetrySession.jsm", this);
|
|
||||||
Cu.import("resource://gre/modules/TelemetryFile.jsm", this);
|
Cu.import("resource://gre/modules/TelemetryFile.jsm", this);
|
||||||
Cu.import("resource://gre/modules/Task.jsm", this);
|
Cu.import("resource://gre/modules/Task.jsm", this);
|
||||||
Cu.import("resource://gre/modules/Promise.jsm", this);
|
Cu.import("resource://gre/modules/Promise.jsm", this);
|
||||||
Cu.import("resource://gre/modules/Preferences.jsm");
|
Cu.import("resource://gre/modules/Preferences.jsm");
|
||||||
Cu.import("resource://gre/modules/osfile.jsm", this);
|
|
||||||
|
|
||||||
const IGNORE_HISTOGRAM = "test::ignore_me";
|
const PING_FORMAT_VERSION = 2;
|
||||||
const IGNORE_HISTOGRAM_TO_CLONE = "MEMORY_HEAP_ALLOCATED";
|
const TEST_PING_TYPE = "test-ping-type";
|
||||||
const IGNORE_CLONED_HISTOGRAM = "test::ignore_me_also";
|
const TEST_PING_RETENTION = 180;
|
||||||
const ADDON_NAME = "Telemetry test addon";
|
|
||||||
const ADDON_HISTOGRAM = "addon-histogram";
|
|
||||||
// Add some unicode characters here to ensure that sending them works correctly.
|
|
||||||
const FLASH_VERSION = "\u201c1.1.1.1\u201d";
|
|
||||||
const SHUTDOWN_TIME = 10000;
|
|
||||||
const FAILED_PROFILE_LOCK_ATTEMPTS = 2;
|
|
||||||
|
|
||||||
// Constants from prio.h for nsIFileOutputStream.init
|
const PLATFORM_VERSION = "1.9.2";
|
||||||
const PR_WRONLY = 0x2;
|
const APP_VERSION = "1";
|
||||||
const PR_CREATE_FILE = 0x8;
|
const APP_NAME = "XPCShell";
|
||||||
const PR_TRUNCATE = 0x20;
|
|
||||||
const RW_OWNER = 0600;
|
|
||||||
|
|
||||||
const NUMBER_OF_THREADS_TO_LAUNCH = 30;
|
|
||||||
let gNumberOfThreadsLaunched = 0;
|
|
||||||
|
|
||||||
const PREF_BRANCH = "toolkit.telemetry.";
|
const PREF_BRANCH = "toolkit.telemetry.";
|
||||||
const PREF_ENABLED = PREF_BRANCH + "enabled";
|
const PREF_ENABLED = PREF_BRANCH + "enabled";
|
||||||
|
@ -50,8 +36,6 @@ const PREF_FHR_UPLOAD_ENABLED = "datareporting.healthreport.uploadEnabled";
|
||||||
const PREF_FHR_SERVICE_ENABLED = "datareporting.healthreport.service.enabled";
|
const PREF_FHR_SERVICE_ENABLED = "datareporting.healthreport.service.enabled";
|
||||||
|
|
||||||
const HAS_DATAREPORTINGSERVICE = "@mozilla.org/datareporting/service;1" in Cc;
|
const HAS_DATAREPORTINGSERVICE = "@mozilla.org/datareporting/service;1" in Cc;
|
||||||
const SESSION_RECORDER_EXPECTED = HAS_DATAREPORTINGSERVICE &&
|
|
||||||
Preferences.get(PREF_FHR_SERVICE_ENABLED, true);
|
|
||||||
|
|
||||||
const Telemetry = Cc["@mozilla.org/base/telemetry;1"].getService(Ci.nsITelemetry);
|
const Telemetry = Cc["@mozilla.org/base/telemetry;1"].getService(Ci.nsITelemetry);
|
||||||
|
|
||||||
|
@ -65,15 +49,19 @@ XPCOMUtils.defineLazyGetter(this, "gDatareportingService",
|
||||||
.getService(Ci.nsISupports)
|
.getService(Ci.nsISupports)
|
||||||
.wrappedJSObject);
|
.wrappedJSObject);
|
||||||
|
|
||||||
function sendPing () {
|
function sendPing(aSendClientId, aSendEnvironment) {
|
||||||
TelemetrySession.gatherStartup();
|
|
||||||
if (gServerStarted) {
|
if (gServerStarted) {
|
||||||
TelemetryPing.setServer("http://localhost:" + gHttpServer.identity.primaryPort);
|
TelemetryPing.setServer("http://localhost:" + gHttpServer.identity.primaryPort);
|
||||||
return TelemetrySession.testPing();
|
|
||||||
} else {
|
} else {
|
||||||
TelemetryPing.setServer("http://doesnotexist");
|
TelemetryPing.setServer("http://doesnotexist");
|
||||||
return TelemetrySession.testPing();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let options = {
|
||||||
|
addClientId: aSendClientId,
|
||||||
|
addEnvironment: aSendEnvironment,
|
||||||
|
retentionDays: TEST_PING_RETENTION,
|
||||||
|
};
|
||||||
|
return TelemetryPing.send(TEST_PING_TYPE, {}, options);
|
||||||
}
|
}
|
||||||
|
|
||||||
function wrapWithExceptionHandler(f) {
|
function wrapWithExceptionHandler(f) {
|
||||||
|
@ -94,40 +82,6 @@ function registerPingHandler(handler) {
|
||||||
wrapWithExceptionHandler(handler));
|
wrapWithExceptionHandler(handler));
|
||||||
}
|
}
|
||||||
|
|
||||||
function setupTestData() {
|
|
||||||
Telemetry.newHistogram(IGNORE_HISTOGRAM, "never", Telemetry.HISTOGRAM_BOOLEAN);
|
|
||||||
Telemetry.histogramFrom(IGNORE_CLONED_HISTOGRAM, IGNORE_HISTOGRAM_TO_CLONE);
|
|
||||||
Services.startup.interrupted = true;
|
|
||||||
Telemetry.registerAddonHistogram(ADDON_NAME, ADDON_HISTOGRAM,
|
|
||||||
Telemetry.HISTOGRAM_LINEAR,
|
|
||||||
1, 5, 6);
|
|
||||||
let h1 = Telemetry.getAddonHistogram(ADDON_NAME, ADDON_HISTOGRAM);
|
|
||||||
h1.add(1);
|
|
||||||
let h2 = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT");
|
|
||||||
h2.add();
|
|
||||||
|
|
||||||
let k1 = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_COUNT");
|
|
||||||
k1.add("a");
|
|
||||||
k1.add("a");
|
|
||||||
k1.add("b");
|
|
||||||
}
|
|
||||||
|
|
||||||
function getSavedHistogramsFile(basename) {
|
|
||||||
let tmpDir = Services.dirsvc.get("ProfD", Ci.nsIFile);
|
|
||||||
let histogramsFile = tmpDir.clone();
|
|
||||||
histogramsFile.append(basename);
|
|
||||||
if (histogramsFile.exists()) {
|
|
||||||
histogramsFile.remove(true);
|
|
||||||
}
|
|
||||||
do_register_cleanup(function () {
|
|
||||||
try {
|
|
||||||
histogramsFile.remove(true);
|
|
||||||
} catch (e) {
|
|
||||||
}
|
|
||||||
});
|
|
||||||
return histogramsFile;
|
|
||||||
}
|
|
||||||
|
|
||||||
function decodeRequestPayload(request) {
|
function decodeRequestPayload(request) {
|
||||||
let s = request.bodyInputStream;
|
let s = request.bodyInputStream;
|
||||||
let payload = null;
|
let payload = null;
|
||||||
|
@ -164,242 +118,53 @@ function decodeRequestPayload(request) {
|
||||||
return payload;
|
return payload;
|
||||||
}
|
}
|
||||||
|
|
||||||
function checkPayloadInfo(payload, reason) {
|
function checkPingFormat(aPing, aType, aHasClientId, aHasEnvironment) {
|
||||||
// get rid of the non-deterministic field
|
const MANDATORY_PING_FIELDS = [
|
||||||
const expected_info = {
|
"type", "id", "creationDate", "version", "application", "payload"
|
||||||
OS: "XPCShell",
|
];
|
||||||
appVersion: "1",
|
|
||||||
appName: "XPCShell",
|
const APPLICATION_TEST_DATA = {
|
||||||
appBuildID: "2007010101",
|
buildId: "2007010101",
|
||||||
platformBuildID: "2007010101",
|
name: APP_NAME,
|
||||||
flashVersion: FLASH_VERSION
|
version: APP_VERSION,
|
||||||
|
vendor: "Mozilla",
|
||||||
|
platformVersion: PLATFORM_VERSION,
|
||||||
|
xpcomAbi: "noarch-spidermonkey",
|
||||||
};
|
};
|
||||||
|
|
||||||
for (let f in expected_info) {
|
// Check that the ping contains all the mandatory fields.
|
||||||
do_check_eq(payload.info[f], expected_info[f]);
|
for (let f of MANDATORY_PING_FIELDS) {
|
||||||
|
Assert.ok(f in aPing, f + " must be available.");
|
||||||
}
|
}
|
||||||
|
|
||||||
do_check_eq(payload.info.reason, reason);
|
Assert.equal(aPing.type, aType, "The ping must have the correct type.");
|
||||||
do_check_true("appUpdateChannel" in payload.info);
|
Assert.equal(aPing.version, PING_FORMAT_VERSION, "The ping must have the correct version.");
|
||||||
do_check_true("revision" in payload.info);
|
|
||||||
if (Services.appinfo.isOfficial) {
|
// Test the application section.
|
||||||
do_check_true(payload.info.revision.startsWith("http"));
|
for (let f in APPLICATION_TEST_DATA) {
|
||||||
|
Assert.equal(aPing.application[f], APPLICATION_TEST_DATA[f],
|
||||||
|
f + " must have the correct value.");
|
||||||
}
|
}
|
||||||
|
|
||||||
if ("@mozilla.org/datareporting/service;1" in Cc &&
|
// We can't check the values for channel and architecture. Just make
|
||||||
Services.prefs.getBoolPref(PREF_FHR_UPLOAD_ENABLED)) {
|
// sure they are in.
|
||||||
do_check_true("clientID" in payload);
|
Assert.ok("architecture" in aPing.application,
|
||||||
do_check_neq(payload.clientID, null);
|
"The application section must have an architecture field.");
|
||||||
do_check_eq(payload.clientID, gDataReportingClientID);
|
Assert.ok("channel" in aPing.application,
|
||||||
}
|
"The application section must have a channel field.");
|
||||||
|
|
||||||
|
// Check the clientId and environment fields, as needed.
|
||||||
|
Assert.equal("clientId" in aPing, aHasClientId);
|
||||||
|
Assert.equal("environment" in aPing, aHasEnvironment);
|
||||||
}
|
}
|
||||||
|
|
||||||
function checkPayload(request, payload, reason, successfulPings) {
|
/**
|
||||||
// Take off ["","submit","telemetry"].
|
* Start the webserver used in the tests.
|
||||||
let pathComponents = request.path.split("/").slice(3);
|
*/
|
||||||
|
function startWebserver() {
|
||||||
checkPayloadInfo(payload, reason);
|
gHttpServer.start(-1);
|
||||||
do_check_eq(reason, pathComponents[1]);
|
gServerStarted = true;
|
||||||
do_check_eq(request.getHeader("content-type"), "application/json; charset=UTF-8");
|
gRequestIterator = Iterator(new Request());
|
||||||
do_check_true(payload.simpleMeasurements.uptime >= 0);
|
|
||||||
do_check_true(payload.simpleMeasurements.startupInterrupted === 1);
|
|
||||||
do_check_eq(payload.simpleMeasurements.shutdownDuration, SHUTDOWN_TIME);
|
|
||||||
do_check_eq(payload.simpleMeasurements.savedPings, 1);
|
|
||||||
do_check_true("maximalNumberOfConcurrentThreads" in payload.simpleMeasurements);
|
|
||||||
do_check_true(payload.simpleMeasurements.maximalNumberOfConcurrentThreads >= gNumberOfThreadsLaunched);
|
|
||||||
|
|
||||||
let activeTicks = payload.simpleMeasurements.activeTicks;
|
|
||||||
do_check_true(SESSION_RECORDER_EXPECTED ? activeTicks >= 0 : activeTicks == -1);
|
|
||||||
|
|
||||||
do_check_eq(payload.simpleMeasurements.failedProfileLockCount,
|
|
||||||
FAILED_PROFILE_LOCK_ATTEMPTS);
|
|
||||||
let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
|
|
||||||
let failedProfileLocksFile = profileDirectory.clone();
|
|
||||||
failedProfileLocksFile.append("Telemetry.FailedProfileLocks.txt");
|
|
||||||
do_check_true(!failedProfileLocksFile.exists());
|
|
||||||
|
|
||||||
|
|
||||||
let isWindows = ("@mozilla.org/windows-registry-key;1" in Components.classes);
|
|
||||||
if (isWindows) {
|
|
||||||
do_check_true(payload.simpleMeasurements.startupSessionRestoreReadBytes > 0);
|
|
||||||
do_check_true(payload.simpleMeasurements.startupSessionRestoreWriteBytes > 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
const TELEMETRY_PING = "TELEMETRY_PING";
|
|
||||||
const TELEMETRY_SUCCESS = "TELEMETRY_SUCCESS";
|
|
||||||
const TELEMETRY_TEST_FLAG = "TELEMETRY_TEST_FLAG";
|
|
||||||
const TELEMETRY_TEST_COUNT = "TELEMETRY_TEST_COUNT";
|
|
||||||
const TELEMETRY_TEST_KEYED_FLAG = "TELEMETRY_TEST_KEYED_FLAG";
|
|
||||||
const TELEMETRY_TEST_KEYED_COUNT = "TELEMETRY_TEST_KEYED_COUNT";
|
|
||||||
const READ_SAVED_PING_SUCCESS = "READ_SAVED_PING_SUCCESS";
|
|
||||||
|
|
||||||
do_check_true(TELEMETRY_PING in payload.histograms);
|
|
||||||
do_check_true(READ_SAVED_PING_SUCCESS in payload.histograms);
|
|
||||||
do_check_true(TELEMETRY_TEST_FLAG in payload.histograms);
|
|
||||||
do_check_true(TELEMETRY_TEST_COUNT in payload.histograms);
|
|
||||||
|
|
||||||
let rh = Telemetry.registeredHistograms(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, []);
|
|
||||||
for (let name of rh) {
|
|
||||||
if (/SQLITE/.test(name) && name in payload.histograms) {
|
|
||||||
do_check_true(("STARTUP_" + name) in payload.histograms);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
do_check_false(IGNORE_HISTOGRAM in payload.histograms);
|
|
||||||
do_check_false(IGNORE_CLONED_HISTOGRAM in payload.histograms);
|
|
||||||
|
|
||||||
// Flag histograms should automagically spring to life.
|
|
||||||
const expected_flag = {
|
|
||||||
range: [1, 2],
|
|
||||||
bucket_count: 3,
|
|
||||||
histogram_type: 3,
|
|
||||||
values: {0:1, 1:0},
|
|
||||||
sum: 0,
|
|
||||||
sum_squares_lo: 0,
|
|
||||||
sum_squares_hi: 0
|
|
||||||
};
|
|
||||||
let flag = payload.histograms[TELEMETRY_TEST_FLAG];
|
|
||||||
do_check_eq(uneval(flag), uneval(expected_flag));
|
|
||||||
|
|
||||||
// We should have a test count.
|
|
||||||
const expected_count = {
|
|
||||||
range: [1, 2],
|
|
||||||
bucket_count: 3,
|
|
||||||
histogram_type: 4,
|
|
||||||
values: {0:1, 1:0},
|
|
||||||
sum: 1,
|
|
||||||
sum_squares_lo: 1,
|
|
||||||
sum_squares_hi: 0,
|
|
||||||
};
|
|
||||||
let count = payload.histograms[TELEMETRY_TEST_COUNT];
|
|
||||||
do_check_eq(uneval(count), uneval(expected_count));
|
|
||||||
|
|
||||||
// There should be one successful report from the previous telemetry ping.
|
|
||||||
const expected_tc = {
|
|
||||||
range: [1, 2],
|
|
||||||
bucket_count: 3,
|
|
||||||
histogram_type: 2,
|
|
||||||
values: {0:1, 1:successfulPings, 2:0},
|
|
||||||
sum: successfulPings,
|
|
||||||
sum_squares_lo: successfulPings,
|
|
||||||
sum_squares_hi: 0
|
|
||||||
};
|
|
||||||
let tc = payload.histograms[TELEMETRY_SUCCESS];
|
|
||||||
do_check_eq(uneval(tc), uneval(expected_tc));
|
|
||||||
|
|
||||||
let h = payload.histograms[READ_SAVED_PING_SUCCESS];
|
|
||||||
do_check_eq(h.values[0], 1);
|
|
||||||
|
|
||||||
// The ping should include data from memory reporters. We can't check that
|
|
||||||
// this data is correct, because we can't control the values returned by the
|
|
||||||
// memory reporters. But we can at least check that the data is there.
|
|
||||||
//
|
|
||||||
// It's important to check for the presence of reporters with a mix of units,
|
|
||||||
// because TelemetryPing has separate logic for each one. But we can't
|
|
||||||
// currently check UNITS_COUNT_CUMULATIVE or UNITS_PERCENTAGE because
|
|
||||||
// Telemetry doesn't touch a memory reporter with these units that's
|
|
||||||
// available on all platforms.
|
|
||||||
|
|
||||||
do_check_true('MEMORY_JS_GC_HEAP' in payload.histograms); // UNITS_BYTES
|
|
||||||
do_check_true('MEMORY_JS_COMPARTMENTS_SYSTEM' in payload.histograms); // UNITS_COUNT
|
|
||||||
|
|
||||||
// We should have included addon histograms.
|
|
||||||
do_check_true("addonHistograms" in payload);
|
|
||||||
do_check_true(ADDON_NAME in payload.addonHistograms);
|
|
||||||
do_check_true(ADDON_HISTOGRAM in payload.addonHistograms[ADDON_NAME]);
|
|
||||||
|
|
||||||
do_check_true(("mainThread" in payload.slowSQL) &&
|
|
||||||
("otherThreads" in payload.slowSQL));
|
|
||||||
|
|
||||||
// Check keyed histogram payload.
|
|
||||||
|
|
||||||
do_check_true("keyedHistograms" in payload);
|
|
||||||
let keyedHistograms = payload.keyedHistograms;
|
|
||||||
do_check_true(TELEMETRY_TEST_KEYED_FLAG in keyedHistograms);
|
|
||||||
do_check_true(TELEMETRY_TEST_KEYED_COUNT in keyedHistograms);
|
|
||||||
|
|
||||||
Assert.deepEqual({}, keyedHistograms[TELEMETRY_TEST_KEYED_FLAG]);
|
|
||||||
|
|
||||||
const expected_keyed_count = {
|
|
||||||
"a": {
|
|
||||||
range: [1, 2],
|
|
||||||
bucket_count: 3,
|
|
||||||
histogram_type: 4,
|
|
||||||
values: {0:2, 1:0},
|
|
||||||
sum: 2,
|
|
||||||
sum_squares_lo: 2,
|
|
||||||
sum_squares_hi: 0,
|
|
||||||
},
|
|
||||||
"b": {
|
|
||||||
range: [1, 2],
|
|
||||||
bucket_count: 3,
|
|
||||||
histogram_type: 4,
|
|
||||||
values: {0:1, 1:0},
|
|
||||||
sum: 1,
|
|
||||||
sum_squares_lo: 1,
|
|
||||||
sum_squares_hi: 0,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
Assert.deepEqual(expected_keyed_count, keyedHistograms[TELEMETRY_TEST_KEYED_COUNT]);
|
|
||||||
}
|
|
||||||
|
|
||||||
// A fake plugin host for testing flash version telemetry
|
|
||||||
let PluginHost = {
|
|
||||||
getPluginTags: function(countRef) {
|
|
||||||
let plugins = [{name: "Shockwave Flash", version: FLASH_VERSION}];
|
|
||||||
countRef.value = plugins.length;
|
|
||||||
return plugins;
|
|
||||||
},
|
|
||||||
|
|
||||||
QueryInterface: function(iid) {
|
|
||||||
if (iid.equals(Ci.nsIPluginHost)
|
|
||||||
|| iid.equals(Ci.nsISupports))
|
|
||||||
return this;
|
|
||||||
|
|
||||||
throw Components.results.NS_ERROR_NO_INTERFACE;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let PluginHostFactory = {
|
|
||||||
createInstance: function (outer, iid) {
|
|
||||||
if (outer != null)
|
|
||||||
throw Components.results.NS_ERROR_NO_AGGREGATION;
|
|
||||||
return PluginHost.QueryInterface(iid);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
const PLUGINHOST_CONTRACTID = "@mozilla.org/plugin/host;1";
|
|
||||||
const PLUGINHOST_CID = Components.ID("{2329e6ea-1f15-4cbe-9ded-6e98e842de0e}");
|
|
||||||
|
|
||||||
function registerFakePluginHost() {
|
|
||||||
let registrar = Components.manager.QueryInterface(Ci.nsIComponentRegistrar);
|
|
||||||
registrar.registerFactory(PLUGINHOST_CID, "Fake Plugin Host",
|
|
||||||
PLUGINHOST_CONTRACTID, PluginHostFactory);
|
|
||||||
}
|
|
||||||
|
|
||||||
function writeStringToFile(file, contents) {
|
|
||||||
let ostream = Cc["@mozilla.org/network/safe-file-output-stream;1"]
|
|
||||||
.createInstance(Ci.nsIFileOutputStream);
|
|
||||||
ostream.init(file, PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE,
|
|
||||||
RW_OWNER, ostream.DEFER_OPEN);
|
|
||||||
ostream.write(contents, contents.length);
|
|
||||||
ostream.QueryInterface(Ci.nsISafeOutputStream).finish();
|
|
||||||
ostream.close();
|
|
||||||
}
|
|
||||||
|
|
||||||
function write_fake_shutdown_file() {
|
|
||||||
let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
|
|
||||||
let file = profileDirectory.clone();
|
|
||||||
file.append("Telemetry.ShutdownTime.txt");
|
|
||||||
let contents = "" + SHUTDOWN_TIME;
|
|
||||||
writeStringToFile(file, contents);
|
|
||||||
}
|
|
||||||
|
|
||||||
function write_fake_failedprofilelocks_file() {
|
|
||||||
let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
|
|
||||||
let file = profileDirectory.clone();
|
|
||||||
file.append("Telemetry.FailedProfileLocks.txt");
|
|
||||||
let contents = "" + FAILED_PROFILE_LOCK_ATTEMPTS;
|
|
||||||
writeStringToFile(file, contents);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function run_test() {
|
function run_test() {
|
||||||
|
@ -419,67 +184,13 @@ function run_test() {
|
||||||
gDatareportingService.observe(null, "profile-after-change", null);
|
gDatareportingService.observe(null, "profile-after-change", null);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make it look like we've previously failed to lock a profile a couple times.
|
Telemetry.asyncFetchTelemetryData(wrapWithExceptionHandler(run_next_test));
|
||||||
write_fake_failedprofilelocks_file();
|
|
||||||
|
|
||||||
// Make it look like we've shutdown before.
|
|
||||||
write_fake_shutdown_file();
|
|
||||||
|
|
||||||
let currentMaxNumberOfThreads = Telemetry.maximalNumberOfConcurrentThreads;
|
|
||||||
do_check_true(currentMaxNumberOfThreads > 0);
|
|
||||||
|
|
||||||
// Try to augment the maximal number of threads currently launched
|
|
||||||
let threads = [];
|
|
||||||
try {
|
|
||||||
for (let i = 0; i < currentMaxNumberOfThreads + 10; ++i) {
|
|
||||||
threads.push(Services.tm.newThread(0));
|
|
||||||
}
|
|
||||||
} catch (ex) {
|
|
||||||
// If memory is too low, it is possible that not all threads will be launched.
|
|
||||||
}
|
|
||||||
gNumberOfThreadsLaunched = threads.length;
|
|
||||||
|
|
||||||
do_check_true(Telemetry.maximalNumberOfConcurrentThreads >= gNumberOfThreadsLaunched);
|
|
||||||
|
|
||||||
do_register_cleanup(function() {
|
|
||||||
threads.forEach(function(thread) {
|
|
||||||
thread.shutdown();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
Telemetry.asyncFetchTelemetryData(wrapWithExceptionHandler(actualTest));
|
|
||||||
}
|
|
||||||
|
|
||||||
function actualTest() {
|
|
||||||
// try to make LightweightThemeManager do stuff
|
|
||||||
let gInternalManager = Cc["@mozilla.org/addons/integration;1"]
|
|
||||||
.getService(Ci.nsIObserver)
|
|
||||||
.QueryInterface(Ci.nsITimerCallback);
|
|
||||||
|
|
||||||
gInternalManager.observe(null, "addons-startup", null);
|
|
||||||
|
|
||||||
// fake plugin host for consistent flash version data
|
|
||||||
registerFakePluginHost();
|
|
||||||
|
|
||||||
run_next_test();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
add_task(function* asyncSetup() {
|
add_task(function* asyncSetup() {
|
||||||
yield TelemetrySession.setup();
|
|
||||||
yield TelemetryPing.setup();
|
yield TelemetryPing.setup();
|
||||||
|
|
||||||
if (HAS_DATAREPORTINGSERVICE) {
|
if (HAS_DATAREPORTINGSERVICE) {
|
||||||
// force getSessionRecorder()==undefined to check the payload's activeTicks
|
|
||||||
gDatareportingService.simulateNoSessionRecorder();
|
|
||||||
}
|
|
||||||
|
|
||||||
// When no DRS or no DRS.getSessionRecorder(), activeTicks should be -1.
|
|
||||||
do_check_eq(TelemetrySession.getPayload().simpleMeasurements.activeTicks, -1);
|
|
||||||
|
|
||||||
if (HAS_DATAREPORTINGSERVICE) {
|
|
||||||
// Restore normal behavior for getSessionRecorder()
|
|
||||||
gDatareportingService.simulateRestoreSessionRecorder();
|
|
||||||
|
|
||||||
gDataReportingClientID = yield gDatareportingService.getClientID();
|
gDataReportingClientID = yield gDatareportingService.getClientID();
|
||||||
|
|
||||||
// We should have cached the client id now. Lets confirm that by
|
// We should have cached the client id now. Lets confirm that by
|
||||||
|
@ -492,273 +203,68 @@ add_task(function* asyncSetup() {
|
||||||
|
|
||||||
// Ensure that not overwriting an existing file fails silently
|
// Ensure that not overwriting an existing file fails silently
|
||||||
add_task(function* test_overwritePing() {
|
add_task(function* test_overwritePing() {
|
||||||
let ping = {slug: "foo"}
|
let ping = {id: "foo"}
|
||||||
yield TelemetryFile.savePing(ping, true);
|
yield TelemetryFile.savePing(ping, true);
|
||||||
yield TelemetryFile.savePing(ping, false);
|
yield TelemetryFile.savePing(ping, false);
|
||||||
yield TelemetryFile.cleanupPingFile(ping);
|
yield TelemetryFile.cleanupPingFile(ping);
|
||||||
});
|
});
|
||||||
|
|
||||||
// Ensures that expired histograms are not part of the payload.
|
|
||||||
add_task(function* test_expiredHistogram() {
|
|
||||||
let histogram_id = "FOOBAR";
|
|
||||||
let dummy = Telemetry.newHistogram(histogram_id, "30", Telemetry.HISTOGRAM_EXPONENTIAL, 1, 2, 3);
|
|
||||||
|
|
||||||
dummy.add(1);
|
|
||||||
|
|
||||||
do_check_eq(TelemetrySession.getPayload()["histograms"][histogram_id], undefined);
|
|
||||||
do_check_eq(TelemetrySession.getPayload()["histograms"]["TELEMETRY_TEST_EXPIRED"], undefined);
|
|
||||||
});
|
|
||||||
|
|
||||||
// Checks that an invalid histogram file is deleted if TelemetryFile fails to parse it.
|
|
||||||
add_task(function* test_runInvalidJSON() {
|
|
||||||
let histogramsFile = getSavedHistogramsFile("invalid-histograms.dat");
|
|
||||||
|
|
||||||
writeStringToFile(histogramsFile, "this.is.invalid.JSON");
|
|
||||||
do_check_true(histogramsFile.exists());
|
|
||||||
|
|
||||||
yield TelemetrySession.testLoadHistograms(histogramsFile);
|
|
||||||
do_check_false(histogramsFile.exists());
|
|
||||||
});
|
|
||||||
|
|
||||||
// Sends a ping to a non existing server.
|
// Sends a ping to a non existing server.
|
||||||
add_task(function* test_noServerPing() {
|
add_task(function* test_noServerPing() {
|
||||||
yield sendPing();
|
yield sendPing(false, false);
|
||||||
});
|
});
|
||||||
|
|
||||||
// Checks that a sent ping is correctly received by a dummy http server.
|
// Checks that a sent ping is correctly received by a dummy http server.
|
||||||
add_task(function* test_simplePing() {
|
add_task(function* test_simplePing() {
|
||||||
gHttpServer.start(-1);
|
startWebserver();
|
||||||
gServerStarted = true;
|
|
||||||
gRequestIterator = Iterator(new Request());
|
|
||||||
|
|
||||||
yield sendPing();
|
yield sendPing(false, false);
|
||||||
let request = yield gRequestIterator.next();
|
let request = yield gRequestIterator.next();
|
||||||
let payload = decodeRequestPayload(request);
|
let ping = decodeRequestPayload(request);
|
||||||
|
checkPingFormat(ping, TEST_PING_TYPE, false, false);
|
||||||
checkPayloadInfo(payload, "test-ping");
|
|
||||||
});
|
});
|
||||||
|
|
||||||
// Saves the current session histograms, reloads them, perfoms a ping
|
add_task(function* test_pingHasClientId() {
|
||||||
// and checks that the dummy http server received both the previously
|
// Send a ping with a clientId.
|
||||||
// saved histograms and the new ones.
|
yield sendPing(true, false);
|
||||||
add_task(function* test_saveLoadPing() {
|
|
||||||
let histogramsFile = getSavedHistogramsFile("saved-histograms.dat");
|
|
||||||
|
|
||||||
setupTestData();
|
let request = yield gRequestIterator.next();
|
||||||
yield TelemetrySession.testSaveHistograms(histogramsFile);
|
let ping = decodeRequestPayload(request);
|
||||||
yield TelemetrySession.testLoadHistograms(histogramsFile);
|
checkPingFormat(ping, TEST_PING_TYPE, true, false);
|
||||||
yield sendPing();
|
|
||||||
|
|
||||||
// Get requests received by dummy server.
|
if (HAS_DATAREPORTINGSERVICE &&
|
||||||
let request1 = yield gRequestIterator.next();
|
Services.prefs.getBoolPref(PREF_FHR_UPLOAD_ENABLED)) {
|
||||||
let request2 = yield gRequestIterator.next();
|
Assert.equal(ping.clientId, gDataReportingClientID,
|
||||||
|
"The correct clientId must be reported.");
|
||||||
// We decode both requests to check for the |reason|.
|
|
||||||
let payload1 = decodeRequestPayload(request1);
|
|
||||||
let payload2 = decodeRequestPayload(request2);
|
|
||||||
|
|
||||||
// Check we have the correct two requests. Ordering is not guaranteed.
|
|
||||||
if (payload1.info.reason === "test-ping") {
|
|
||||||
checkPayload(request1, payload1, "test-ping", 1);
|
|
||||||
checkPayload(request2, payload2, "saved-session", 1);
|
|
||||||
} else {
|
|
||||||
checkPayload(request1, payload1, "saved-session", 1);
|
|
||||||
checkPayload(request2, payload2, "test-ping", 1);
|
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
add_task(function* test_checkSubsession() {
|
add_task(function* test_pingHasEnvironment() {
|
||||||
const COUNT_ID = "TELEMETRY_TEST_COUNT";
|
// Send a ping with the environment data.
|
||||||
const KEYED_ID = "TELEMETRY_TEST_KEYED_COUNT";
|
yield sendPing(false, true);
|
||||||
const count = Telemetry.getHistogramById(COUNT_ID);
|
let request = yield gRequestIterator.next();
|
||||||
const keyed = Telemetry.getKeyedHistogramById(KEYED_ID);
|
let ping = decodeRequestPayload(request);
|
||||||
const registeredIds =
|
checkPingFormat(ping, TEST_PING_TYPE, false, true);
|
||||||
new Set(Telemetry.registeredHistograms(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, []));
|
|
||||||
|
|
||||||
const stableHistograms = new Set([
|
// Test a field in the environment build section.
|
||||||
"TELEMETRY_TEST_FLAG",
|
Assert.equal(ping.application.buildId, ping.environment.build.buildId);
|
||||||
"TELEMETRY_TEST_COUNT",
|
|
||||||
"TELEMETRY_TEST_RELEASE_OPTOUT",
|
|
||||||
"TELEMETRY_TEST_RELEASE_OPTIN",
|
|
||||||
"STARTUP_CRASH_DETECTED",
|
|
||||||
]);
|
|
||||||
|
|
||||||
const stableKeyedHistograms = new Set([
|
|
||||||
"TELEMETRY_TEST_KEYED_FLAG",
|
|
||||||
"TELEMETRY_TEST_KEYED_COUNT",
|
|
||||||
"TELEMETRY_TEST_KEYED_RELEASE_OPTIN",
|
|
||||||
"TELEMETRY_TEST_KEYED_RELEASE_OPTOUT",
|
|
||||||
]);
|
|
||||||
|
|
||||||
// Compare the two sets of histograms.
|
|
||||||
// The "subsession" histograms should match the registered
|
|
||||||
// "classic" histograms. However, histograms can change
|
|
||||||
// between us collecting the different payloads, so we only
|
|
||||||
// check for deep equality on known stable histograms.
|
|
||||||
checkHistograms = (classic, subsession) => {
|
|
||||||
for (let id of Object.keys(classic)) {
|
|
||||||
if (!registeredIds.has(id)) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
Assert.ok(id in subsession);
|
|
||||||
if (stableHistograms.has(id)) {
|
|
||||||
Assert.deepEqual(classic[id],
|
|
||||||
subsession[id]);
|
|
||||||
} else {
|
|
||||||
Assert.equal(classic[id].histogram_type,
|
|
||||||
subsession[id].histogram_type);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Same as above, except for keyed histograms.
|
|
||||||
checkKeyedHistograms = (classic, subsession) => {
|
|
||||||
for (let id of Object.keys(classic)) {
|
|
||||||
if (!registeredIds.has(id)) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
Assert.ok(id in subsession);
|
|
||||||
if (stableKeyedHistograms.has(id)) {
|
|
||||||
Assert.deepEqual(classic[id],
|
|
||||||
subsession[id]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Both classic and subsession payload histograms should start the same.
|
|
||||||
// The payloads should be identical for now except for the reason.
|
|
||||||
count.clear();
|
|
||||||
keyed.clear();
|
|
||||||
let classic = TelemetrySession.getPayload();
|
|
||||||
let subsession = TelemetrySession.getPayload("environment-change");
|
|
||||||
|
|
||||||
Assert.equal(classic.info.reason, "gather-payload");
|
|
||||||
Assert.equal(subsession.info.reason, "environment-change");
|
|
||||||
Assert.ok(!(COUNT_ID in classic.histograms));
|
|
||||||
Assert.ok(!(COUNT_ID in subsession.histograms));
|
|
||||||
Assert.ok(KEYED_ID in classic.keyedHistograms);
|
|
||||||
Assert.ok(KEYED_ID in subsession.keyedHistograms);
|
|
||||||
Assert.deepEqual(classic.keyedHistograms[KEYED_ID], {});
|
|
||||||
Assert.deepEqual(subsession.keyedHistograms[KEYED_ID], {});
|
|
||||||
|
|
||||||
checkHistograms(classic.histograms, subsession.histograms);
|
|
||||||
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
|
|
||||||
|
|
||||||
// Adding values should get picked up in both.
|
|
||||||
count.add(1);
|
|
||||||
keyed.add("a", 1);
|
|
||||||
keyed.add("b", 1);
|
|
||||||
classic = TelemetrySession.getPayload();
|
|
||||||
subsession = TelemetrySession.getPayload("environment-change");
|
|
||||||
|
|
||||||
Assert.ok(COUNT_ID in classic.histograms);
|
|
||||||
Assert.ok(COUNT_ID in subsession.histograms);
|
|
||||||
Assert.ok(KEYED_ID in classic.keyedHistograms);
|
|
||||||
Assert.ok(KEYED_ID in subsession.keyedHistograms);
|
|
||||||
Assert.equal(classic.histograms[COUNT_ID].sum, 1);
|
|
||||||
Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
|
|
||||||
Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);
|
|
||||||
|
|
||||||
checkHistograms(classic.histograms, subsession.histograms);
|
|
||||||
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
|
|
||||||
|
|
||||||
// Values should still reset properly.
|
|
||||||
count.clear();
|
|
||||||
keyed.clear();
|
|
||||||
classic = TelemetrySession.getPayload();
|
|
||||||
subsession = TelemetrySession.getPayload("environment-change");
|
|
||||||
|
|
||||||
Assert.ok(!(COUNT_ID in classic.histograms));
|
|
||||||
Assert.ok(!(COUNT_ID in subsession.histograms));
|
|
||||||
Assert.ok(KEYED_ID in classic.keyedHistograms);
|
|
||||||
Assert.ok(KEYED_ID in subsession.keyedHistograms);
|
|
||||||
Assert.deepEqual(classic.keyedHistograms[KEYED_ID], {});
|
|
||||||
|
|
||||||
checkHistograms(classic.histograms, subsession.histograms);
|
|
||||||
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
|
|
||||||
|
|
||||||
// Adding values should get picked up in both.
|
|
||||||
count.add(1);
|
|
||||||
keyed.add("a", 1);
|
|
||||||
keyed.add("b", 1);
|
|
||||||
classic = TelemetrySession.getPayload();
|
|
||||||
subsession = TelemetrySession.getPayload("environment-change");
|
|
||||||
|
|
||||||
Assert.ok(COUNT_ID in classic.histograms);
|
|
||||||
Assert.ok(COUNT_ID in subsession.histograms);
|
|
||||||
Assert.ok(KEYED_ID in classic.keyedHistograms);
|
|
||||||
Assert.ok(KEYED_ID in subsession.keyedHistograms);
|
|
||||||
Assert.equal(classic.histograms[COUNT_ID].sum, 1);
|
|
||||||
Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
|
|
||||||
Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);
|
|
||||||
|
|
||||||
checkHistograms(classic.histograms, subsession.histograms);
|
|
||||||
checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
|
|
||||||
|
|
||||||
// We should be able to reset only the subsession histograms.
|
|
||||||
count.clear(true);
|
|
||||||
keyed.clear(true);
|
|
||||||
classic = TelemetrySession.getPayload();
|
|
||||||
subsession = TelemetrySession.getPayload("environment-change");
|
|
||||||
|
|
||||||
Assert.ok(COUNT_ID in classic.histograms);
|
|
||||||
Assert.ok(COUNT_ID in subsession.histograms);
|
|
||||||
Assert.equal(classic.histograms[COUNT_ID].sum, 1);
|
|
||||||
Assert.equal(subsession.histograms[COUNT_ID].sum, 0);
|
|
||||||
|
|
||||||
Assert.ok(KEYED_ID in classic.keyedHistograms);
|
|
||||||
Assert.ok(KEYED_ID in subsession.keyedHistograms);
|
|
||||||
Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
|
|
||||||
Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);
|
|
||||||
Assert.deepEqual(subsession.keyedHistograms[KEYED_ID], {});
|
|
||||||
|
|
||||||
// Adding values should get picked up in both again.
|
|
||||||
count.add(1);
|
|
||||||
keyed.add("a", 1);
|
|
||||||
keyed.add("b", 1);
|
|
||||||
classic = TelemetrySession.getPayload();
|
|
||||||
subsession = TelemetrySession.getPayload("environment-change");
|
|
||||||
|
|
||||||
Assert.ok(COUNT_ID in classic.histograms);
|
|
||||||
Assert.ok(COUNT_ID in subsession.histograms);
|
|
||||||
Assert.equal(classic.histograms[COUNT_ID].sum, 2);
|
|
||||||
Assert.equal(subsession.histograms[COUNT_ID].sum, 1);
|
|
||||||
|
|
||||||
Assert.ok(KEYED_ID in classic.keyedHistograms);
|
|
||||||
Assert.ok(KEYED_ID in subsession.keyedHistograms);
|
|
||||||
Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 2);
|
|
||||||
Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 2);
|
|
||||||
Assert.equal(subsession.keyedHistograms[KEYED_ID]["a"].sum, 1);
|
|
||||||
Assert.equal(subsession.keyedHistograms[KEYED_ID]["b"].sum, 1);
|
|
||||||
});
|
});
|
||||||
|
|
||||||
// Checks that an expired histogram file is deleted when loaded.
|
add_task(function* test_pingHasEnvironmentAndClientId() {
|
||||||
add_task(function* test_runOldPingFile() {
|
// Send a ping with the environment data and client id.
|
||||||
let histogramsFile = getSavedHistogramsFile("old-histograms.dat");
|
yield sendPing(true, true);
|
||||||
|
let request = yield gRequestIterator.next();
|
||||||
|
let ping = decodeRequestPayload(request);
|
||||||
|
checkPingFormat(ping, TEST_PING_TYPE, true, true);
|
||||||
|
|
||||||
yield TelemetrySession.testSaveHistograms(histogramsFile);
|
// Test a field in the environment build section.
|
||||||
do_check_true(histogramsFile.exists());
|
Assert.equal(ping.application.buildId, ping.environment.build.buildId);
|
||||||
let mtime = histogramsFile.lastModifiedTime;
|
// Test that we have the correct clientId.
|
||||||
histogramsFile.lastModifiedTime = mtime - (14 * 24 * 60 * 60 * 1000 + 60000); // 14 days, 1m
|
if (HAS_DATAREPORTINGSERVICE &&
|
||||||
|
Services.prefs.getBoolPref(PREF_FHR_UPLOAD_ENABLED)) {
|
||||||
yield TelemetrySession.testLoadHistograms(histogramsFile);
|
Assert.equal(ping.clientId, gDataReportingClientID,
|
||||||
do_check_false(histogramsFile.exists());
|
"The correct clientId must be reported.");
|
||||||
});
|
}
|
||||||
|
|
||||||
add_task(function* test_savedSessionClientID() {
|
|
||||||
// Assure that we store the ping properly when saving sessions on shutdown.
|
|
||||||
// We make the TelemetrySession shutdown to trigger a session save.
|
|
||||||
const dir = TelemetryFile.pingDirectoryPath;
|
|
||||||
yield OS.File.removeDir(dir, {ignoreAbsent: true});
|
|
||||||
yield OS.File.makeDir(dir);
|
|
||||||
yield TelemetrySession.shutdown();
|
|
||||||
|
|
||||||
yield TelemetryFile.loadSavedPings();
|
|
||||||
Assert.equal(TelemetryFile.pingsLoaded, 1);
|
|
||||||
let ping = TelemetryFile.popPendingPings().next();
|
|
||||||
Assert.equal(ping.value.payload.clientID, gDataReportingClientID);
|
|
||||||
});
|
});
|
||||||
|
|
||||||
add_task(function* stopServer(){
|
add_task(function* stopServer(){
|
||||||
|
|
|
@ -17,12 +17,12 @@ const Ci = Components.interfaces;
|
||||||
const Cr = Components.results;
|
const Cr = Components.results;
|
||||||
const Cu = Components.utils;
|
const Cu = Components.utils;
|
||||||
|
|
||||||
|
Cu.import("resource://gre/modules/osfile.jsm", this);
|
||||||
Cu.import("resource://gre/modules/Services.jsm", this);
|
Cu.import("resource://gre/modules/Services.jsm", this);
|
||||||
Cu.import("resource://testing-common/httpd.js", this);
|
Cu.import("resource://testing-common/httpd.js", this);
|
||||||
Cu.import("resource://gre/modules/Promise.jsm", this);
|
Cu.import("resource://gre/modules/Promise.jsm", this);
|
||||||
Cu.import("resource://gre/modules/TelemetryFile.jsm", this);
|
Cu.import("resource://gre/modules/TelemetryFile.jsm", this);
|
||||||
Cu.import("resource://gre/modules/TelemetryPing.jsm", this);
|
Cu.import("resource://gre/modules/TelemetryPing.jsm", this);
|
||||||
Cu.import("resource://gre/modules/TelemetrySession.jsm", this);
|
|
||||||
Cu.import("resource://gre/modules/Task.jsm", this);
|
Cu.import("resource://gre/modules/Task.jsm", this);
|
||||||
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
|
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
|
||||||
let {OS: {File, Path, Constants}} = Cu.import("resource://gre/modules/osfile.jsm", {});
|
let {OS: {File, Path, Constants}} = Cu.import("resource://gre/modules/osfile.jsm", {});
|
||||||
|
@ -53,70 +53,66 @@ let gCreatedPings = 0;
|
||||||
let gSeenPings = 0;
|
let gSeenPings = 0;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates some TelemetrySession pings for the current session and
|
* Creates some Telemetry pings for the and saves them to disk. Each ping gets a
|
||||||
* saves them to disk. Each ping gets a unique ID slug based on
|
* unique ID based on an incrementor.
|
||||||
* an incrementor.
|
|
||||||
*
|
*
|
||||||
* @param aNum the number of pings to create.
|
* @param {Array} aPingInfos An array of ping type objects. Each entry must be an
|
||||||
* @param aAge the age in milliseconds to offset from now. A value
|
* object containing a "num" field for the number of pings to create and
|
||||||
* of 10 would make the ping 10ms older than now, for
|
* an "age" field. The latter representing the age in milliseconds to offset
|
||||||
* example.
|
* from now. A value of 10 would make the ping 10ms older than now, for
|
||||||
|
* example.
|
||||||
* @returns Promise
|
* @returns Promise
|
||||||
* @resolve an Array with the created pings.
|
* @resolve an Array with the created pings ids.
|
||||||
*/
|
*/
|
||||||
function createSavedPings(aNum, aAge) {
|
let createSavedPings = Task.async(function* (aPingInfos) {
|
||||||
return Task.spawn(function*(){
|
let pingIds = [];
|
||||||
let pings = [];
|
let now = Date.now();
|
||||||
let age = Date.now() - aAge;
|
|
||||||
|
|
||||||
for (let i = 0; i < aNum; ++i) {
|
for (let type in aPingInfos) {
|
||||||
let payload = TelemetrySession.getPayload();
|
let num = aPingInfos[type].num;
|
||||||
let ping = { slug: "test-ping-" + gCreatedPings, reason: "test", payload: payload };
|
let age = now - aPingInfos[type].age;
|
||||||
|
for (let i = 0; i < num; ++i) {
|
||||||
yield TelemetryFile.savePing(ping);
|
let pingId = yield TelemetryPing.testSavePingToFile("test-ping", {}, { overwrite: true });
|
||||||
|
if (aPingInfos[type].age) {
|
||||||
if (aAge) {
|
|
||||||
// savePing writes to the file synchronously, so we're good to
|
// savePing writes to the file synchronously, so we're good to
|
||||||
// modify the lastModifedTime now.
|
// modify the lastModifedTime now.
|
||||||
let file = getSavePathForPing(ping);
|
let filePath = getSavePathForPingId(pingId);
|
||||||
yield File.setDates(file, null, age);
|
yield File.setDates(filePath, null, age);
|
||||||
}
|
}
|
||||||
gCreatedPings++;
|
gCreatedPings++;
|
||||||
pings.push(ping);
|
pingIds.push(pingId);
|
||||||
}
|
}
|
||||||
return pings;
|
}
|
||||||
});
|
|
||||||
}
|
return pingIds;
|
||||||
|
});
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Deletes locally saved pings in aPings if they
|
* Deletes locally saved pings if they exist.
|
||||||
* exist.
|
|
||||||
*
|
*
|
||||||
* @param aPings an Array of pings to delete.
|
* @param aPingIds an Array of ping ids to delete.
|
||||||
* @returns Promise
|
* @returns Promise
|
||||||
*/
|
*/
|
||||||
function clearPings(aPings) {
|
let clearPings = Task.async(function* (aPingIds) {
|
||||||
return Task.spawn(function*() {
|
for (let pingId of aPingIds) {
|
||||||
for (let ping of aPings) {
|
let filePath = getSavePathForPingId(pingId);
|
||||||
let path = getSavePathForPing(ping);
|
yield File.remove(filePath);
|
||||||
yield File.remove(path);
|
}
|
||||||
}
|
});
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns a handle for the file that aPing should be
|
* Returns a handle for the file that a ping should be
|
||||||
* stored in locally.
|
* stored in locally.
|
||||||
*
|
*
|
||||||
* @returns path
|
* @returns path
|
||||||
*/
|
*/
|
||||||
function getSavePathForPing(aPing) {
|
function getSavePathForPingId(aPingId) {
|
||||||
return Path.join(Constants.Path.profileDir, PING_SAVE_FOLDER, aPing.slug);
|
return Path.join(Constants.Path.profileDir, PING_SAVE_FOLDER, aPingId);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check if the number of TelemetrySession pings received by the
|
* Check if the number of Telemetry pings received by the HttpServer is not equal
|
||||||
* HttpServer is not equal to aExpectedNum.
|
* to aExpectedNum.
|
||||||
*
|
*
|
||||||
* @param aExpectedNum the number of pings we expect to receive.
|
* @param aExpectedNum the number of pings we expect to receive.
|
||||||
*/
|
*/
|
||||||
|
@ -125,30 +121,28 @@ function assertReceivedPings(aExpectedNum) {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Throws if any pings in aPings is saved locally.
|
* Throws if any pings with the id in aPingIds is saved locally.
|
||||||
*
|
*
|
||||||
* @param aPings an Array of pings to check.
|
* @param aPingIds an Array of pings ids to check.
|
||||||
* @returns Promise
|
* @returns Promise
|
||||||
*/
|
*/
|
||||||
function assertNotSaved(aPings) {
|
let assertNotSaved = Task.async(function* (aPingIds) {
|
||||||
return Task.spawn(function*() {
|
let saved = 0;
|
||||||
let saved = 0;
|
for (let id of aPingIds) {
|
||||||
for (let ping of aPings) {
|
let filePath = getSavePathForPingId(id);
|
||||||
let file = getSavePathForPing(ping);
|
if (yield File.exists(filePath)) {
|
||||||
if (yield File.exists()) {
|
saved++;
|
||||||
saved++;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if (saved > 0) {
|
}
|
||||||
do_throw("Found " + saved + " unexpected saved pings.");
|
if (saved > 0) {
|
||||||
}
|
do_throw("Found " + saved + " unexpected saved pings.");
|
||||||
});
|
}
|
||||||
}
|
});
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Our handler function for the HttpServer that simply
|
* Our handler function for the HttpServer that simply
|
||||||
* increments the gSeenPings global when it successfully
|
* increments the gSeenPings global when it successfully
|
||||||
* receives and decodes a TelemetrySession payload.
|
* receives and decodes a Telemetry payload.
|
||||||
*
|
*
|
||||||
* @param aRequest the HTTP request sent from HttpServer.
|
* @param aRequest the HTTP request sent from HttpServer.
|
||||||
*/
|
*/
|
||||||
|
@ -174,7 +168,6 @@ function stopHttpServer() {
|
||||||
* Reset Telemetry state.
|
* Reset Telemetry state.
|
||||||
*/
|
*/
|
||||||
function resetTelemetry() {
|
function resetTelemetry() {
|
||||||
TelemetrySession.uninstall();
|
|
||||||
// Quick and dirty way to clear TelemetryFile's pendingPings
|
// Quick and dirty way to clear TelemetryFile's pendingPings
|
||||||
// collection, and put it back in its initial state.
|
// collection, and put it back in its initial state.
|
||||||
let gen = TelemetryFile.popPendingPings();
|
let gen = TelemetryFile.popPendingPings();
|
||||||
|
@ -189,10 +182,6 @@ function startTelemetry() {
|
||||||
return TelemetryPing.setup();
|
return TelemetryPing.setup();
|
||||||
}
|
}
|
||||||
|
|
||||||
function startTelemetrySession() {
|
|
||||||
return TelemetrySession.setup();
|
|
||||||
}
|
|
||||||
|
|
||||||
function run_test() {
|
function run_test() {
|
||||||
gHttpServer.registerPrefixHandler("/submit/telemetry/", pingHandler);
|
gHttpServer.registerPrefixHandler("/submit/telemetry/", pingHandler);
|
||||||
gHttpServer.start(-1);
|
gHttpServer.start(-1);
|
||||||
|
@ -209,13 +198,26 @@ function run_test() {
|
||||||
run_next_test();
|
run_next_test();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Setup the tests by making sure the ping storage directory is available, otherwise
|
||||||
|
* |TelemetryPing.testSaveDirectoryToFile| could fail.
|
||||||
|
*/
|
||||||
|
add_task(function* setupEnvironment() {
|
||||||
|
yield TelemetryPing.setup();
|
||||||
|
|
||||||
|
let directory = TelemetryFile.pingDirectoryPath;
|
||||||
|
yield File.makeDir(directory, { ignoreExisting: true, unixMode: OS.Constants.S_IRWXU });
|
||||||
|
|
||||||
|
yield resetTelemetry();
|
||||||
|
});
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Test that pings that are considered too old are just chucked out
|
* Test that pings that are considered too old are just chucked out
|
||||||
* immediately and never sent.
|
* immediately and never sent.
|
||||||
*/
|
*/
|
||||||
add_task(function* test_expired_pings_are_deleted() {
|
add_task(function* test_expired_pings_are_deleted() {
|
||||||
yield startTelemetrySession();
|
let pingTypes = [{ num: EXPIRED_PINGS, age: EXPIRED_PING_FILE_AGE }];
|
||||||
let expiredPings = yield createSavedPings(EXPIRED_PINGS, EXPIRED_PING_FILE_AGE);
|
let expiredPings = yield createSavedPings(pingTypes);
|
||||||
yield startTelemetry();
|
yield startTelemetry();
|
||||||
assertReceivedPings(0);
|
assertReceivedPings(0);
|
||||||
yield assertNotSaved(expiredPings);
|
yield assertNotSaved(expiredPings);
|
||||||
|
@ -226,8 +228,8 @@ add_task(function* test_expired_pings_are_deleted() {
|
||||||
* Test that really recent pings are not sent on Telemetry initialization.
|
* Test that really recent pings are not sent on Telemetry initialization.
|
||||||
*/
|
*/
|
||||||
add_task(function* test_recent_pings_not_sent() {
|
add_task(function* test_recent_pings_not_sent() {
|
||||||
yield startTelemetrySession();
|
let pingTypes = [{ num: RECENT_PINGS }];
|
||||||
let recentPings = yield createSavedPings(RECENT_PINGS);
|
let recentPings = yield createSavedPings(pingTypes);
|
||||||
yield startTelemetry();
|
yield startTelemetry();
|
||||||
assertReceivedPings(0);
|
assertReceivedPings(0);
|
||||||
yield resetTelemetry();
|
yield resetTelemetry();
|
||||||
|
@ -238,17 +240,20 @@ add_task(function* test_recent_pings_not_sent() {
|
||||||
* Test that only the most recent LRU_PINGS pings are kept at startup.
|
* Test that only the most recent LRU_PINGS pings are kept at startup.
|
||||||
*/
|
*/
|
||||||
add_task(function* test_most_recent_pings_kept() {
|
add_task(function* test_most_recent_pings_kept() {
|
||||||
yield startTelemetrySession();
|
let pingTypes = [
|
||||||
let head = yield createSavedPings(LRU_PINGS);
|
{ num: LRU_PINGS },
|
||||||
let tail = yield createSavedPings(3, ONE_MINUTE_MS);
|
{ num: 3, age: ONE_MINUTE_MS },
|
||||||
let pings = head.concat(tail);
|
];
|
||||||
|
let pings = yield createSavedPings(pingTypes);
|
||||||
|
let head = pings.slice(0, LRU_PINGS);
|
||||||
|
let tail = pings.slice(-3);
|
||||||
|
|
||||||
yield startTelemetry();
|
yield startTelemetry();
|
||||||
let gen = TelemetryFile.popPendingPings();
|
let gen = TelemetryFile.popPendingPings();
|
||||||
|
|
||||||
for (let item of gen) {
|
for (let item of gen) {
|
||||||
for (let p of tail) {
|
for (let id of tail) {
|
||||||
do_check_neq(p.slug, item.slug);
|
do_check_neq(id, item.id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -263,10 +268,15 @@ add_task(function* test_most_recent_pings_kept() {
|
||||||
* should just be deleted.
|
* should just be deleted.
|
||||||
*/
|
*/
|
||||||
add_task(function* test_overdue_pings_trigger_send() {
|
add_task(function* test_overdue_pings_trigger_send() {
|
||||||
yield startTelemetrySession();
|
let pingTypes = [
|
||||||
let recentPings = yield createSavedPings(RECENT_PINGS);
|
{ num: RECENT_PINGS },
|
||||||
let expiredPings = yield createSavedPings(EXPIRED_PINGS, EXPIRED_PING_FILE_AGE);
|
{ num: EXPIRED_PINGS, age: EXPIRED_PING_FILE_AGE },
|
||||||
let overduePings = yield createSavedPings(OVERDUE_PINGS, OVERDUE_PING_FILE_AGE);
|
{ num: OVERDUE_PINGS, age: OVERDUE_PING_FILE_AGE },
|
||||||
|
];
|
||||||
|
let pings = yield createSavedPings(pingTypes);
|
||||||
|
let recentPings = pings.slice(0, RECENT_PINGS);
|
||||||
|
let expiredPings = pings.slice(RECENT_PINGS, RECENT_PINGS + EXPIRED_PINGS);
|
||||||
|
let overduePings = pings.slice(-OVERDUE_PINGS);
|
||||||
|
|
||||||
yield startTelemetry();
|
yield startTelemetry();
|
||||||
assertReceivedPings(TOTAL_EXPECTED_PINGS);
|
assertReceivedPings(TOTAL_EXPECTED_PINGS);
|
||||||
|
|
|
@ -0,0 +1,762 @@
|
||||||
|
/* Any copyright is dedicated to the Public Domain.
|
||||||
|
http://creativecommons.org/publicdomain/zero/1.0/
|
||||||
|
*/
|
||||||
|
/* This testcase triggers two telemetry pings.
|
||||||
|
*
|
||||||
|
* Telemetry code keeps histograms of past telemetry pings. The first
|
||||||
|
* ping populates these histograms. One of those histograms is then
|
||||||
|
* checked in the second request.
|
||||||
|
*/
|
||||||
|
|
||||||
|
const Cc = Components.classes;
|
||||||
|
const Ci = Components.interfaces;
|
||||||
|
const Cu = Components.utils;
|
||||||
|
const Cr = Components.results;
|
||||||
|
|
||||||
|
Cu.import("resource://testing-common/httpd.js", this);
|
||||||
|
Cu.import("resource://gre/modules/Services.jsm");
|
||||||
|
Cu.import("resource://gre/modules/LightweightThemeManager.jsm", this);
|
||||||
|
Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
|
||||||
|
Cu.import("resource://gre/modules/TelemetryPing.jsm", this);
|
||||||
|
Cu.import("resource://gre/modules/TelemetrySession.jsm", this);
|
||||||
|
Cu.import("resource://gre/modules/TelemetryFile.jsm", this);
|
||||||
|
Cu.import("resource://gre/modules/Task.jsm", this);
|
||||||
|
Cu.import("resource://gre/modules/Promise.jsm", this);
|
||||||
|
Cu.import("resource://gre/modules/Preferences.jsm");
|
||||||
|
Cu.import("resource://gre/modules/osfile.jsm", this);
|
||||||
|
|
||||||
|
const PING_FORMAT_VERSION = 2;
|
||||||
|
const PING_TYPE = "main";
|
||||||
|
|
||||||
|
const PLATFORM_VERSION = "1.9.2";
|
||||||
|
const APP_VERSION = "1";
|
||||||
|
const APP_ID = "xpcshell@tests.mozilla.org";
|
||||||
|
const APP_NAME = "XPCShell";
|
||||||
|
|
||||||
|
const IGNORE_HISTOGRAM = "test::ignore_me";
|
||||||
|
const IGNORE_HISTOGRAM_TO_CLONE = "MEMORY_HEAP_ALLOCATED";
|
||||||
|
const IGNORE_CLONED_HISTOGRAM = "test::ignore_me_also";
|
||||||
|
const ADDON_NAME = "Telemetry test addon";
|
||||||
|
const ADDON_HISTOGRAM = "addon-histogram";
|
||||||
|
// Add some unicode characters here to ensure that sending them works correctly.
|
||||||
|
const SHUTDOWN_TIME = 10000;
|
||||||
|
const FAILED_PROFILE_LOCK_ATTEMPTS = 2;
|
||||||
|
|
||||||
|
// Constants from prio.h for nsIFileOutputStream.init
|
||||||
|
const PR_WRONLY = 0x2;
|
||||||
|
const PR_CREATE_FILE = 0x8;
|
||||||
|
const PR_TRUNCATE = 0x20;
|
||||||
|
const RW_OWNER = parseInt("0600", 8);
|
||||||
|
|
||||||
|
const NUMBER_OF_THREADS_TO_LAUNCH = 30;
|
||||||
|
let gNumberOfThreadsLaunched = 0;
|
||||||
|
|
||||||
|
const PREF_BRANCH = "toolkit.telemetry.";
|
||||||
|
const PREF_ENABLED = PREF_BRANCH + "enabled";
|
||||||
|
const PREF_FHR_UPLOAD_ENABLED = "datareporting.healthreport.uploadEnabled";
|
||||||
|
const PREF_FHR_SERVICE_ENABLED = "datareporting.healthreport.service.enabled";
|
||||||
|
|
||||||
|
const HAS_DATAREPORTINGSERVICE = "@mozilla.org/datareporting/service;1" in Cc;
|
||||||
|
const SESSION_RECORDER_EXPECTED = HAS_DATAREPORTINGSERVICE &&
|
||||||
|
Preferences.get(PREF_FHR_SERVICE_ENABLED, true);
|
||||||
|
|
||||||
|
const Telemetry = Cc["@mozilla.org/base/telemetry;1"].getService(Ci.nsITelemetry);
|
||||||
|
|
||||||
|
let gHttpServer = new HttpServer();
|
||||||
|
let gServerStarted = false;
|
||||||
|
let gRequestIterator = null;
|
||||||
|
let gDataReportingClientID = null;
|
||||||
|
|
||||||
|
XPCOMUtils.defineLazyGetter(this, "gDatareportingService",
|
||||||
|
() => Cc["@mozilla.org/datareporting/service;1"]
|
||||||
|
.getService(Ci.nsISupports)
|
||||||
|
.wrappedJSObject);
|
||||||
|
|
||||||
|
function sendPing() {
|
||||||
|
TelemetrySession.gatherStartup();
|
||||||
|
if (gServerStarted) {
|
||||||
|
TelemetryPing.setServer("http://localhost:" + gHttpServer.identity.primaryPort);
|
||||||
|
return TelemetrySession.testPing();
|
||||||
|
} else {
|
||||||
|
TelemetryPing.setServer("http://doesnotexist");
|
||||||
|
return TelemetrySession.testPing();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function wrapWithExceptionHandler(f) {
|
||||||
|
function wrapper(...args) {
|
||||||
|
try {
|
||||||
|
f(...args);
|
||||||
|
} catch (ex if typeof(ex) == 'object') {
|
||||||
|
dump("Caught exception: " + ex.message + "\n");
|
||||||
|
dump(ex.stack);
|
||||||
|
do_test_finished();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return wrapper;
|
||||||
|
}
|
||||||
|
|
||||||
|
function registerPingHandler(handler) {
|
||||||
|
gHttpServer.registerPrefixHandler("/submit/telemetry/",
|
||||||
|
wrapWithExceptionHandler(handler));
|
||||||
|
}
|
||||||
|
|
||||||
|
function setupTestData() {
|
||||||
|
Telemetry.newHistogram(IGNORE_HISTOGRAM, "never", Telemetry.HISTOGRAM_BOOLEAN);
|
||||||
|
Telemetry.histogramFrom(IGNORE_CLONED_HISTOGRAM, IGNORE_HISTOGRAM_TO_CLONE);
|
||||||
|
Services.startup.interrupted = true;
|
||||||
|
Telemetry.registerAddonHistogram(ADDON_NAME, ADDON_HISTOGRAM,
|
||||||
|
Telemetry.HISTOGRAM_LINEAR,
|
||||||
|
1, 5, 6);
|
||||||
|
let h1 = Telemetry.getAddonHistogram(ADDON_NAME, ADDON_HISTOGRAM);
|
||||||
|
h1.add(1);
|
||||||
|
let h2 = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT");
|
||||||
|
h2.add();
|
||||||
|
|
||||||
|
let k1 = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_COUNT");
|
||||||
|
k1.add("a");
|
||||||
|
k1.add("a");
|
||||||
|
k1.add("b");
|
||||||
|
}
|
||||||
|
|
||||||
|
function getSavedPingFile(basename) {
|
||||||
|
let tmpDir = Services.dirsvc.get("ProfD", Ci.nsIFile);
|
||||||
|
let pingFile = tmpDir.clone();
|
||||||
|
pingFile.append(basename);
|
||||||
|
if (pingFile.exists()) {
|
||||||
|
pingFile.remove(true);
|
||||||
|
}
|
||||||
|
do_register_cleanup(function () {
|
||||||
|
try {
|
||||||
|
pingFile.remove(true);
|
||||||
|
} catch (e) {
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return pingFile;
|
||||||
|
}
|
||||||
|
|
||||||
|
function decodeRequestPayload(request) {
|
||||||
|
let s = request.bodyInputStream;
|
||||||
|
let payload = null;
|
||||||
|
let decoder = Cc["@mozilla.org/dom/json;1"].createInstance(Ci.nsIJSON)
|
||||||
|
|
||||||
|
if (request.getHeader("content-encoding") == "gzip") {
|
||||||
|
let observer = {
|
||||||
|
buffer: "",
|
||||||
|
onStreamComplete: function(loader, context, status, length, result) {
|
||||||
|
this.buffer = String.fromCharCode.apply(this, result);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let scs = Cc["@mozilla.org/streamConverters;1"]
|
||||||
|
.getService(Ci.nsIStreamConverterService);
|
||||||
|
let listener = Cc["@mozilla.org/network/stream-loader;1"]
|
||||||
|
.createInstance(Ci.nsIStreamLoader);
|
||||||
|
listener.init(observer);
|
||||||
|
let converter = scs.asyncConvertData("gzip", "uncompressed",
|
||||||
|
listener, null);
|
||||||
|
converter.onStartRequest(null, null);
|
||||||
|
converter.onDataAvailable(null, null, s, 0, s.available());
|
||||||
|
converter.onStopRequest(null, null, null);
|
||||||
|
let unicodeConverter = Cc["@mozilla.org/intl/scriptableunicodeconverter"]
|
||||||
|
.createInstance(Ci.nsIScriptableUnicodeConverter);
|
||||||
|
unicodeConverter.charset = "UTF-8";
|
||||||
|
let utf8string = unicodeConverter.ConvertToUnicode(observer.buffer);
|
||||||
|
utf8string += unicodeConverter.Finish();
|
||||||
|
payload = decoder.decode(utf8string);
|
||||||
|
} else {
|
||||||
|
payload = decoder.decodeFromStream(s, s.available());
|
||||||
|
}
|
||||||
|
|
||||||
|
return payload;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Checks that a ping has the right format: all the mandatory top-level
 * fields, the expected type and version, and the expected application
 * section contents.
 *
 * @param aPing The ping object to check.
 * @param aType The expected ping type.
 * @param aHasClientId Whether the ping must carry a clientId field.
 * @param aHasEnvironment Whether the ping must carry an environment field.
 */
function checkPingFormat(aPing, aType, aHasClientId, aHasEnvironment) {
  const MANDATORY_PING_FIELDS = [
    "type", "id", "creationDate", "version", "application", "payload"
  ];

  const APPLICATION_TEST_DATA = {
    buildId: "2007010101",
    name: APP_NAME,
    version: APP_VERSION,
    vendor: "Mozilla",
    platformVersion: PLATFORM_VERSION,
    xpcomAbi: "noarch-spidermonkey",
  };

  // Check that the ping contains all the mandatory fields.
  for (let f of MANDATORY_PING_FIELDS) {
    // Fixed: the message was missing a leading space ("typemust be available.").
    Assert.ok(f in aPing, f + " must be available.");
  }

  Assert.equal(aPing.type, aType, "The ping must have the correct type.");
  Assert.equal(aPing.version, PING_FORMAT_VERSION, "The ping must have the correct version.");

  // Test the application section.
  for (let f in APPLICATION_TEST_DATA) {
    Assert.equal(aPing.application[f], APPLICATION_TEST_DATA[f],
                 f + " must have the correct value.");
  }

  // We can't check the values for channel and architecture. Just make
  // sure they are in.
  Assert.ok("architecture" in aPing.application,
            "The application section must have an architecture field.");
  Assert.ok("channel" in aPing.application,
            "The application section must have a channel field.");

  // Check the clientId and environment fields, as needed.
  Assert.equal("clientId" in aPing, aHasClientId);
  Assert.equal("environment" in aPing, aHasEnvironment);
}
|
||||||
|
|
||||||
|
/**
 * Checks a session ping payload against the fixed test expectations:
 * simple measurements, histogram contents, keyed histograms, slow SQL
 * and memory reporter data.
 *
 * @param payload The ping payload to check.
 * @param reason The ping reason. NOTE(review): not referenced in this
 *               function body — reason checks appear to happen at the
 *               call sites; confirm before removing the parameter.
 * @param successfulPings Expected number of successful ping submissions
 *                        recorded in the TELEMETRY_SUCCESS histogram.
 */
function checkPayload(payload, reason, successfulPings) {
  Assert.ok(payload.simpleMeasurements.uptime >= 0);
  Assert.equal(payload.simpleMeasurements.startupInterrupted, 1);
  Assert.equal(payload.simpleMeasurements.shutdownDuration, SHUTDOWN_TIME);
  Assert.equal(payload.simpleMeasurements.savedPings, 1);
  Assert.ok("maximalNumberOfConcurrentThreads" in payload.simpleMeasurements);
  Assert.ok(payload.simpleMeasurements.maximalNumberOfConcurrentThreads >= gNumberOfThreadsLaunched);

  // activeTicks is -1 when no session recorder is available (see asyncSetup).
  let activeTicks = payload.simpleMeasurements.activeTicks;
  Assert.ok(SESSION_RECORDER_EXPECTED ? activeTicks >= 0 : activeTicks == -1);

  Assert.equal(payload.simpleMeasurements.failedProfileLockCount,
              FAILED_PROFILE_LOCK_ATTEMPTS);
  let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
  let failedProfileLocksFile = profileDirectory.clone();
  failedProfileLocksFile.append("Telemetry.FailedProfileLocks.txt");
  // The lock-count file written in run_test must have been consumed.
  Assert.ok(!failedProfileLocksFile.exists());


  let isWindows = ("@mozilla.org/windows-registry-key;1" in Components.classes);
  if (isWindows) {
    Assert.ok(payload.simpleMeasurements.startupSessionRestoreReadBytes > 0);
    Assert.ok(payload.simpleMeasurements.startupSessionRestoreWriteBytes > 0);
  }

  const TELEMETRY_PING = "TELEMETRY_PING";
  const TELEMETRY_SUCCESS = "TELEMETRY_SUCCESS";
  const TELEMETRY_TEST_FLAG = "TELEMETRY_TEST_FLAG";
  const TELEMETRY_TEST_COUNT = "TELEMETRY_TEST_COUNT";
  const TELEMETRY_TEST_KEYED_FLAG = "TELEMETRY_TEST_KEYED_FLAG";
  const TELEMETRY_TEST_KEYED_COUNT = "TELEMETRY_TEST_KEYED_COUNT";
  const READ_SAVED_PING_SUCCESS = "READ_SAVED_PING_SUCCESS";

  Assert.ok(TELEMETRY_PING in payload.histograms);
  Assert.ok(READ_SAVED_PING_SUCCESS in payload.histograms);
  Assert.ok(TELEMETRY_TEST_FLAG in payload.histograms);
  Assert.ok(TELEMETRY_TEST_COUNT in payload.histograms);

  // Every recorded SQLITE histogram must have its STARTUP_ counterpart.
  let rh = Telemetry.registeredHistograms(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, []);
  for (let name of rh) {
    if (/SQLITE/.test(name) && name in payload.histograms) {
      let histogramName = ("STARTUP_" + name);
      Assert.ok(histogramName in payload.histograms, histogramName + " must be available.");
    }
  }
  Assert.ok(!(IGNORE_HISTOGRAM in payload.histograms));
  Assert.ok(!(IGNORE_CLONED_HISTOGRAM in payload.histograms));

  // Flag histograms should automagically spring to life.
  const expected_flag = {
    range: [1, 2],
    bucket_count: 3,
    histogram_type: 3,
    values: {0:1, 1:0},
    sum: 0,
    sum_squares_lo: 0,
    sum_squares_hi: 0
  };
  let flag = payload.histograms[TELEMETRY_TEST_FLAG];
  Assert.equal(uneval(flag), uneval(expected_flag));

  // We should have a test count.
  const expected_count = {
    range: [1, 2],
    bucket_count: 3,
    histogram_type: 4,
    values: {0:1, 1:0},
    sum: 1,
    sum_squares_lo: 1,
    sum_squares_hi: 0,
  };
  let count = payload.histograms[TELEMETRY_TEST_COUNT];
  Assert.equal(uneval(count), uneval(expected_count));

  // There should be one successful report from the previous telemetry ping.
  const expected_tc = {
    range: [1, 2],
    bucket_count: 3,
    histogram_type: 2,
    values: {0:2, 1:successfulPings, 2:0},
    sum: successfulPings,
    sum_squares_lo: successfulPings,
    sum_squares_hi: 0
  };
  let tc = payload.histograms[TELEMETRY_SUCCESS];
  Assert.equal(uneval(tc), uneval(expected_tc));

  let h = payload.histograms[READ_SAVED_PING_SUCCESS];
  Assert.equal(h.values[0], 1);

  // The ping should include data from memory reporters. We can't check that
  // this data is correct, because we can't control the values returned by the
  // memory reporters. But we can at least check that the data is there.
  //
  // It's important to check for the presence of reporters with a mix of units,
  // because TelemetryPing has separate logic for each one. But we can't
  // currently check UNITS_COUNT_CUMULATIVE or UNITS_PERCENTAGE because
  // Telemetry doesn't touch a memory reporter with these units that's
  // available on all platforms.

  Assert.ok('MEMORY_JS_GC_HEAP' in payload.histograms); // UNITS_BYTES
  Assert.ok('MEMORY_JS_COMPARTMENTS_SYSTEM' in payload.histograms); // UNITS_COUNT

  // We should have included addon histograms.
  Assert.ok("addonHistograms" in payload);
  Assert.ok(ADDON_NAME in payload.addonHistograms);
  Assert.ok(ADDON_HISTOGRAM in payload.addonHistograms[ADDON_NAME]);

  Assert.ok(("mainThread" in payload.slowSQL) &&
            ("otherThreads" in payload.slowSQL));

  // Check keyed histogram payload.

  Assert.ok("keyedHistograms" in payload);
  let keyedHistograms = payload.keyedHistograms;
  Assert.ok(TELEMETRY_TEST_KEYED_FLAG in keyedHistograms);
  Assert.ok(TELEMETRY_TEST_KEYED_COUNT in keyedHistograms);

  Assert.deepEqual({}, keyedHistograms[TELEMETRY_TEST_KEYED_FLAG]);

  const expected_keyed_count = {
    "a": {
      range: [1, 2],
      bucket_count: 3,
      histogram_type: 4,
      values: {0:2, 1:0},
      sum: 2,
      sum_squares_lo: 2,
      sum_squares_hi: 0,
    },
    "b": {
      range: [1, 2],
      bucket_count: 3,
      histogram_type: 4,
      values: {0:1, 1:0},
      sum: 1,
      sum_squares_lo: 1,
      sum_squares_hi: 0,
    },
  };
  Assert.deepEqual(expected_keyed_count, keyedHistograms[TELEMETRY_TEST_KEYED_COUNT]);
}
|
||||||
|
|
||||||
|
/**
 * Writes |contents| to |file| using a safe output stream, so the target
 * file is only replaced once the write completed successfully.
 *
 * @param file nsIFile to write to.
 * @param contents String to write.
 */
function writeStringToFile(file, contents) {
  let stream = Cc["@mozilla.org/network/safe-file-output-stream;1"]
                 .createInstance(Ci.nsIFileOutputStream);
  const openFlags = PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE;
  stream.init(file, openFlags, RW_OWNER, stream.DEFER_OPEN);
  stream.write(contents, contents.length);
  // finish() commits the data to the target file.
  stream.QueryInterface(Ci.nsISafeOutputStream).finish();
  stream.close();
}
|
||||||
|
|
||||||
|
/**
 * Makes it look like a previous session recorded a shutdown duration by
 * writing the Telemetry.ShutdownTime.txt marker into the profile directory.
 */
function write_fake_shutdown_file() {
  let markerFile = Services.dirsvc.get("ProfD", Ci.nsIFile).clone();
  markerFile.append("Telemetry.ShutdownTime.txt");
  writeStringToFile(markerFile, String(SHUTDOWN_TIME));
}
|
||||||
|
|
||||||
|
/**
 * Makes it look like previous sessions failed to lock the profile by
 * writing the Telemetry.FailedProfileLocks.txt counter file into the
 * profile directory.
 */
function write_fake_failedprofilelocks_file() {
  let counterFile = Services.dirsvc.get("ProfD", Ci.nsIFile).clone();
  counterFile.append("Telemetry.FailedProfileLocks.txt");
  writeStringToFile(counterFile, String(FAILED_PROFILE_LOCK_ATTEMPTS));
}
|
||||||
|
|
||||||
|
/**
 * xpcshell entry point. Sets up the profile, prefs and fake state files,
 * launches extra threads to exercise the concurrent-thread counter, then
 * starts the async task queue once Telemetry has loaded its data.
 */
function run_test() {
  // Balanced by do_test_finished() in the final stopServer task.
  do_test_pending();

  // Addon manager needs a profile directory
  do_get_profile();
  loadAddonManager(APP_ID, APP_NAME, APP_VERSION, PLATFORM_VERSION);

  Services.prefs.setBoolPref(PREF_ENABLED, true);
  Services.prefs.setBoolPref(PREF_FHR_UPLOAD_ENABLED, true);

  // Send the needed startup notifications to the datareporting service
  // to ensure that it has been initialized.
  if (HAS_DATAREPORTINGSERVICE) {
    gDatareportingService.observe(null, "app-startup", null);
    gDatareportingService.observe(null, "profile-after-change", null);
  }

  // Make it look like we've previously failed to lock a profile a couple times.
  write_fake_failedprofilelocks_file();

  // Make it look like we've shutdown before.
  write_fake_shutdown_file();

  let currentMaxNumberOfThreads = Telemetry.maximalNumberOfConcurrentThreads;
  do_check_true(currentMaxNumberOfThreads > 0);

  // Try to augment the maximal number of threads currently launched
  let threads = [];
  try {
    for (let i = 0; i < currentMaxNumberOfThreads + 10; ++i) {
      threads.push(Services.tm.newThread(0));
    }
  } catch (ex) {
    // If memory is too low, it is possible that not all threads will be launched.
  }
  gNumberOfThreadsLaunched = threads.length;

  do_check_true(Telemetry.maximalNumberOfConcurrentThreads >= gNumberOfThreadsLaunched);

  // Shut the extra threads down again when the test ends.
  do_register_cleanup(function() {
    threads.forEach(function(thread) {
      thread.shutdown();
    });
  });

  Telemetry.asyncFetchTelemetryData(wrapWithExceptionHandler(run_next_test));
}
|
||||||
|
|
||||||
|
// Async setup: initializes TelemetrySession/TelemetryPing, checks the
// activeTicks fallback without a session recorder, and verifies the client
// id is cached before the async ping setup completes.
add_task(function* asyncSetup() {
  yield TelemetrySession.setup();
  yield TelemetryPing.setup();

  if (HAS_DATAREPORTINGSERVICE) {
    // force getSessionRecorder()==undefined to check the payload's activeTicks
    gDatareportingService.simulateNoSessionRecorder();
  }

  // When no DRS or no DRS.getSessionRecorder(), activeTicks should be -1.
  do_check_eq(TelemetrySession.getPayload().simpleMeasurements.activeTicks, -1);

  if (HAS_DATAREPORTINGSERVICE) {
    // Restore normal behavior for getSessionRecorder()
    gDatareportingService.simulateRestoreSessionRecorder();

    gDataReportingClientID = yield gDatareportingService.getClientID();

    // We should have cached the client id now. Lets confirm that by
    // checking the client id before the async ping setup is finished.
    let promisePingSetup = TelemetryPing.reset();
    do_check_eq(TelemetryPing.clientID, gDataReportingClientID);
    yield promisePingSetup;
  }
});
|
||||||
|
|
||||||
|
// Ensures that expired histograms are not part of the payload.
add_task(function* test_expiredHistogram() {
  const expiredHistogramId = "FOOBAR";

  // Register a histogram that expired at version "30" and record into it.
  let expired = Telemetry.newHistogram(expiredHistogramId, "30",
                                       Telemetry.HISTOGRAM_EXPONENTIAL, 1, 2, 3);
  expired.add(1);

  // Neither the dynamically registered expired histogram nor the statically
  // expired test histogram may show up in the session payload.
  do_check_eq(TelemetrySession.getPayload()["histograms"][expiredHistogramId], undefined);
  do_check_eq(TelemetrySession.getPayload()["histograms"]["TELEMETRY_TEST_EXPIRED"], undefined);
});
|
||||||
|
|
||||||
|
// Checks that an invalid histogram file is deleted if TelemetryFile fails to parse it.
add_task(function* test_runInvalidJSON() {
  let invalidFile = getSavedPingFile("invalid-histograms.dat");

  // Write content that is guaranteed not to parse as JSON.
  writeStringToFile(invalidFile, "this.is.invalid.JSON");
  do_check_true(invalidFile.exists());

  // Loading must fail and clean the broken file up.
  yield TelemetryFile.testLoadHistograms(invalidFile);
  do_check_false(invalidFile.exists());
});
|
||||||
|
|
||||||
|
// Sends a ping to a non existing server. If we remove this test, we won't get
// all the histograms we need in the main ping.
add_task(function* test_noServerPing() {
  // Two pings are needed so the STARTUP_MEMORY_STORAGE_SQLIE histograms get
  // initialised. See bug 1131585.
  yield sendPing();
  yield sendPing();
});
|
||||||
|
|
||||||
|
// Checks that a sent ping is correctly received by a dummy http server.
add_task(function* test_simplePing() {
  // Spin up the dummy server on a random free port and start iterating
  // over the requests it receives.
  gHttpServer.start(-1);
  gServerStarted = true;
  gRequestIterator = Iterator(new Request());

  yield sendPing();

  // The server must have received a well-formed ping carrying both a
  // client id and environment data.
  let receivedRequest = yield gRequestIterator.next();
  let receivedPing = decodeRequestPayload(receivedRequest);
  checkPingFormat(receivedPing, PING_TYPE, true, true);
});
|
||||||
|
|
||||||
|
// Saves the current session histograms, reloads them, performs a ping
// and checks that the dummy http server received both the previously
// saved histograms and the new ones.
add_task(function* test_saveLoadPing() {
  let histogramsFile = getSavedPingFile("saved-histograms.dat");

  setupTestData();
  yield TelemetrySession.testSaveHistograms(histogramsFile);
  yield TelemetryFile.testLoadHistograms(histogramsFile);
  yield sendPing();

  // Get requests received by dummy server.
  let request1 = yield gRequestIterator.next();
  let request2 = yield gRequestIterator.next();

  Assert.equal(request1.getHeader("content-type"), "application/json; charset=UTF-8",
               "The request must have the correct content-type.");
  Assert.equal(request2.getHeader("content-type"), "application/json; charset=UTF-8",
               "The request must have the correct content-type.");

  // We decode both requests to check for the |reason|.
  let ping1 = decodeRequestPayload(request1);
  let ping2 = decodeRequestPayload(request2);

  checkPingFormat(ping1, PING_TYPE, true, true);
  checkPingFormat(ping2, PING_TYPE, true, true);

  // Check we have the correct two requests. Ordering is not guaranteed.
  if (ping1.payload.info.reason === "test-ping") {
    // Until we change MainPing according to bug 1120982, common ping payload
    // will contain another nested payload.
    checkPayload(ping1.payload, "test-ping", 1);
    checkPayload(ping2.payload, "saved-session", 1);
  } else {
    checkPayload(ping1.payload, "saved-session", 1);
    checkPayload(ping2.payload, "test-ping", 1);
  }
});
|
||||||
|
|
||||||
|
// Verifies that subsession payloads track the classic payloads, and that
// histograms and keyed histograms can be reset globally or per-subsession.
add_task(function* test_checkSubsession() {
  const COUNT_ID = "TELEMETRY_TEST_COUNT";
  const KEYED_ID = "TELEMETRY_TEST_KEYED_COUNT";
  const count = Telemetry.getHistogramById(COUNT_ID);
  const keyed = Telemetry.getKeyedHistogramById(KEYED_ID);
  const registeredIds =
    new Set(Telemetry.registeredHistograms(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, []));

  const stableHistograms = new Set([
    "TELEMETRY_TEST_FLAG",
    "TELEMETRY_TEST_COUNT",
    "TELEMETRY_TEST_RELEASE_OPTOUT",
    "TELEMETRY_TEST_RELEASE_OPTIN",
    "STARTUP_CRASH_DETECTED",
  ]);

  const stableKeyedHistograms = new Set([
    "TELEMETRY_TEST_KEYED_FLAG",
    "TELEMETRY_TEST_KEYED_COUNT",
    "TELEMETRY_TEST_KEYED_RELEASE_OPTIN",
    "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT",
  ]);

  // Compare the two sets of histograms.
  // The "subsession" histograms should match the registered
  // "classic" histograms. However, histograms can change
  // between us collecting the different payloads, so we only
  // check for deep equality on known stable histograms.
  // Fixed: declared with |let| — these were previously assigned without a
  // declaration, creating implicit globals on the test scope.
  let checkHistograms = (classic, subsession) => {
    for (let id of Object.keys(classic)) {
      if (!registeredIds.has(id)) {
        continue;
      }

      Assert.ok(id in subsession);
      if (stableHistograms.has(id)) {
        Assert.deepEqual(classic[id],
                         subsession[id]);
      } else {
        Assert.equal(classic[id].histogram_type,
                     subsession[id].histogram_type);
      }
    }
  };

  // Same as above, except for keyed histograms.
  let checkKeyedHistograms = (classic, subsession) => {
    for (let id of Object.keys(classic)) {
      if (!registeredIds.has(id)) {
        continue;
      }

      Assert.ok(id in subsession);
      if (stableKeyedHistograms.has(id)) {
        Assert.deepEqual(classic[id],
                         subsession[id]);
      }
    }
  };

  // Both classic and subsession payload histograms should start the same.
  // The payloads should be identical for now except for the reason.
  count.clear();
  keyed.clear();
  let classic = TelemetrySession.getPayload();
  let subsession = TelemetrySession.getPayload("environment-change");

  Assert.equal(classic.info.reason, "gather-payload");
  Assert.equal(subsession.info.reason, "environment-change");
  Assert.ok(!(COUNT_ID in classic.histograms));
  Assert.ok(!(COUNT_ID in subsession.histograms));
  Assert.ok(KEYED_ID in classic.keyedHistograms);
  Assert.ok(KEYED_ID in subsession.keyedHistograms);
  Assert.deepEqual(classic.keyedHistograms[KEYED_ID], {});
  Assert.deepEqual(subsession.keyedHistograms[KEYED_ID], {});

  checkHistograms(classic.histograms, subsession.histograms);
  checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);

  // Adding values should get picked up in both.
  count.add(1);
  keyed.add("a", 1);
  keyed.add("b", 1);
  classic = TelemetrySession.getPayload();
  subsession = TelemetrySession.getPayload("environment-change");

  Assert.ok(COUNT_ID in classic.histograms);
  Assert.ok(COUNT_ID in subsession.histograms);
  Assert.ok(KEYED_ID in classic.keyedHistograms);
  Assert.ok(KEYED_ID in subsession.keyedHistograms);
  Assert.equal(classic.histograms[COUNT_ID].sum, 1);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);

  checkHistograms(classic.histograms, subsession.histograms);
  checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);

  // Values should still reset properly.
  count.clear();
  keyed.clear();
  classic = TelemetrySession.getPayload();
  subsession = TelemetrySession.getPayload("environment-change");

  Assert.ok(!(COUNT_ID in classic.histograms));
  Assert.ok(!(COUNT_ID in subsession.histograms));
  Assert.ok(KEYED_ID in classic.keyedHistograms);
  Assert.ok(KEYED_ID in subsession.keyedHistograms);
  Assert.deepEqual(classic.keyedHistograms[KEYED_ID], {});

  checkHistograms(classic.histograms, subsession.histograms);
  checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);

  // Adding values should get picked up in both.
  count.add(1);
  keyed.add("a", 1);
  keyed.add("b", 1);
  classic = TelemetrySession.getPayload();
  subsession = TelemetrySession.getPayload("environment-change");

  Assert.ok(COUNT_ID in classic.histograms);
  Assert.ok(COUNT_ID in subsession.histograms);
  Assert.ok(KEYED_ID in classic.keyedHistograms);
  Assert.ok(KEYED_ID in subsession.keyedHistograms);
  Assert.equal(classic.histograms[COUNT_ID].sum, 1);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);

  checkHistograms(classic.histograms, subsession.histograms);
  checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);

  // We should be able to reset only the subsession histograms.
  count.clear(true);
  keyed.clear(true);
  classic = TelemetrySession.getPayload();
  subsession = TelemetrySession.getPayload("environment-change");

  Assert.ok(COUNT_ID in classic.histograms);
  Assert.ok(COUNT_ID in subsession.histograms);
  Assert.equal(classic.histograms[COUNT_ID].sum, 1);
  Assert.equal(subsession.histograms[COUNT_ID].sum, 0);

  Assert.ok(KEYED_ID in classic.keyedHistograms);
  Assert.ok(KEYED_ID in subsession.keyedHistograms);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);
  Assert.deepEqual(subsession.keyedHistograms[KEYED_ID], {});

  // Adding values should get picked up in both again.
  count.add(1);
  keyed.add("a", 1);
  keyed.add("b", 1);
  classic = TelemetrySession.getPayload();
  subsession = TelemetrySession.getPayload("environment-change");

  Assert.ok(COUNT_ID in classic.histograms);
  Assert.ok(COUNT_ID in subsession.histograms);
  Assert.equal(classic.histograms[COUNT_ID].sum, 2);
  Assert.equal(subsession.histograms[COUNT_ID].sum, 1);

  Assert.ok(KEYED_ID in classic.keyedHistograms);
  Assert.ok(KEYED_ID in subsession.keyedHistograms);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 2);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 2);
  Assert.equal(subsession.keyedHistograms[KEYED_ID]["a"].sum, 1);
  Assert.equal(subsession.keyedHistograms[KEYED_ID]["b"].sum, 1);
});
|
||||||
|
|
||||||
|
// Checks that an expired histogram file is deleted when loaded.
add_task(function* test_runOldPingFile() {
  // A saved ping older than 14 days must be discarded on load.
  const EXPIRED_PING_AGE_MS = (14 * 24 * 60 * 60 * 1000) + 60000; // 14 days, 1m
  let oldPingFile = getSavedPingFile("old-histograms.dat");

  yield TelemetrySession.testSaveHistograms(oldPingFile);
  do_check_true(oldPingFile.exists());

  // Back-date the file so it looks expired.
  oldPingFile.lastModifiedTime = oldPingFile.lastModifiedTime - EXPIRED_PING_AGE_MS;

  yield TelemetryFile.testLoadHistograms(oldPingFile);
  do_check_false(oldPingFile.exists());
});
|
||||||
|
|
||||||
|
add_task(function* test_savedSessionClientID() {
  // Assure that we store the ping properly when saving sessions on shutdown.
  // We make the TelemetrySession shutdown to trigger a session save.
  const dir = TelemetryFile.pingDirectoryPath;
  // Start from an empty ping directory so the saved-session ping is the
  // only one present afterwards.
  yield OS.File.removeDir(dir, {ignoreAbsent: true});
  yield OS.File.makeDir(dir);
  // NOTE(review): shutdown() is called without the |false| argument used in
  // sibling tests — presumably so the saved-session ping is actually
  // written; confirm against TelemetrySession.shutdown's signature.
  yield TelemetrySession.shutdown();

  yield TelemetryFile.loadSavedPings();
  Assert.equal(TelemetryFile.pingsLoaded, 1);
  let ping = TelemetryFile.popPendingPings().next();
  Assert.equal(ping.value.clientId, gDataReportingClientID);
});
|
||||||
|
|
||||||
|
// Final task: spins down the dummy http server; do_test_finished()
// balances the do_test_pending() issued in run_test().
add_task(function* stopServer(){
  gHttpServer.stop(do_test_finished);
});
|
||||||
|
|
||||||
|
// An iterable sequence of http requests
function Request() {
  let defers = [];  // one deferred per request; grows as requests arrive
  let current = 0;  // index of the next deferred handed out by the iterator

  function RequestIterator() {}

  // Returns a promise that resolves to the next http request
  RequestIterator.prototype.next = function() {
    let deferred = defers[current++];
    return deferred.promise;
  }

  // Legacy SpiderMonkey iteration hook so callers can do
  // Iterator(new Request()) — see test_simplePing.
  this.__iterator__ = function(){
    return new RequestIterator();
  }

  // Each incoming ping resolves the currently pending deferred and queues
  // a fresh one for the request after it.
  registerPingHandler((request, response) => {
    let deferred = defers[defers.length - 1];
    defers.push(Promise.defer());
    deferred.resolve(request);
  });

  defers.push(Promise.defer());
}
|
|
@ -34,5 +34,6 @@ generated-files =
|
||||||
[test_ThirdPartyCookieProbe.js]
|
[test_ThirdPartyCookieProbe.js]
|
||||||
[test_TelemetrySendOldPings.js]
|
[test_TelemetrySendOldPings.js]
|
||||||
skip-if = debug == true || os == "android" # Disabled due to intermittent orange on Android
|
skip-if = debug == true || os == "android" # Disabled due to intermittent orange on Android
|
||||||
|
[test_TelemetrySession.js]
|
||||||
[test_ThreadHangStats.js]
|
[test_ThreadHangStats.js]
|
||||||
run-sequentially = Bug 1046307, test can fail intermittently when CPU load is high
|
run-sequentially = Bug 1046307, test can fail intermittently when CPU load is high
|
||||||
|
|
|
@ -65,7 +65,7 @@ add_task(function* actualTest() {
|
||||||
do_check_true(simpleMeasurements.bar > 1); // bar was included
|
do_check_true(simpleMeasurements.bar > 1); // bar was included
|
||||||
do_check_eq(undefined, simpleMeasurements.baz); // baz wasn't included since it wasn't added
|
do_check_eq(undefined, simpleMeasurements.baz); // baz wasn't included since it wasn't added
|
||||||
|
|
||||||
yield TelemetrySession.shutdown();
|
yield TelemetrySession.shutdown(false);
|
||||||
|
|
||||||
do_test_finished();
|
do_test_finished();
|
||||||
});
|
});
|
||||||
|
|
|
@ -514,5 +514,5 @@ add_test(function overrides_retrieved() {
|
||||||
});
|
});
|
||||||
|
|
||||||
add_test(function test_shutdown() {
|
add_test(function test_shutdown() {
|
||||||
TelemetrySession.shutdown().then(run_next_test);
|
TelemetrySession.shutdown(false).then(run_next_test);
|
||||||
});
|
});
|
||||||
|
|
Загрузка…
Ссылка в новой задаче