Bug 1392391 - [reftest] Pre-parse the manifests in a separate Firefox instance, r=jmaher

Instead of parsing the manifests and running the tests all in one go, this will
spawn an extra Firefox instance at the beginning that does nothing but parse the
manifests and dump them to a file.

This will allow the python harness to load and manipulate the test objects before
sending them back to the JS harness as a list of tests to run. The main motivation
for this change is to implement run-by-manifest, a mode where we restart the
browser between every test manifest. But there are other benefits as well, like
sharing the chunking logic used by other harnesses and letting the python harness
stuff arbitrary metadata into the test objects.
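
To make the round trip concrete, here is a minimal sketch (not part of the patch) of
how the python side can consume the dumped test objects. The file path and chunk
numbers are illustrative; TestManifest and the chunk_by_slice filter are the same
manifestparser pieces the other harnesses already use, and the name/path fix-up
mirrors what the patch does before calling active_tests():

import json
from manifestparser import TestManifest
from manifestparser.filters import chunk_by_slice

def load_dumped_tests(path, this_chunk=1, total_chunks=1):
    # 'path' is the file reftest.jsm wrote out via the
    # reftest.manifests.dumpTests pref.
    with open(path) as fh:
        tests = json.load(fh)

    # manifestparser expects 'name' and 'path' keys; reftest itself only
    # cares about the url/identifier fields.
    for test in tests:
        test['name'] = test['path'] = test['url1']

    mp = TestManifest(strict=False)
    mp.tests = tests
    # Reuse the chunking logic shared with the other harnesses.
    return mp.active_tests(exists=False,
                           filters=[chunk_by_slice(this_chunk, total_chunks)])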

For now, Android will continue to parse the manifests and run the tests all in one
go. Converting Android to this new mechanism will be left to a follow-up bug.


MozReview-Commit-ID: AfUBmQpx3Zz

--HG--
extra : rebase_source : 955966c07bb650946c7c0e5706856f028335e850
Andrew Halberstadt 2018-02-01 14:18:00 -05:00
Parent 33d2269419
Commit 00015fe0a8
6 changed files with 232 additions and 116 deletions

View file

@@ -142,6 +142,7 @@ for (let [key, val] of Object.entries({
startAfter: undefined,
suiteStarted: false,
manageSuite: false,
// The enabled-state of the test-plugins, stored so they can be reset later
testPluginEnabledStates: null,

View file

@@ -5,7 +5,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
"use strict";
this.EXPORTED_SYMBOLS = ["ReadTopManifest"];
this.EXPORTED_SYMBOLS = ["ReadTopManifest", "CreateUrls"];
var CC = Components.classes;
const CI = Components.interfaces;
@@ -706,14 +706,14 @@ function AddTestItem(aTest, aFilter) {
if (!aFilter)
aFilter = [null, [], false];
aTest = CreateUrls(aTest);
var {url1, url2} = CreateUrls(Object.assign({}, aTest));
var globalFilter = aFilter[0];
var manifestFilter = aFilter[1];
var invertManifest = aFilter[2];
if ((globalFilter && !globalFilter.test(aTest.url1.spec)) ||
if ((globalFilter && !globalFilter.test(url1.spec)) ||
(manifestFilter &&
!(invertManifest ^ manifestFilter.test(aTest.url1.spec))))
!(invertManifest ^ manifestFilter.test(url1.spec))))
return;
if (g.focusFilterMode == FOCUS_FILTER_NEEDS_FOCUS_TESTS &&
!aTest.needsFocus)
@@ -722,9 +722,9 @@ function AddTestItem(aTest, aFilter) {
aTest.needsFocus)
return;
if (aTest.url2 !== null)
aTest.identifier = [aTest.url1.spec, aTest.type, aTest.url2.spec];
if (url2 !== null)
aTest.identifier = [url1.spec, aTest.type, url2.spec];
else
aTest.identifier = aTest.url1.spec;
aTest.identifier = url1.spec;
g.urls.push(aTest);
}

View file

@@ -4,6 +4,7 @@
import json
import threading
from collections import defaultdict
from mozlog.formatters import TbplFormatter
from mozrunner.utils import get_stack_fixer_function
@@ -129,6 +130,7 @@ class OutputHandler(object):
self.stack_fixer_function = get_stack_fixer_function(utilityPath, symbolsPath)
self.log = log
self.proc_name = None
self.results = defaultdict(int)
def __call__(self, line):
# need to return processed messages to appease remoteautomation.py
@@ -143,6 +145,10 @@ class OutputHandler(object):
return [line]
if isinstance(data, dict) and 'action' in data:
if data['action'] == 'results':
for k, v in data['results'].items():
self.results[k] += v
else:
self.log.log_raw(data)
else:
self.verbatim(json.dumps(data))
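
For reference, the 'results' action handled above is emitted by the JS harness (see
DoneTests() further down) as a flat mapping of result-class counters, which this
handler sums across browser restarts. A tiny self-contained sketch of that
aggregation, with illustrative counter names:

import json
from collections import defaultdict

results = defaultdict(int)

def handle_structured_line(line):
    # Each structured log line is a JSON object; 'results' actions carry a
    # mapping of result class -> count which is accumulated per run.
    data = json.loads(line)
    if isinstance(data, dict) and data.get('action') == 'results':
        for k, v in data['results'].items():
            results[k] += v

handle_structured_line('{"action": "results", "results": {"Pass": 10}}')
handle_structured_line('{"action": "results", "results": {"Pass": 5, "Exception": 1}}')
assert dict(results) == {"Pass": 15, "Exception": 1}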

View file

@@ -356,15 +356,39 @@ function ReadTests() {
var prefs = Components.classes["@mozilla.org/preferences-service;1"].
getService(Components.interfaces.nsIPrefBranch);
// Parse reftest manifests
try {
var manifests = JSON.parse(prefs.getCharPref("reftest.manifests"));
g.urlsFilterRegex = manifests[null];
} catch(e) {
logger.error("Unable to find reftest.manifests pref. Please ensure your profile is setup properly");
/* There are three modes implemented here:
* 1) reftest.manifests
* 2) reftest.manifests and reftest.manifests.dumpTests
* 3) reftest.tests
*
* The first will parse the specified manifests, then immediately
* run the tests. The second will parse the manifests, save the test
* objects to a file and exit. The third will load a file of test
* objects and run them.
*
* The latter two modes are used to pass test data back and forth
* with the python harness.
*/
let manifests = prefs.getCharPref("reftest.manifests", null);
let dumpTests = prefs.getCharPref("reftest.manifests.dumpTests", null);
let testList = prefs.getCharPref("reftest.tests", null);
if ((testList && manifests) || !(testList || manifests)) {
logger.error("Exactly one of reftest.manifests or reftest.tests must be specified.");
DoneTests();
}
if (testList) {
let promise = OS.File.read(testList).then(function onSuccess(array) {
let decoder = new TextDecoder();
g.urls = JSON.parse(decoder.decode(array)).map(CreateUrls);
StartTests();
});
} else if (manifests) {
// Parse reftest manifests
manifests = JSON.parse(manifests);
g.urlsFilterRegex = manifests[null];
var globalFilter = manifests.hasOwnProperty("") ? new RegExp(manifests[""]) : null;
var manifestURLs = Object.keys(manifests);
@@ -377,7 +401,24 @@ function ReadTests() {
ReadTopManifest(manifestURL, [globalFilter, filter, false]);
});
if (dumpTests) {
let encoder = new TextEncoder();
let tests = encoder.encode(JSON.stringify(g.urls));
OS.File.writeAtomic(dumpTests, tests, {flush: true}).then(
function onSuccess() {
DoneTests();
},
function onFailure(reason) {
logger.error("failed to write test data: " + reason);
DoneTests();
}
)
} else {
g.manageSuite = true;
g.urls = g.urls.map(CreateUrls);
StartTests();
}
}
} catch(e) {
++g.testResults.Exception;
logger.error("EXCEPTION: " + e);
@@ -461,7 +502,7 @@ function StartTests()
g.urls = g.urls.slice(start, end);
}
if (g.startAfter === undefined && !g.suiteStarted) {
if (g.manageSuite && g.startAfter === undefined && !g.suiteStarted) {
var ids = g.urls.map(function(obj) {
return obj.identifier;
});
@@ -737,8 +778,12 @@ function StartCurrentURI(aURLTargetType)
function DoneTests()
{
logger.suiteEnd({'results': g.testResults});
if (g.manageSuite) {
g.suiteStarted = false
logger.suiteEnd({'results': g.testResults});
} else {
logger._logData('results', {results: g.testResults});
}
logger.info("Slowest test took " + g.slowestTestTime + "ms (" + g.slowestTestURL + ")");
logger.info("Total canvas count = " + g.recycledCanvases.length);
if (g.failedUseWidgetLayers) {
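
The comment block above describes the three pref-driven modes. As a rough
illustration of how a harness could drive them end to end (run_firefox is a
placeholder for launching the browser with the given prefs set, e.g. through
mozprofile; the pref names are the ones introduced by this patch):

import json
import os
import tempfile

def dump_pass_prefs(manifests, dump_path):
    # Mode 2: parse the manifests, write the resulting test objects to
    # dump_path as JSON, then exit without running anything.
    return {
        'reftest.manifests': json.dumps(manifests),
        'reftest.manifests.dumpTests': dump_path,
    }

def run_pass_prefs(tests, profile_dir):
    # Mode 3: hand a pre-built list of test objects back to the JS harness.
    testlist = os.path.join(profile_dir, 'reftests.json')
    with open(testlist, 'w') as fh:
        json.dump(tests, fh)
    return {'reftest.tests': testlist}

def pre_parse_then_run(manifests, run_firefox):
    dump_path = os.path.join(tempfile.mkdtemp(), 'reftests.json')
    run_firefox(dump_pass_prefs(manifests, dump_path))      # parse + dump, then exit
    with open(dump_path) as fh:
        tests = json.load(fh)
    # ...the python harness can chunk, filter or annotate 'tests' here...
    run_firefox(run_pass_prefs(tests, tempfile.mkdtemp()))  # actually run the tests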

View file

@@ -2,16 +2,16 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from contextlib import closing
import sys
import logging
import os
import psutil
import signal
import time
import sys
import tempfile
import time
import traceback
import urllib2
from contextlib import closing
import mozdevice
import mozinfo
@@ -143,6 +143,7 @@ class ReftestServer:
class RemoteReftest(RefTest):
use_marionette = False
parse_manifest = False
remoteApp = ''
resolver_cls = RemoteReftestResolver
@@ -167,11 +168,11 @@ class RemoteReftest(RefTest):
self._devicemanager.removeDir(self.remoteCache)
self._populate_logger(options)
outputHandler = OutputHandler(self.log, options.utilityPath, options.symbolsPath)
self.outputHandler = OutputHandler(self.log, options.utilityPath, options.symbolsPath)
# RemoteAutomation.py's 'messageLogger' is also used by mochitest. Mimic a mochitest
# MessageLogger object to re-use this code path.
outputHandler.write = outputHandler.__call__
self.automation._processArgs['messageLogger'] = outputHandler
self.outputHandler.write = self.outputHandler.__call__
self.automation._processArgs['messageLogger'] = self.outputHandler
def findPath(self, paths, filename=None):
for path in paths:
@@ -259,12 +260,12 @@ class RemoteReftest(RefTest):
# may not be able to access process info for all processes
continue
def createReftestProfile(self, options, manifest, startAfter=None):
def createReftestProfile(self, options, startAfter=None, **kwargs):
profile = RefTest.createReftestProfile(self,
options,
manifest,
server=options.remoteWebServer,
port=options.httpPort)
port=options.httpPort,
**kwargs)
if startAfter is not None:
print ("WARNING: Continuing after a crash is not supported for remote "
"reftest yet.")
@@ -333,10 +334,21 @@ class RemoteReftest(RefTest):
del browserEnv["XPCOM_MEM_BLOAT_LOG"]
return browserEnv
def runApp(self, profile, binary, cmdargs, env,
timeout=None, debuggerInfo=None,
symbolsPath=None, options=None,
valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None):
def runApp(self, options, cmdargs=None, timeout=None, debuggerInfo=None, symbolsPath=None,
valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None, **profileArgs):
if cmdargs is None:
cmdargs = []
if self.use_marionette:
cmdargs.append('-marionette')
binary = options.app
profile = self.createReftestProfile(options, **profileArgs)
# browser environment
env = self.buildBrowserEnv(options, profile.profile)
self.log.info("Running with e10s: {}".format(options.e10s))
status, lastTestSeen = self.automation.runApp(None, env,
binary,
profile.profile,
@@ -349,7 +361,9 @@ class RemoteReftest(RefTest):
if status == 1:
# when max run time exceeded, avoid restart
lastTestSeen = RefTest.TEST_SEEN_FINAL
return status, lastTestSeen
self.cleanup(profile.profile)
return status, lastTestSeen, self.outputHandler.results
def cleanup(self, profileDir):
# Pull results back from device

View file

@@ -18,6 +18,7 @@ import shutil
import signal
import subprocess
import sys
import tempfile
import threading
from datetime import datetime, timedelta
@@ -28,12 +29,14 @@ if SCRIPT_DIRECTORY not in sys.path:
import mozcrash
import mozdebug
import mozfile
import mozinfo
import mozleak
import mozlog
import mozprocess
import mozprofile
import mozrunner
from manifestparser import TestManifest
from mozrunner.utils import get_stack_fixer_function, test_environment
from mozscreenshot import printstatus, dump_screen
@@ -226,9 +229,10 @@ class ReftestResolver(object):
class RefTest(object):
TEST_SEEN_INITIAL = 'reftest'
TEST_SEEN_FINAL = 'Main app process exited normally'
use_marionette = True
oldcwd = os.getcwd()
parse_manifest = True
resolver_cls = ReftestResolver
use_marionette = True
def __init__(self):
update_mozinfo()
@@ -236,6 +240,7 @@ class RefTest(object):
self.haveDumpedScreen = False
self.resolver = self.resolver_cls()
self.log = None
self.testDumpFile = os.path.join(tempfile.gettempdir(), 'reftests.json')
def _populate_logger(self, options):
if self.log:
@@ -259,17 +264,21 @@ class RefTest(object):
"Get an absolute path relative to self.oldcwd."
return os.path.normpath(os.path.join(self.oldcwd, os.path.expanduser(path)))
def createReftestProfile(self, options, manifests, server='localhost', port=0,
profile_to_clone=None, startAfter=None):
def createReftestProfile(self, options, tests=None, manifests=None,
server='localhost', port=0, profile_to_clone=None,
startAfter=None, prefs=None):
"""Sets up a profile for reftest.
:param options: Object containing command line options
:param manifests: Dictionary of the form {manifest_path: [filters]}
:param tests: List of test objects to run
:param manifests: List of manifest files to parse (only takes effect
if tests were not passed in)
:param server: Server name to use for http tests
:param profile_to_clone: Path to a profile to use as the basis for the
test profile
:param startAfter: Start running tests after the specified test id
:param prefs: Extra preferences to set in the profile
"""
locations = mozprofile.permissions.ServerLocations()
locations.add_host(server, scheme='http', port=port)
locations.add_host(server, scheme='https', port=port)
@@ -277,7 +286,7 @@ class RefTest(object):
# Set preferences for communication between our command line arguments
# and the reftest harness. Preferences that are required for reftest
# to work should instead be set in reftest-preferences.js .
prefs = {}
prefs = prefs or {}
prefs['reftest.timeout'] = options.timeout * 1000
if options.totalChunks:
prefs['reftest.totalChunks'] = options.totalChunks
@@ -299,7 +308,6 @@ class RefTest(object):
prefs['reftest.cleanupPendingCrashes'] = True
prefs['reftest.focusFilterMode'] = options.focusFilterMode
prefs['reftest.logLevel'] = options.log_tbpl_level or 'info'
prefs['reftest.manifests'] = json.dumps(manifests)
prefs['reftest.suite'] = options.suite
if startAfter not in (None, self.TEST_SEEN_INITIAL, self.TEST_SEEN_FINAL):
@@ -380,6 +388,14 @@ class RefTest(object):
else:
profile = mozprofile.Profile(**kwargs)
if tests:
testlist = os.path.join(profile.profile, 'reftests.json')
with open(testlist, 'w') as fh:
json.dump(tests, fh)
profile.set_preferences({'reftest.tests': testlist})
elif manifests:
profile.set_preferences({'reftest.manifests': json.dumps(manifests)})
if os.path.join(here, 'chrome') not in options.extraProfileFiles:
options.extraProfileFiles.append(os.path.join(here, 'chrome'))
@@ -659,10 +675,23 @@ class RefTest(object):
self.log.info("Can't trigger Breakpad, just killing process")
process.kill()
def runApp(self, profile, binary, cmdargs, env,
timeout=None, debuggerInfo=None,
symbolsPath=None, options=None,
valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None):
def runApp(self, options, cmdargs=None, timeout=None, debuggerInfo=None,
symbolsPath=None, valgrindPath=None, valgrindArgs=None,
valgrindSuppFiles=None, **profileArgs):
if cmdargs is None:
cmdargs = []
if self.use_marionette:
cmdargs.append('-marionette')
binary = options.app
profile = self.createReftestProfile(options, **profileArgs)
# browser environment
env = self.buildBrowserEnv(options, profile.profile)
self.log.info("Running with e10s: {}".format(options.e10s))
def timeoutHandler():
self.handleTimeout(
@@ -769,12 +798,41 @@ class RefTest(object):
status = 1
runner.cleanup()
self.cleanup(profile.profile)
if marionette_exception is not None:
exc, value, tb = marionette_exception
raise exc, value, tb
return status, self.lastTestSeen
self.log.info("Process mode: {}".format('e10s' if options.e10s else 'non-e10s'))
return status, self.lastTestSeen, outputHandler.results
def getActiveTests(self, manifests, options, testDumpFile=None):
# These prefs will cause reftest.jsm to parse the manifests,
# dump the resulting tests to a file, and exit.
prefs = {
'reftest.manifests': json.dumps(manifests),
'reftest.manifests.dumpTests': testDumpFile or self.testDumpFile,
}
cmdargs = [] # ['-headless']
status, _, _ = self.runApp(options, cmdargs=cmdargs, prefs=prefs)
with open(self.testDumpFile, 'r') as fh:
tests = json.load(fh)
if os.path.isfile(self.testDumpFile):
mozfile.remove(self.testDumpFile)
for test in tests:
# Name and path are expected by manifestparser, but not used in reftest.
test['name'] = test['path'] = test['url1']
mp = TestManifest(strict=False)
mp.tests = tests
filters = []
tests = mp.active_tests(exists=False, filters=filters)
return tests
def runSerialTests(self, manifests, options, cmdargs=None):
debuggerInfo = None
@@ -782,30 +840,21 @@ class RefTest(object):
debuggerInfo = mozdebug.get_debugger_info(options.debugger, options.debuggerArgs,
options.debuggerInteractive)
profileDir = None
tests = None
if self.parse_manifest:
tests = self.getActiveTests(manifests, options)
ids = [t['identifier'] for t in tests]
self.log.suite_start(ids, name=options.suite)
startAfter = None # When the previous run crashed, we skip the tests we ran before
prevStartAfter = None
for i in itertools.count():
try:
if cmdargs is None:
cmdargs = []
if self.use_marionette:
cmdargs.append('-marionette')
profile = self.createReftestProfile(options,
manifests,
startAfter=startAfter)
profileDir = profile.profile # name makes more sense
# browser environment
browserEnv = self.buildBrowserEnv(options, profileDir)
self.log.info("Running with e10s: {}".format(options.e10s))
status, startAfter = self.runApp(profile,
binary=options.app,
status, startAfter, results = self.runApp(
options,
tests=tests,
manifests=manifests,
cmdargs=cmdargs,
env=browserEnv,
# We generally want the JS harness or marionette
# to handle timeouts if they can.
# The default JS harness timeout is currently
@@ -818,13 +867,13 @@ class RefTest(object):
# See bug 479518 and bug 1414063.
timeout=options.timeout + 70.0,
symbolsPath=options.symbolsPath,
options=options,
debuggerInfo=debuggerInfo)
self.log.info("Process mode: {}".format('e10s' if options.e10s else 'non-e10s'))
debuggerInfo=debuggerInfo
)
mozleak.process_leak_log(self.leakLogFile,
leak_thresholds=options.leakThresholds,
stack_fixer=get_stack_fixer_function(options.utilityPath,
options.symbolsPath))
if status == 0:
break
@@ -849,8 +898,9 @@ class RefTest(object):
break
prevStartAfter = startAfter
# TODO: we need to emit a SUITE-END log if it crashed
finally:
self.cleanup(profileDir)
if self.parse_manifest:
self.log.suite_end(extra={'results': results})
return status
def copyExtraFilesToProfile(self, options, profile):