Mirror of https://github.com/mozilla/gecko-dev.git

Backed out changeset cf85693280c7 (bug 886570) for frequent mochitest-3 errors on Windows

Parent: 0c223e94ec
Commit: ce57787693

@@ -78,7 +78,7 @@ DEBUGGER_INFO = {
    # semi-deliberately leaked, so we set '--show-possibly-lost=no' to avoid
    # uninteresting output from those objects. We set '--smc-check==all-non-file'
    # and '--vex-iropt-register-updates=allregs-at-mem-access' so that valgrind
    # deals properly with JIT'd JavaScript code.
    # deals properly with JIT'd JavaScript code.
    "valgrind": {
        "interactive": False,
        "args": " ".join(["--leak-check=full",
@@ -602,31 +602,29 @@ class ShutdownLeaks(object):
        self.currentTest = None
        self.seenShutdown = False

    def log(self, message):
        if message['action'] == 'log':
            line = message['message']
            if line[2:11] == "DOMWINDOW":
                self._logWindow(line)
            elif line[2:10] == "DOCSHELL":
                self._logDocShell(line)
        elif message['action'] == 'test_start':
            fileName = message['test'].replace("chrome://mochitests/content/browser/", "")
    def log(self, line):
        if line[2:11] == "DOMWINDOW":
            self._logWindow(line)
        elif line[2:10] == "DOCSHELL":
            self._logDocShell(line)
        elif line.startswith("TEST-START"):
            fileName = line.split(" ")[-1].strip().replace("chrome://mochitests/content/browser/", "")
            self.currentTest = {"fileName": fileName, "windows": set(), "docShells": set()}
        elif message['action'] == 'test_end':
        elif line.startswith("INFO TEST-END"):
            # don't track a test if no windows or docShells leaked
            if self.currentTest and (self.currentTest["windows"] or self.currentTest["docShells"]):
                self.tests.append(self.currentTest)
            self.currentTest = None
        elif message['action'] == 'suite_end':
        elif line.startswith("INFO TEST-START | Shutdown"):
            self.seenShutdown = True

    def process(self):
        for test in self._parseLeakingTests():
            for url, count in self._zipLeakedWindows(test["leakedWindows"]):
                self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d window(s) until shutdown [url = %s]" % (test["fileName"], count, url))
                self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d window(s) until shutdown [url = %s]", test["fileName"], count, url)

            if test["leakedDocShells"]:
                self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d docShell(s) until shutdown" % (test["fileName"], len(test["leakedDocShells"])))
                self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d docShell(s) until shutdown", test["fileName"], len(test["leakedDocShells"]))

    def _logWindow(self, line):
        created = line[:2] == "++"
@@ -635,7 +633,7 @@ class ShutdownLeaks(object):

        # log line has invalid format
        if not pid or not serial:
            self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>" % line)
            self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>", line)
            return

        key = pid + "." + serial
@@ -656,7 +654,7 @@ class ShutdownLeaks(object):

        # log line has invalid format
        if not pid or not id:
            self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>" % line)
            self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>", line)
            return

        key = pid + "." + id

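The hunks above swap ShutdownLeaks back from consuming structured message dicts to parsing raw harness lines by fixed prefixes. A minimal Python sketch of that restored line-based dispatch — an illustration with made-up sample lines, not the tree's actual class:

def classify(line):
    # "++DOMWINDOW ..." / "--DOCSHELL ..." lines carry a two-character
    # creation/destruction marker, hence the [2:...] slices.
    if line[2:11] == "DOMWINDOW":
        return "domwindow"
    elif line[2:10] == "DOCSHELL":
        return "docshell"
    elif line.startswith("TEST-START"):
        return "test-start"
    elif line.startswith("INFO TEST-END"):
        return "test-end"
    return "other"

assert classify("++DOMWINDOW == 1 (0x7f3a2c)") == "domwindow"
assert classify("TEST-START | /tests/dom/test_example.html") == "test-start"
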
@@ -21,13 +21,11 @@ fennecLogcatFilters = [ "The character encoding of the HTML document was not dec
class RemoteAutomation(Automation):
    _devicemanager = None

    def __init__(self, deviceManager, appName = '', remoteLog = None,
                 processArgs=None):
    def __init__(self, deviceManager, appName = '', remoteLog = None):
        self._devicemanager = deviceManager
        self._appName = appName
        self._remoteProfile = None
        self._remoteLog = remoteLog
        self._processArgs = processArgs or {};

        # Default our product to fennec
        self._product = "fennec"
@@ -186,21 +184,17 @@ class RemoteAutomation(Automation):
        if stdout == None or stdout == -1 or stdout == subprocess.PIPE:
            stdout = self._remoteLog

        return self.RProcess(self._devicemanager, cmd, stdout, stderr, env, cwd, self._appName,
                             **self._processArgs)
        return self.RProcess(self._devicemanager, cmd, stdout, stderr, env, cwd, self._appName)

    # be careful here as this inner class doesn't have access to outer class members
    class RProcess(object):
        # device manager process
        dm = None
        def __init__(self, dm, cmd, stdout=None, stderr=None, env=None, cwd=None, app=None,
                     messageLogger=None):
        def __init__(self, dm, cmd, stdout = None, stderr = None, env = None, cwd = None, app = None):
            self.dm = dm
            self.stdoutlen = 0
            self.lastTestSeen = "remoteautomation.py"
            self.proc = dm.launchProcess(cmd, stdout, cwd, env, True)
            self.messageLogger = messageLogger

            if (self.proc is None):
                if cmd[0] == 'am':
                    self.proc = stdout
@@ -216,9 +210,6 @@ class RemoteAutomation(Automation):
            # The benefit of the following sleep is unclear; it was formerly 15 seconds
            time.sleep(1)

            # Used to buffer log messages until we meet a line break
            self.logBuffer = ""

            @property
            def pid(self):
                pid = self.dm.processExist(self.procName)
@@ -230,51 +221,29 @@ class RemoteAutomation(Automation):
                return 0
            return pid

        def read_stdout(self):
        @property
        def stdout(self):
            """ Fetch the full remote log file using devicemanager and return just
                the new log entries since the last call (as a list of messages or lines).
                the new log entries since the last call (as a multi-line string).
            """
            if not self.dm.fileExists(self.proc):
                return []
            try:
                newLogContent = self.dm.pullFile(self.proc, self.stdoutlen)
            except DMError:
                # we currently don't retry properly in the pullFile
                # function in dmSUT, so an error here is not necessarily
                # the end of the world
                return []
            if not newLogContent:
                return []

            self.stdoutlen += len(newLogContent)

            if self.messageLogger is None:
            if self.dm.fileExists(self.proc):
                try:
                    newLogContent = self.dm.pullFile(self.proc, self.stdoutlen)
                except DMError:
                    # we currently don't retry properly in the pullFile
                    # function in dmSUT, so an error here is not necessarily
                    # the end of the world
                    return ''
                self.stdoutlen += len(newLogContent)
                # Match the test filepath from the last TEST-START line found in the new
                # log content. These lines are in the form:
                # 1234 INFO TEST-START | /filepath/we/wish/to/capture.html\n
                testStartFilenames = re.findall(r"TEST-START \| ([^\s]*)", newLogContent)
                if testStartFilenames:
                    self.lastTestSeen = testStartFilenames[-1]
                print newLogContent
                return [newLogContent]

            self.logBuffer += newLogContent
            lines = self.logBuffer.split('\n')
            if not lines:
                return

            # We only keep the last (unfinished) line in the buffer
            self.logBuffer = lines[-1]
            del lines[-1]
            messages = []
            for line in lines:
                # This passes the line to the logger (to be logged or buffered)
                # and returns a structured message (dict) or None, depending on the log
                message = self.messageLogger.write(line)
                if message is None:
                    continue
                messages.append(message)
                if message['action'] == 'test_start':
                    self.lastTestSeen = message['test']

            return messages
                return newLogContent.strip('\n').strip()
            else:
                return ''

        @property
        def getLastTestSeen(self):
@@ -289,7 +258,7 @@ class RemoteAutomation(Automation):
        def wait(self, timeout = None, noOutputTimeout = None):
            timer = 0
            noOutputTimer = 0
            interval = 20
            interval = 20

            if timeout == None:
                timeout = self.timeout
@@ -297,9 +266,10 @@ class RemoteAutomation(Automation):
            status = 0
            while (self.dm.getTopActivity() == self.procName):
                # retrieve log updates every 60 seconds
                if timer % 60 == 0:
                    messages = self.read_stdout()
                    if messages:
                if timer % 60 == 0:
                    t = self.stdout
                    if t != '':
                        print t
                        noOutputTimer = 0

                time.sleep(interval)
@@ -313,7 +283,7 @@ class RemoteAutomation(Automation):
                    break

            # Flush anything added to stdout during the sleep
            self.read_stdout()
            print self.stdout

            return status

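Both variants of RProcess above tail the remote log by keeping a byte offset and pulling only what was appended since the previous call. A minimal sketch of that pattern, with a hypothetical pull_file(path, offset) callable standing in for devicemanager's pullFile:

class LogTailer(object):
    def __init__(self, pull_file, path):
        self.pull_file = pull_file  # hypothetical: callable(path, offset) -> new bytes
        self.path = path
        self.offset = 0

    def read_new(self):
        # Pull only the bytes written since the last call, then advance
        # the offset so the next call starts where this one stopped.
        chunk = self.pull_file(self.path, self.offset)
        if not chunk:
            return ''
        self.offset += len(chunk)
        return chunk
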
@@ -12,6 +12,7 @@ public class FennecMochitestAssert implements Assert {
    private LinkedList<testInfo> mTestList = new LinkedList<testInfo>();

    // Internal state variables to make logging match up with existing mochitests
    private int mLineNumber = 0;
    private int mPassed = 0;
    private int mFailed = 0;
    private int mTodo = 0;
@@ -44,13 +45,13 @@ public class FennecMochitestAssert implements Assert {

        String message;
        if (!mLogStarted) {
            dumpLog("SimpleTest START");
            dumpLog(Integer.toString(mLineNumber++) + " INFO SimpleTest START");
            mLogStarted = true;
        }

        if (mLogTestName != "") {
            long diff = SystemClock.uptimeMillis() - mStartTime;
            message = "TEST-END | " + mLogTestName;
            message = Integer.toString(mLineNumber++) + " INFO TEST-END | " + mLogTestName;
            message += " | finished in " + diff + "ms";
            dumpLog(message);
            mLogTestName = "";
@@ -62,7 +63,7 @@ public class FennecMochitestAssert implements Assert {
        mLogTestName = nameParts[nameParts.length - 1];
        mStartTime = SystemClock.uptimeMillis();

        dumpLog("TEST-START | " + mLogTestName);
        dumpLog(Integer.toString(mLineNumber++) + " INFO TEST-START | " + mLogTestName);
    }

    class testInfo {
@@ -94,7 +95,7 @@ public class FennecMochitestAssert implements Assert {
        String diag = test.mName;
        if (test.mDiag != null) diag += " - " + test.mDiag;

        String message = resultString + " | " + mLogTestName + " | " + diag;
        String message = Integer.toString(mLineNumber++) + " INFO " + resultString + " | " + mLogTestName + " | " + diag;
        dumpLog(message);

        if (test.mInfo) {
@@ -116,21 +117,21 @@ public class FennecMochitestAssert implements Assert {

        if (mLogTestName != "") {
            long diff = SystemClock.uptimeMillis() - mStartTime;
            message = "TEST-END | " + mLogTestName;
            message = Integer.toString(mLineNumber++) + " INFO TEST-END | " + mLogTestName;
            message += " | finished in " + diff + "ms";
            dumpLog(message);
            mLogTestName = "";
        }

        message = "TEST-START | Shutdown";
        message = Integer.toString(mLineNumber++) + " INFO TEST-START | Shutdown";
        dumpLog(message);
        message = "Passed: " + Integer.toString(mPassed);
        message = Integer.toString(mLineNumber++) + " INFO Passed: " + Integer.toString(mPassed);
        dumpLog(message);
        message = "Failed: " + Integer.toString(mFailed);
        message = Integer.toString(mLineNumber++) + " INFO Failed: " + Integer.toString(mFailed);
        dumpLog(message);
        message = "Todo: " + Integer.toString(mTodo);
        message = Integer.toString(mLineNumber++) + " INFO Todo: " + Integer.toString(mTodo);
        dumpLog(message);
        message = "SimpleTest FINISHED";
        message = Integer.toString(mLineNumber++) + " INFO SimpleTest FINISHED";
        dumpLog(message);
    }

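The Java changes above re-add the "<line number> INFO <message>" framing that the desktop harness's log consumers expect. A rough Python sketch of the same counter-prefixed framing, where the counter plays the role of mLineNumber above (illustrative, not harness code):

class NumberedLog(object):
    """Counter-prefixed log framing: '<n> INFO <message>'."""
    def __init__(self):
        self.line_number = 0

    def format(self, message):
        out = "%d INFO %s" % (self.line_number, message)
        self.line_number += 1
        return out

log = NumberedLog()
assert log.format("SimpleTest START") == "0 INFO SimpleTest START"
assert log.format("TEST-START | Shutdown") == "1 INFO TEST-START | Shutdown"
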
@@ -149,6 +149,7 @@ is(r.result, null,

// Test loading an empty file works (and doesn't crash!)
var emptyFile = createFileWithData("");
dump("hello nurse");
r = new FileReader();
r.onload = getLoadHandler("", 0, "empty no encoding reading");
r.readAsText(emptyFile, "");
@@ -426,8 +427,8 @@ function testHasRun() {
  //alert(testRanCounter);
  ++testRanCounter;
  if (testRanCounter == expectedTestCount) {
    is(onloadHasRunText, true, "onload text should have fired by now");
    is(onloadHasRunBinary, true, "onload binary should have fired by now");
    is(onloadHasRunText, true, "onload text should have fired by now");
    is(onloadHasRunBinary, true, "onload binary should have fired by now");
    SimpleTest.finish();
  }
}

@@ -37,30 +37,34 @@
      oldComplete();
    }
  };
  TestRunner.structuredLogger._dumpMessage = function(msg) {

  let oldLog = TestRunner.log;
  TestRunner.log = function(msg) {
    sendAsyncMessage("test:SpeechSynthesis:ipcTestMessage", { msg: msg });
  }
}

let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
function validStructuredMessage(message) {
  return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
}
let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/;

function onTestMessage(data) {
  let message = SpecialPowers.wrap(data).data.msg;
  let message = SpecialPowers.wrap(data).json.msg;
  let match = regex.exec(message);
  if (match) {
    let state = match[1];
    let details = match[2] + " | " + match[3];

  if (validStructuredMessage(message)) {
    if (message.test === undefined || message.message === undefined) {
      return;
    }

    let details = message.test + " | " + message.message;

    switch(message.action) {
      case "test_status":
      case "test_end":
        ok(message.expected === undefined, message.test, message.message);
    switch (state) {
      case "TEST-PASS":
      case "TEST-KNOWN-FAIL":
        ok(true, details);
        break;

      case "TEST-UNEXPECTED-FAIL":
      case "TEST-UNEXPECTED-PASS":
        ok(false, details);
        break;

      case "TEST-DEBUG-INFO":
      default:
        info(details);
    }

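The removed JavaScript above gates on validStructuredMessage before treating a message as structured. Restated as a small Python predicate for clarity (an illustration, not harness code):

VALID_ACTIONS = {'suite_start', 'suite_end', 'test_start', 'test_end',
                 'test_status', 'process_output', 'log'}

def valid_structured_message(message):
    # A message is structured iff it carries a recognized 'action'.
    return message.get('action') in VALID_ACTIONS

assert valid_structured_message({'action': 'test_status', 'test': 't.html'})
assert not valid_structured_message({'msg': 'TEST-PASS | t.html | ok'})
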
@@ -36,30 +36,34 @@
  }
};

TestRunner.structuredLogger._dumpMessage = function(msg) {
let oldLog = TestRunner.log;
TestRunner.log = function(msg) {
  sendAsyncMessage("test:DeviceStorage:ipcTestMessage", { msg: msg });
}
}

let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
function validStructuredMessage(message) {
  return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
}
let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/;

function onTestMessage(data) {
  let message = SpecialPowers.wrap(data).data.msg;
  let message = SpecialPowers.wrap(data).json.msg;
  let match = regex.exec(message);
  if (match) {
    let state = match[1];
    let details = match[2] + " | " + match[3];

  if (validStructuredMessage(message)) {
    if (message.test === undefined || message.message === undefined) {
      return;
    }

    switch (message.action) {
      case "test_status":
      case "test_end":
        ok(message.expected === undefined, message.test, message.message);
    switch (state) {
      case "TEST-PASS":
      case "TEST-KNOWN-FAIL":
        ok(true, details);
        break;

      case "TEST-UNEXPECTED-FAIL":
      case "TEST-UNEXPECTED-PASS":
        ok(false, details);
        break;

      case "TEST-DEBUG-INFO":
      default:
        let details = message.test + " | " + message.message;
        info(details);
    }
  }

@@ -42,17 +42,11 @@ var W3CTest = {

  /**
   * Prefixes for the error logging. Indexed first by int(todo) and second by
   * int(result). Also contains the test's status, and expected status.
   * int(result).
   */
  "prefixes": [
    [
      {status: 'FAIL', expected: 'PASS', message: "TEST-UNEXPECTED-FAIL"},
      {status: 'PASS', expected: 'PASS', message: "TEST-PASS"}
    ],
    [
      {status: 'FAIL', expected: 'FAIL', message: "TEST-KNOWN-FAIL"},
      {status: 'PASS', expected: 'FAIL', message: "TEST-UNEXPECTED-PASS"}
    ]
    ["TEST-UNEXPECTED-FAIL", "TEST-PASS"],
    ["TEST-KNOWN-FAIL", "TEST-UNEXPECTED-PASS"]
  ],

  /**
@@ -139,21 +133,14 @@ var W3CTest = {
   */
  "_log": function(test) {
    var url = this.getURL();
    var message = this.formatTestMessage(test);
    var result = this.prefixes[+test.todo][+test.result];

    var msg = this.prefixes[+test.todo][+test.result] + " | ";
    if (url) {
      msg += url;
    }
    msg += " | " + this.formatTestMessage(test);
    if (this.runner) {
      this.runner.structuredLogger.testStatus(url,
                                              test.name,
                                              result.status,
                                              result.expected,
                                              message);
      this.runner[(test.result === !test.todo) ? "log" : "error"](msg);
    } else {
      var msg = result.message + " | ";
      if (url) {
        msg += url;
      }
      msg += " | " + this.formatTestMessage(test);
      dump(msg + "\n");
    }
  },

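The prefixes table above is indexed first by int(todo) and then by int(result); a tiny worked example of that two-level lookup, shown in Python for brevity:

prefixes = [
    ["TEST-UNEXPECTED-FAIL", "TEST-PASS"],        # todo == False
    ["TEST-KNOWN-FAIL", "TEST-UNEXPECTED-PASS"],  # todo == True
]
todo, result = False, True  # a passing, non-todo test
assert prefixes[int(todo)][int(result)] == "TEST-PASS"
assert prefixes[int(True)][int(False)] == "TEST-KNOWN-FAIL"
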
@@ -49,33 +49,44 @@
  }
};

TestRunner.structuredLogger._dumpMessage = function(msg) {
function sendTestMessage(msg) {
  sendAsyncMessage("test:indexedDB:ipcTestMessage", { msg: msg });
}

TestRunner.log = sendTestMessage;
TestRunner.error = sendTestMessage;
}

let regexString =
  "^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL" +
  "|TEST-DEBUG-INFO|TEST-INFO) \\| ([^\\|]+) \\|(.*)";

let regex = new RegExp(regexString);

let seenTestMessage = false;

let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
function validStructuredMessage(message) {
  return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
}
function onTestMessage(data) {
  seenTestMessage = true;
  let message = SpecialPowers.wrap(data).data.msg;
  let match = regex.exec(message);
  if (match) {
    let state = match[1];
    let details = match[2] + " | " + match[3];

  if (validStructuredMessage(message)) {
    if (message.test === undefined || message.message === undefined) {
      return;
    }

    switch (message.action) {
      case "test_status":
      case "test_end":
        ok(message.expected === undefined, message.test, message.message);
    switch (state) {
      case "TEST-PASS":
      case "TEST-KNOWN-FAIL":
        ok(true, details);
        break;

      case "TEST-UNEXPECTED-FAIL":
      case "TEST-UNEXPECTED-PASS":
        ok(false, details);
        break;

      case "TEST-INFO":
      case "TEST-DEBUG-INFO":
      default:
        let details = message.test + " | " + message.message;
        info(details);
    }
  }

@@ -30,7 +30,7 @@ inputmethod_setup(function() {
function appFrameScript() {
  let input = content.document.getElementById('test-input');
  input.onkeydown = input.onkeypress = input.onkeyup = function(event) {
    dump('key event was fired in file_test_backspace_event.html.\n');
    dump('key event was fired in file_test_backspace_event.html.');
    sendAsyncMessage('test:KeyBoard:keyEvent', {'type':event.type});
  };
}

@@ -24,8 +24,8 @@
  SpecialPowers.prototype.unregisterProcessCrashObservers = function() { };

  content.wrappedJSObject.RunSet.reloadAndRunAll({
    preventDefault: function() { },
    __exposedProps__: { preventDefault: 'r' }
    preventDefault: function() { },
    __exposedProps__: { preventDefault: 'r' }
  });
}

@@ -44,31 +44,37 @@
    oldComplete();
  }
};

TestRunner.structuredLogger._dumpMessage = function(msg) {
let oldLog = TestRunner.log;
TestRunner.log = function(msg) {
  sendAsyncMessage("test:PeerConnection:ipcTestMessage", { msg: msg });
}
};
TestRunner.error = function(msg) {
  sendAsyncMessage("test:PeerConnection:ipcTestMessage", { msg: msg });
};
}

let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
function validStructuredMessage(message) {
  return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
}
let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/;

function onTestMessage(data) {
  let message = SpecialPowers.wrap(data).data.msg;
  let message = SpecialPowers.wrap(data).json.msg;
  let match = regex.exec(message);
  if (match) {
    let state = match[1];
    let details = match[2] + " | " + match[3];

  if (validStructuredMessage(message)) {
    if (message.test === undefined || message.message === undefined) {
      return;
    }

    switch (message.action) {
      case "test_status":
      case "test_end":
        ok(message.expected === undefined, message.test, message.message);
    switch (state) {
      case "TEST-PASS":
      case "TEST-KNOWN-FAIL":
        ok(true, details);
        break;

      case "TEST-UNEXPECTED-FAIL":
      case "TEST-UNEXPECTED-PASS":
        ok(false, details);
        break;

      case "TEST-DEBUG-INFO":
      default:
        let details = message.test + " | " + message.message;
        info(details);
    }
  }

@@ -5,7 +5,7 @@

<body onload="runTests()">
<script class="testbody" type="application/javascript">
dump('lastScript\n');
dump('lastScript');

SimpleTest.waitForExplicitFinish();
setTestPluginEnabledState(SpecialPowers.Ci.nsIPluginTag.STATE_ENABLED);

@@ -7,7 +7,7 @@ onmessage = function(evt) {
  id = setTimeout(function() {}, 200);
  postMessage(clearTimeout(id) == undefined);

  postMessage(dump(42 + '\n') == undefined);
  postMessage(dump(42) == undefined);

  postMessage('finished');
}

@@ -12,7 +12,7 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=661980
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=661980">Mozilla Bug 661980</a>
<p id="display"></p>
<div id="content" style="display: none">


</div>
<pre id="test">
<script type="application/javascript">
@@ -30,7 +30,7 @@ var fakeTarget = {

var mouseevent = document.createEvent("MouseEvent");
var didThrow = false;
dump("hello nurse\n");
dump("hello nurse");
try {
  mouseevent.initMouseEvent("mouseover",
                            false, false,

@@ -33,11 +33,11 @@ function runTest() {
  // Let parent process sent message.
  // Wait for onLoad event from 'inner' iframe.
  yield;


  postPos();


  inner.onload = null;
  dump("hi\n");
  dump("hi");
  inner.contentWindow.location = "bug583889_inner2.html#id2"
  waitAsync();
  // Let parent process sent message.

@@ -74,7 +74,7 @@ addLoadEvent(function() {

  try {
    var sheet = $("cross-origin-sheet").sheet;
    dump('aaa\n');
    dump('aaa');
    is(sheet.cssRules.length, 2,
       "Should be able to get length of list of rules");
    is(sheet.cssRules[0].style.color, "green",

@@ -450,7 +450,7 @@ class Marionette(object):
                 emulator_binary=None, emulator_res=None, connect_to_running_emulator=False,
                 gecko_log=None, homedir=None, baseurl=None, no_window=False, logdir=None,
                 busybox=None, symbols_path=None, timeout=None, socket_timeout=360,
                 device_serial=None, adb_path=None, process_args=None):
                 device_serial=None, adb_path=None):
        self.host = host
        self.port = self.local_port = port
        self.bin = bin
@@ -507,8 +507,7 @@ class Marionette(object):
                                                userdata=emulator_img,
                                                resolution=emulator_res,
                                                profile=profile,
                                                adb_path=adb_path,
                                                process_args=process_args)
                                                adb_path=adb_path)
            self.emulator = self.runner.device
            self.emulator.start()
            self.port = self.emulator.setup_port_forwarding(self.port)
@@ -516,8 +515,7 @@ class Marionette(object):

        if connect_to_running_emulator:
            self.runner = B2GEmulatorRunner(b2g_home=homedir,
                                            logdir=logdir,
                                            process_args=process_args)
                                            logdir=logdir)
            self.emulator = self.runner.device
            self.emulator.connect()
            self.port = self.emulator.setup_port_forwarding(self.port)

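The Marionette hunks above drop the process_args passthrough. The general pattern being removed is an optional dict of extra keyword arguments forwarded to a runner; a generic sketch (Runner and the argument names here are stand-ins, not the real B2GEmulatorRunner API):

class Runner(object):
    def __init__(self, logdir=None, **extra):
        self.logdir = logdir
        self.extra = extra  # whatever the caller chose to forward

def make_runner(logdir=None, process_args=None):
    # Default to an empty dict so a missing option costs nothing.
    process_args = process_args or {}
    return Runner(logdir=logdir, **process_args)

runner = make_runner(logdir="/tmp/logs", process_args={"messageLogger": None})
assert "messageLogger" in runner.extra
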
@@ -11,7 +11,6 @@
        width="1024">
  <script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/MozillaLogger.js"/>
  <script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/LogController.js"/>
  <script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/TestRunner.js"/>
  <script type="application/javascript" src="chrome://mochikit/content/chrome-harness.js"/>
  <script type="application/javascript" src="chrome://mochikit/content/manifestLibrary.js" />
  <script type="application/javascript" src="chrome://mochikit/content/chunkifyTests.js"/>
@@ -66,7 +65,7 @@
      var Cc = Components.classes;
      var Ci = Components.interfaces;
    }


    var gConfig;

    var gDumper = {
@@ -83,12 +82,12 @@
        delete this.fileLogger;
        return this.fileLogger = logger;
      },
      structuredLogger: new StructuredLogger(),

      dump: function (str) {
        this.structuredLogger.info(str);
        dump(str);

        if (this.fileLogger)
          this.fileLogger.log(str);
          this.fileLogger._foStream.write(str, str.length);
      },

      done: function () {
@@ -142,18 +141,7 @@
    this.lastOutputTime = Date.now();
    this.results.push(result);

    if (result.info) {
      if (result.msg) {
        this.dumper.structuredLogger.info(result.msg);
      }
      return;
    }

    this.dumper.structuredLogger.testStatus(this.path,
                                            result.msg,
                                            result.status,
                                            result.expected,
                                            result.msg);
    this.dumper.dump(result.result + " | " + this.path + " | " + result.msg + "\n");
  },

  setDuration: function setDuration(duration) {

@@ -59,7 +59,7 @@ function testInit() {
  messageManager.removeMessageListener("chromeEvent", messageHandler);
  var url = m.json.data;

  // Window is the [ChromeWindow] for messageManager, so we need content.window
  // Window is the [ChromeWindow] for messageManager, so we need content.window
  // Currently chrome tests are run in a content window instead of a ChromeWindow
  var webNav = content.window.QueryInterface(Components.interfaces.nsIInterfaceRequestor)
               .getInterface(Components.interfaces.nsIWebNavigation);
@@ -158,7 +158,7 @@ Tester.prototype = {
    if (gConfig.repeat)
      this.repeat = gConfig.repeat;

    this.dumper.structuredLogger.info("*** Start BrowserChrome Test Results ***");
    this.dumper.dump("*** Start BrowserChrome Test Results ***\n");
    Services.console.registerListener(this);
    Services.obs.addObserver(this, "chrome-document-global-created", false);
    Services.obs.addObserver(this, "content-document-global-created", false);
@@ -204,7 +204,7 @@ Tester.prototype = {
    }

    // Remove stale windows
    this.dumper.structuredLogger.info("checking window state");
    this.dumper.dump("TEST-INFO | checking window state\n");
    let windowsEnum = Services.wm.getEnumerator(null);
    while (windowsEnum.hasMoreElements()) {
      let win = windowsEnum.getNext();
@@ -223,10 +223,7 @@ Tester.prototype = {
        if (this.currentTest)
          this.currentTest.addResult(new testResult(false, msg, "", false));
        else
          this.dumper.structuredLogger.testEnd("browser-test.js",
                                               "FAIL",
                                               "PASS",
                                               msg);
          this.dumper.dump("TEST-UNEXPECTED-FAIL | (browser-test.js) | " + msg + "\n");

        win.close();
      }
@@ -253,23 +250,22 @@ Tester.prototype = {
    Services.obs.removeObserver(this, "chrome-document-global-created");
    Services.obs.removeObserver(this, "content-document-global-created");
    this.Promise.Debugging.clearUncaughtErrorObservers();
    this.dumper.structuredLogger.info("TEST-START | Shutdown");
    this.dumper.dump("\nINFO TEST-START | Shutdown\n");

    if (this.tests.length) {
      this.dumper.structuredLogger.info("Browser Chrome Test Summary");
      this.dumper.structuredLogger.info("Passed: " + passCount);
      this.dumper.structuredLogger.info("Failed: " + failCount);
      this.dumper.structuredLogger.info("Todo: " + todoCount);
      this.dumper.dump("Browser Chrome Test Summary\n");

      this.dumper.dump("\tPassed: " + passCount + "\n" +
                       "\tFailed: " + failCount + "\n" +
                       "\tTodo: " + todoCount + "\n");
    } else {
      this.dumper.structuredLogger.testEnd("browser-test.js",
                                           "FAIL",
                                           "PASS",
                                           "No tests to run. Did you pass an invalid --test-path?");
      this.dumper.dump("TEST-UNEXPECTED-FAIL | (browser-test.js) | " +
                       "No tests to run. Did you pass an invalid --test-path?\n");
    }
    this.dumper.structuredLogger.info("*** End BrowserChrome Test Results ***");

    this.dumper.dump("\n*** End BrowserChrome Test Results ***\n");

    this.dumper.done();


    // Tests complete, notify the callback and return
    this.callback(this.tests);
    this.callback = null;
@@ -418,7 +414,7 @@ Tester.prototype = {
          .getService(Ci.nsIXULRuntime)
          .processType == Ci.nsIXULRuntime.PROCESS_TYPE_DEFAULT)
      {
        this.MemoryStats.dump(this.dumper.structuredLogger,
        this.MemoryStats.dump((l) => { this.dumper.dump(l + "\n"); },
                              this.currentTestIndex,
                              this.currentTest.path,
                              gConfig.dumpOutputDirectory,
@@ -428,10 +424,7 @@ Tester.prototype = {

      // Note the test run time
      let time = Date.now() - this.lastStartTime;
      this.dumper.structuredLogger.testEnd(this.currentTest.path,
                                           "OK",
                                           undefined,
                                           "finished in " + time + "ms");
      this.dumper.dump("INFO TEST-END | " + this.currentTest.path + " | finished in " + time + "ms\n");
      this.currentTest.setDuration(time);

      if (this.runUntilFailure && this.currentTest.failCount > 0) {
@@ -537,7 +530,7 @@ Tester.prototype = {
  }),

  execTest: function Tester_execTest() {
    this.dumper.structuredLogger.testStart(this.currentTest.path);
    this.dumper.dump("TEST-START | " + this.currentTest.path + "\n");

    this.SimpleTest.reset();

@@ -706,14 +699,10 @@ function testResult(aCondition, aName, aDiag, aIsTodo, aStack) {
  this.todo = aIsTodo;

  if (this.pass) {
    if (aIsTodo) {
      this.status = "FAIL";
      this.expected = "FAIL";
    } else {
      this.status = "PASS";
      this.expected = "PASS";
    }

    if (aIsTodo)
      this.result = "TEST-KNOWN-FAIL";
    else
      this.result = "TEST-PASS";
  } else {
    if (aDiag) {
      if (typeof aDiag == "object" && "fileName" in aDiag) {
@@ -736,13 +725,10 @@ function testResult(aCondition, aName, aDiag, aIsTodo, aStack) {
      }
      this.msg += Task.Debugging.generateReadableStack(normalized, " ");
    }
    if (aIsTodo) {
      this.status = "PASS";
      this.expected = "FAIL";
    } else {
      this.status = "FAIL";
      this.expected = "PASS";
    }
    if (aIsTodo)
      this.result = "TEST-UNEXPECTED-PASS";
    else
      this.result = "TEST-UNEXPECTED-FAIL";

    if (gConfig.debugOnFailure) {
      // You've hit this line because you requested to break into the
@@ -755,6 +741,7 @@ function testResult(aCondition, aName, aDiag, aIsTodo, aStack) {
function testMessage(aName) {
  this.msg = aName || "";
  this.info = true;
  this.result = "TEST-INFO";
}

// Need to be careful adding properties to this object, since its properties

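testResult above encodes a four-way outcome: the pass/todo flags determine both the structured status/expected pair and the legacy TBPL prefix. The full mapping, restated as a table in Python for illustration:

OUTCOMES = {
    # (pass, todo): (status, expected, legacy TBPL prefix)
    (True,  False): ("PASS", "PASS", "TEST-PASS"),
    (True,  True):  ("FAIL", "FAIL", "TEST-KNOWN-FAIL"),
    (False, False): ("FAIL", "PASS", "TEST-UNEXPECTED-FAIL"),
    (False, True):  ("PASS", "FAIL", "TEST-UNEXPECTED-PASS"),
}
# An unexpected failure reports status FAIL against an expected PASS.
assert OUTCOMES[(False, False)] == ("FAIL", "PASS", "TEST-UNEXPECTED-FAIL")
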
@@ -9,7 +9,7 @@ Components.utils.import("resource://gre/modules/NetUtil.jsm");

/*
 * getChromeURI converts a URL to a URI
 *
 *
 * url: string of a URL (http://mochi.test/test.html)
 * returns: a nsiURI object representing the given URL
 *
@@ -22,7 +22,7 @@ function getChromeURI(url) {

/*
 * Convert a URL (string) into a nsIURI or NSIJARURI
 * This is intended for URL's that are on a file system
 * This is intended for URL's that are on a file system
 * or in packaged up in an extension .jar file
 *
 * url: a string of a url on the local system(http://localhost/blah.html)
@@ -64,7 +64,7 @@ function getChromeDir(resolvedURI) {
 * aTestPath: passed in testPath value from command line such as: dom/tests/mochitest
 * aDir: the test dir to append to the baseURL after getting a directory interface
 *
 * As a note, this is hardcoded to the .jar structure we use for mochitest.
 * As a note, this is hardcoded to the .jar structure we use for mochitest.
 * Please don't assume this works for all jar files.
 */
function getMochitestJarListing(aBasePath, aTestPath, aDir)
@@ -122,11 +122,11 @@ function zList(base, zReader, baseJarName, recurse) {
  var links = {};
  var count = 0;
  var fileArray = [];


  while(dirs.hasMore()) {
    var entryName = dirs.getNext();
    if (entryName.substr(-1) == '/' && entryName.split('/').length == (base.split('/').length + 1) ||
        (entryName.substr(-1) != '/' && entryName.split('/').length == (base.split('/').length))) {
        (entryName.substr(-1) != '/' && entryName.split('/').length == (base.split('/').length))) {
      fileArray.push(entryName);
    }
  }
@@ -178,7 +178,7 @@ function getFileListing(basePath, testPath, dir, srvScope)

  if (testPath != undefined) {
    var extraPath = testPath;


    var fileNameRegexp = /(browser|test)_.+\.(xul|html|js)$/;

    // Invalid testPath...
@@ -341,7 +341,7 @@ function getTestFilePath(path) {
}

/*
 * Simple utility function to take the directory structure in jarentryname and
 * Simple utility function to take the directory structure in jarentryname and
 * translate that to a path of a nsILocalFile.
 */
function buildRelativePath(jarentryname, destdir, basepath)

@@ -24,7 +24,6 @@ import re
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import traceback
@@ -39,14 +38,22 @@ from mochitest_options import MochitestOptions
from mozprofile import Profile, Preferences
from mozprofile.permissions import ServerLocations
from urllib import quote_plus as encodeURIComponent
from mozlog.structured.formatters import TbplFormatter
from mozlog.structured.handlers import StreamHandler
from mozlog.structured.structuredlog import StructuredLogger

# This should use the `which` module already in tree, but it is
# not yet present in the mozharness environment
from mozrunner.utils import findInPath as which

# set up logging handler a la automation.py.in for compatability
import logging
log = logging.getLogger()
def resetGlobalLog():
    while log.handlers:
        log.removeHandler(log.handlers[0])
    handler = logging.StreamHandler(sys.stdout)
    log.setLevel(logging.INFO)
    log.addHandler(handler)
resetGlobalLog()

###########################
# Option for NSPR logging #
###########################

@@ -57,118 +64,6 @@ from mozrunner.utils import findInPath as which

NSPR_LOG_MODULES = ""

####################
# LOG HANDLING #
####################

### output processing
class MochitestFormatter(TbplFormatter):
    log_num = 0

    def __init__(self):
        super(MochitestFormatter, self).__init__()

    def __call__(self, data):
        tbpl_output = super(MochitestFormatter, self).__call__(data)
        output = '%d INFO %s' % (MochitestFormatter.log_num, tbpl_output)
        MochitestFormatter.log_num += 1
        return output

### output processing
class MessageLogger(object):
    """File-like object for logging messages (structured logs)"""
    BUFFERING_THRESHOLD = 100
    BUFFERED_ACTIONS = set(['test_status', 'log'])

    def __init__(self, logger, buffering=True, name='mochitest'):
        self.logger = logger
        self.buffering = buffering
        self.tests_started = False

        # Message buffering
        self.buffered_messages = []

    def write(self, line):
        # Decoding messages (structured or not)
        line = line.rstrip().decode("UTF-8", "replace")
        # Skipping empty lines
        if not line:
            return

        try:
            message = json.loads(line)
            if not isinstance(message, dict) or not 'action' in message:
                return
        except ValueError:
            message = dict(action='log', level='info', message=line)

        if not self.tests_started and message['action'] == 'test_start':
            self.tests_started = True

        # Activation/deactivating message buffering from the JS side
        if message['action'] == 'buffering_on':
            self.buffering = True
            return message
        if message['action'] == 'buffering_off':
            self.buffering = False
            return message

        # If we don't do any buffering, or the tests haven't started, the messages are directly logged
        if not self.buffering or not self.tests_started:
            self.logger.log_raw(message)
            return message

        # If a test ended, we clean the buffer
        if message['action'] == 'test_end':
            self.buffered_messages = []

        # Buffering logic; Also supports "raw" errors (in log messages) because some tests manually dump 'TEST-UNEXPECTED-FAIL'.
        is_error = 'expected' in message or (message['action'] == 'log' and message['message'].startswith('TEST-UNEXPECTED'))
        if not is_error and message['action'] not in self.BUFFERED_ACTIONS:
            self.logger.log_raw(message)
            return message

        # test_status messages buffering
        if is_error:
            if self.buffered_messages:
                number_messages = min(self.BUFFERING_THRESHOLD, len(self.buffered_messages))
                self.logger.info("dumping last {0} message(s)".format(number_messages))
                self.logger.info("if you need more context, please use SimpleTest.requestCompleteLog() in your test")
                # Dumping previously buffered messages
                self.dump_buffered(limit=True)

            # Logging the error message
            self.logger.log_raw(message)
        else:
            # Buffering the message
            self.buffered_messages.append(message)

        return message

    def flush(self):
        sys.stdout.flush()

    def dump_buffered(self, limit=False):
        if limit:
            dumped_messages = self.buffered_messages[-self.BUFFERING_THRESHOLD:]
        else:
            dumped_messages = self.buffered_messages

        for buf_msg in dumped_messages:
            self.logger.log_raw(buf_msg)
        # Cleaning the list of buffered messages
        self.buffered_messages = []

    def finish(self):
        self.dump_buffered()
        self.buffering = False
        self.logger.suite_end()

# Global logger
log = StructuredLogger('mochitest')
stream_handler = StreamHandler(stream=sys.stdout, formatter=MochitestFormatter())
log.add_handler(stream_handler)
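
A hedged usage sketch for the MessageLogger removed above: JSON-encoded lines come back as structured message dicts, and anything else is wrapped as a plain 'log' message. FakeLogger is a stand-in for mozlog's StructuredLogger, and this assumes the class above is importable under Python 2 (matching the harness):

class FakeLogger(object):
    # Only the three methods MessageLogger touches are stubbed out.
    def log_raw(self, message):
        pass
    def info(self, message):
        pass
    def suite_end(self):
        pass

logger = MessageLogger(FakeLogger(), buffering=False)
msg = logger.write('{"action": "test_start", "test": "t.html"}')
assert msg['action'] == 'test_start'
msg = logger.write('plain harness output')  # non-JSON falls back to a log message
assert msg['action'] == 'log' and msg['message'] == 'plain harness output'
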
####################
# PROCESS HANDLING #
####################

@@ -186,7 +81,7 @@ def killPid(pid):
    try:
        os.kill(pid, getattr(signal, "SIGKILL", signal.SIGTERM))
    except Exception, e:
        log.info("Failed to kill process %d: %s" % (pid, str(e)))
        log.info("Failed to kill process %d: %s", pid, str(e))

if mozinfo.isWin:
    import ctypes, ctypes.wintypes, time, msvcrt
@@ -278,9 +173,9 @@ class MochitestServer(object):
        command = [xpcshell] + args
        self._process = mozprocess.ProcessHandler(command, cwd=SCRIPT_DIR, env=env)
        self._process.run()
        log.info("%s : launching %s" % (self.__class__.__name__, command))
        log.info("%s : launching %s", self.__class__.__name__, command)
        pid = self._process.pid
        log.info("runtests.py | Server pid: %d" % pid)
        log.info("runtests.py | Server pid: %d", pid)

    def ensureReady(self, timeout):
        assert timeout >= 0
@@ -344,7 +239,7 @@ class WebSocketServer(object):
        self._process = mozprocess.ProcessHandler(cmd, cwd=SCRIPT_DIR)
        self._process.run()
        pid = self._process.pid
        log.info("runtests.py | Websocket server pid: %d" % pid)
        log.info("runtests.py | Websocket server pid: %d", pid)

    def stop(self):
        self._process.kill()
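
A recurring change throughout these hunks is log.info(fmt % args) versus log.info(fmt, args). Both spellings produce the same text; passing the arguments separately defers %-interpolation to the logging framework, which skips the formatting work entirely when the record is filtered out:

import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("runtests")
pid = 1234
log.info("runtests.py | Server pid: %d" % pid)  # eager interpolation
log.info("runtests.py | Server pid: %d", pid)   # lazy interpolation
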
@@ -483,6 +378,8 @@ class MochitestUtilsMixin(object):
            self.urlOpts.append("dumpAboutMemoryAfterTest=true")
        if options.dumpDMDAfterTest:
            self.urlOpts.append("dumpDMDAfterTest=true")
        if options.quiet:
            self.urlOpts.append("quiet=true")

    def getTestFlavor(self, options):
        if options.browserChrome:
@@ -574,10 +471,6 @@ class MochitestUtilsMixin(object):
                continue
            paths.append(test)

        # suite_start message
        flat_paths = [p['path'] for p in paths]
        self.message_logger.logger.suite_start(flat_paths)

        # Bug 883865 - add this functionality into manifestparser
        with open(os.path.join(SCRIPT_DIR, 'tests.json'), 'w') as manifestFile:
            manifestFile.write(json.dumps({'tests': paths}))

@@ -637,21 +530,21 @@ class MochitestUtilsMixin(object):
                log.info('Stopping web server')
                self.server.stop()
            except Exception:
                log.critical('Exception when stopping web server')
                log.exception('Exception when stopping web server')

        if self.wsserver is not None:
            try:
                log.info('Stopping web socket server')
                self.wsserver.stop()
            except Exception:
                log.critical('Exception when stopping web socket server');
                log.exception('Exception when stopping web socket server');

        if self.sslTunnel is not None:
            try:
                log.info('Stopping ssltunnel')
                self.sslTunnel.stop()
            except Exception:
                log.critical('Exception stopping ssltunnel');
                log.exception('Exception stopping ssltunnel');

    def copyExtraFilesToProfile(self, options):
        "Copy extra files or dirs specified on the command line to the testing profile."
@@ -663,7 +556,7 @@ class MochitestUtilsMixin(object):
                dest = os.path.join(options.profilePath, os.path.basename(abspath))
                shutil.copytree(abspath, dest)
            else:
                log.warning("runtests.py | Failed to copy %s to profile" % abspath)
                log.warning("runtests.py | Failed to copy %s to profile", abspath)

    def installChromeJar(self, chrome, options):
        """
@@ -708,7 +601,7 @@ toolbar#nav-bar {

        # Call installChromeJar().
        if not os.path.isdir(os.path.join(SCRIPT_DIR, self.jarDir)):
            log.error(message="TEST-UNEXPECTED-FAIL | invalid setup: missing mochikit extension")
            log.testFail("invalid setup: missing mochikit extension")
            return None

        # Support Firefox (browser), B2G (shell), SeaMonkey (navigator), and Webapp
@@ -814,7 +707,7 @@ class SSLTunnel:
        self.process = mozprocess.ProcessHandler([ssltunnel, self.configFile],
                                                 env=env)
        self.process.run()
        log.info("runtests.py | SSL tunnel pid: %d" % self.process.pid)
        log.info("INFO | runtests.py | SSL tunnel pid: %d", self.process.pid)

    def stop(self):
        """ Stops the SSL Tunnel and cleans up """

@@ -948,9 +841,6 @@ class Mochitest(MochitestUtilsMixin):
    def __init__(self):
        super(Mochitest, self).__init__()

        # Structured logs parser
        self.message_logger = MessageLogger(logger=log)

        # environment function for browserEnv
        self.environment = environment

@@ -1094,7 +984,7 @@ class Mochitest(MochitestUtilsMixin):
        # TODO: this should really be upstreamed somewhere, maybe mozprofile
        certificateStatus = self.fillCertificateDB(options)
        if certificateStatus:
            log.error("TEST-UNEXPECTED-FAIL | runtests.py | Certificate integration failed")
            log.info("TEST-UNEXPECTED-FAIL | runtests.py | Certificate integration failed")
            return None

        return manifest
@@ -1150,7 +1040,7 @@ class Mochitest(MochitestUtilsMixin):
            if os.path.exists(options.pidFile + ".xpcshell.pid"):
                os.remove(options.pidFile + ".xpcshell.pid")
        except:
            log.warning("cleaning up pidfile '%s' was unsuccessful from the test harness" % options.pidFile)
            log.warn("cleaning up pidfile '%s' was unsuccessful from the test harness", options.pidFile)
        options.manifestFile = None

    def dumpScreen(self, utilityPath):
@@ -1193,12 +1083,12 @@ class Mochitest(MochitestUtilsMixin):
        """Look for hung processes"""

        if not os.path.exists(processLog):
            log.info('Automation Error: PID log not found: %s' % processLog)
            log.info('Automation Error: PID log not found: %s', processLog)
            # Whilst no hung process was found, the run should still display as a failure
            return True

        # scan processLog for zombies
        log.info('zombiecheck | Reading PID log: %s' % processLog)
        log.info('INFO | zombiecheck | Reading PID log: %s', processLog)
        processList = []
        pidRE = re.compile(r'launched child process (\d+)$')
        with open(processLog) as processLogFD:
@@ -1211,10 +1101,10 @@ class Mochitest(MochitestUtilsMixin):
        # kill zombies
        foundZombie = False
        for processPID in processList:
            log.info("zombiecheck | Checking for orphan process with PID: %d" % processPID)
            log.info("INFO | zombiecheck | Checking for orphan process with PID: %d", processPID)
            if isPidAlive(processPID):
                foundZombie = True
                log.error("TEST-UNEXPECTED-FAIL | zombiecheck | child process %d still alive after shutdown" % processPID)
                log.info("TEST-UNEXPECTED-FAIL | zombiecheck | child process %d still alive after shutdown", processPID)
                self.killAndGetStack(processPID, utilityPath, debuggerInfo, dump_screen=not debuggerInfo)

        return foundZombie

@@ -1264,16 +1154,12 @@ class Mochitest(MochitestUtilsMixin):
               webapprtChrome=False,
               screenshotOnFail=False,
               testPath=None,
               bisectChunk=None,
               quiet=False):
               bisectChunk=None):
        """
        Run the app, log the duration it took to execute, return the status code.
        Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing for |timeout| seconds.
        """

        # configure the message logger buffering
        self.message_logger.buffering = quiet

        # debugger information
        interactive = False
        debug_args = None
@@ -1367,7 +1253,7 @@ class Mochitest(MochitestUtilsMixin):
                             interactive=interactive,
                             outputTimeout=timeout)
        proc = runner.process_handler
        log.info("runtests.py | Application pid: %d" % proc.pid)
        log.info("INFO | runtests.py | Application pid: %d", proc.pid)

        if onLaunch is not None:
            # Allow callers to specify an onLaunch callback to be fired after the
@@ -1395,12 +1281,11 @@ class Mochitest(MochitestUtilsMixin):

        # record post-test information
        if status:
            self.message_logger.dump_buffered()
            log.error("TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s" % (self.lastTestSeen, status))
            log.info("TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s", self.lastTestSeen, status)
        else:
            self.lastTestSeen = 'Main app process exited normally'

        log.info("runtests.py | Application ran for: %s" % str(datetime.now() - startTime))
        log.info("INFO | runtests.py | Application ran for: %s", str(datetime.now() - startTime))

        # Do a final check for zombie child processes.
        zombieProcesses = self.checkForZombies(processLog, utilityPath, debuggerInfo)
@@ -1682,8 +1567,7 @@ class Mochitest(MochitestUtilsMixin):
                                   webapprtChrome=options.webapprtChrome,
                                   screenshotOnFail=options.screenshotOnFail,
                                   testPath=options.testPath,
                                   bisectChunk=options.bisectChunk,
                                   quiet=options.quiet
                                   bisectChunk=options.bisectChunk
                                   )
        except KeyboardInterrupt:
            log.info("runtests.py | Received keyboard interrupt.\n");
@@ -1708,8 +1592,6 @@ class Mochitest(MochitestUtilsMixin):

        log.info("runtests.py | Running tests: end.")

        self.message_logger.finish()

        if self.manifest is not None:
            self.cleanup(options)

@@ -1719,18 +1601,13 @@ class Mochitest(MochitestUtilsMixin):
        """handle process output timeout"""
        # TODO: bug 913975 : _processOutput should call self.processOutputLine one more time one timeout (I think)
        if testPath:
            error_message = "TEST-UNEXPECTED-TIMEOUT | %s | application timed out after %d seconds with no output on %s" % (self.lastTestSeen, int(timeout), testPath)
            log.info("TEST-UNEXPECTED-FAIL | %s | application timed out after %d seconds with no output on %s", self.lastTestSeen, int(timeout), testPath)
        else:
            error_message = "TEST-UNEXPECTED-TIMEOUT | %s | application timed out after %d seconds with no output" % (self.lastTestSeen, int(timeout))

        self.message_logger.dump_buffered()
        self.message_logger.buffering = False
        log.error(error_message)

        log.info("TEST-UNEXPECTED-FAIL | %s | application timed out after %d seconds with no output", self.lastTestSeen, int(timeout))
        browserProcessId = browserProcessId or proc.pid
        self.killAndGetStack(browserProcessId, utilityPath, debuggerInfo, dump_screen=not debuggerInfo)


### output processing

class OutputHandler(object):
    """line output handler for mozrunner"""

@ -1759,30 +1636,28 @@ class Mochitest(MochitestUtilsMixin):
|
|||
# stack fixer function and/or process
|
||||
self.stackFixerFunction, self.stackFixerProcess = self.stackFixer()
|
||||
|
||||
# Setting up the handlers
|
||||
for handler in self.outputHandlers():
|
||||
log.add_handler(handler)
|
||||
|
||||
def processOutputLine(self, line):
|
||||
"""per line handler of output for mozprocess"""
|
||||
# Passing the line to the message logger, to be parsed, logged and passed to the handlers.
|
||||
self.harness.message_logger.write(line)
|
||||
|
||||
for handler in self.outputHandlers():
|
||||
line = handler(line)
|
||||
if self.bisectChunk:
|
||||
self.record_result(line)
|
||||
self.first_error(line)
|
||||
__call__ = processOutputLine
|
||||
|
||||
def outputHandlers(self):
|
||||
"""returns ordered list of output handlers"""
|
||||
handlers = [self.fix_stack,
|
||||
self.record_last_test,
|
||||
self.dumpScreenOnTimeout,
|
||||
self.dumpScreenOnFail,
|
||||
self.trackShutdownLeaks,
|
||||
self.trackLSANLeaks,
|
||||
self.countline,
|
||||
]
|
||||
if self.bisectChunk:
|
||||
handlers += [self.record_result, self.first_error]
|
||||
return handlers
|
||||
return [self.fix_stack,
|
||||
self.format,
|
||||
self.record_last_test,
|
||||
self.dumpScreenOnTimeout,
|
||||
self.dumpScreenOnFail,
|
||||
self.metro_subprocess_id,
|
||||
self.trackShutdownLeaks,
|
||||
self.trackLSANLeaks,
|
||||
self.log,
|
||||
self.countline,
|
||||
]
|
||||
|
||||
def stackFixer(self):
|
||||
"""
|
||||
|
@ -1826,7 +1701,7 @@ class Mochitest(MochitestUtilsMixin):
|
|||
self.stackFixerProcess.communicate()
|
||||
status = self.stackFixerProcess.returncode
|
||||
if status and not didTimeout:
|
||||
log.info("TEST-UNEXPECTED-FAIL | runtests.py | Stack fixer process exited with code %d during test run" % status)
|
||||
log.info("TEST-UNEXPECTED-FAIL | runtests.py | Stack fixer process exited with code %d during test run", status)
|
||||
|
||||
if self.shutdownLeaks:
|
||||
self.shutdownLeaks.process()
|
||||
|
@ -1834,38 +1709,34 @@ class Mochitest(MochitestUtilsMixin):
|
|||
if self.lsanLeaks:
|
||||
self.lsanLeaks.process()
|
||||
|
||||
# output message handlers:
|
||||
# these take a message and return a message
|
||||
|
||||
def record_result(self, message):
|
||||
if message['action'] == 'test_start': #by default make the result key equal to pass.
|
||||
key = message['test'].split('/')[-1].strip()
|
||||
# output line handlers:
|
||||
# these take a line and return a line
|
||||
def record_result(self, line):
|
||||
if "TEST-START" in line: #by default make the result key equal to pass.
|
||||
key = line.split('|')[-1].split('/')[-1].strip()
|
||||
self.harness.result[key] = "PASS"
|
||||
elif message['action'] in ['test_end', 'test_status']:
|
||||
if 'expected' in message:
|
||||
key = message['test'].split('/')[-1].strip()
|
||||
self.harness.result[key] = "FAIL"
|
||||
elif message['status'] == 'FAIL':
|
||||
key = message['test'].split('/')[-1].strip()
|
||||
self.harness.result[key] = "TODO"
|
||||
return message
|
||||
elif "TEST-UNEXPECTED" in line:
|
||||
key = line.split('|')[-2].split('/')[-1].strip()
|
||||
self.harness.result[key] = "FAIL"
|
||||
elif "TEST-KNOWN-FAIL" in line:
|
||||
key = line.split('|')[-2].split('/')[-1].strip()
|
||||
self.harness.result[key] = "TODO"
|
||||
return line
|
||||
|
||||
def first_error(self, message):
|
||||
if 'expected' in message and message['status'] == 'FAIL':
|
||||
key = message['test'].split('/')[-1].strip()
|
||||
def first_error(self, line):
|
||||
if "TEST-UNEXPECTED-FAIL" in line:
|
||||
key = line.split('|')[-2].split('/')[-1].strip()
|
||||
if key not in self.harness.expectedError:
|
||||
self.harness.expectedError[key] = message['message'].strip()
|
||||
return message
|
||||
self.harness.expectedError[key] = line.split('|')[-1].strip()
|
||||
return line
|
||||
|
||||
def countline(self, message):
|
||||
if message['action'] != 'log':
|
||||
return message
|
||||
line = message['message']
|
||||
def countline(self, line):
|
||||
val = 0
|
||||
try:
|
||||
val = int(line.split(':')[-1].strip())
|
||||
except ValueError:
|
||||
return message
|
||||
except ValueError, e:
|
||||
return line
|
||||
|
||||
if "Passed:" in line:
|
||||
self.harness.countpass += val
|
||||
|
@@ -1873,42 +1744,56 @@ class Mochitest(MochitestUtilsMixin):
            self.harness.countfail += val
        elif "Todo:" in line:
            self.harness.counttodo += val
        return message
        return line

    def fix_stack(self, message):
        if message['action'] == 'log' and self.stackFixerFunction:
            return self.stackFixerFunction(message['message'])
        return message
    def fix_stack(self, line):
        if self.stackFixerFunction:
            return self.stackFixerFunction(line)
        return line

    def record_last_test(self, message):
    def format(self, line):
        """format the line"""
        return line.rstrip().decode("UTF-8", "ignore")

    def record_last_test(self, line):
        """record last test on harness"""
        if message['action'] == 'test_start':
            self.harness.lastTestSeen = message['test']
        return message
        if "TEST-START" in line and "|" in line:
            self.harness.lastTestSeen = line.split("|")[1].strip()
        return line

    def dumpScreenOnTimeout(self, message):
        if (not self.dump_screen_on_fail
                and self.dump_screen_on_timeout
                and 'expected' in message and message['status'] == 'FAIL'
                and 'message' in message
                and "Test timed out" in message['message']):
    def dumpScreenOnTimeout(self, line):
        if not self.dump_screen_on_fail and self.dump_screen_on_timeout and "TEST-UNEXPECTED-FAIL" in line and "Test timed out" in line:
            self.harness.dumpScreen(self.utilityPath)
        return message
        return line

    def dumpScreenOnFail(self, message):
        if self.dump_screen_on_fail and 'expected' in message and message['status'] == 'FAIL':
    def dumpScreenOnFail(self, line):
        if self.dump_screen_on_fail and "TEST-UNEXPECTED-FAIL" in line:
            self.harness.dumpScreen(self.utilityPath)
        return message
        return line

    def trackLSANLeaks(self, message):
        if self.lsanLeaks and message['action'] == 'log':
            self.lsanLeaks.log(message['message'])
        return message
    def metro_subprocess_id(self, line):
        """look for metro browser subprocess id"""
        if "METRO_BROWSER_PROCESS" in line:
            index = line.find("=")
            if index != -1:
                self.browserProcessId = line[index+1:].rstrip()
                log.info("INFO | runtests.py | metro browser sub process id detected: %s", self.browserProcessId)
        return line

    def trackShutdownLeaks(self, message):
    def trackShutdownLeaks(self, line):
        if self.shutdownLeaks:
            self.shutdownLeaks.log(message)
        return message
            self.shutdownLeaks.log(line)
        return line

    def trackLSANLeaks(self, line):
        if self.lsanLeaks:
            self.lsanLeaks.log(line)
        return line

    def log(self, line):
        log.info(line)
        return line

    def makeTestConfig(self, options):
        "Creates a test configuration file for customizing test execution."

@@ -16,18 +16,17 @@ sys.path.insert(0, here)

from runtests import Mochitest
from runtests import MochitestUtilsMixin
from runtests import MessageLogger
from runtests import MochitestFormatter
from runtests import MochitestServer
from mochitest_options import B2GOptions, MochitestOptions
from marionette import Marionette
from mozprofile import Profile, Preferences
import mozinfo
from mozlog.structured.handlers import StreamHandler
from mozlog.structured.structuredlog import StructuredLogger

log = StructuredLogger('Mochitest')
stream_handler = StreamHandler(stream=sys.stdout, formatter=MochitestFormatter())
log.add_handler(stream_handler)
from marionette import Marionette

from mozdevice import DeviceManagerADB
from mozprofile import Profile, Preferences
import mozlog
import mozinfo

log = mozlog.getLogger('Mochitest')

class B2GMochitest(MochitestUtilsMixin):
    marionette = None

@@ -46,9 +45,6 @@ class B2GMochitest(MochitestUtilsMixin):
        self.test_script_args = [self.out_of_process]
        self.product = 'b2g'

        # structured logging
        self.message_logger = MessageLogger(logger=log)

        if profile_data_dir:
            self.preferences = [os.path.join(profile_data_dir, f)
                                for f in os.listdir(profile_data_dir) if f.startswith('pref')]

@@ -123,9 +119,6 @@ class B2GMochitest(MochitestUtilsMixin):
        manifest = self.build_profile(options)
        self.leak_report_file = os.path.join(options.profilePath, "runtests_leaks.log")

        # configuring the message logger's buffering
        self.message_logger.buffering = options.quiet

        if options.debugger or not options.autorun:
            timeout = None
        else:

@@ -139,17 +132,7 @@ class B2GMochitest(MochitestUtilsMixin):
        log.info("runtestsb2g.py | Running tests: start.")
        status = 0
        try:
            def on_output(line):
                message = self.message_logger.write(line)
                if message and message['action'] == 'test_start':
                    self.last_test = message['test']

            # The logging will be handled by on_output, so we set the stream to None
            process_args = {'processOutputLine': on_output,
                            'stream': None}
            self.marionette_args['process_args'] = process_args
            self.marionette_args['profile'] = self.profile

            self.marionette = Marionette(**self.marionette_args)
            self.runner = self.marionette.runner
            self.app_ctx = self.runner.app_ctx

@@ -188,7 +171,6 @@ class B2GMochitest(MochitestUtilsMixin):
            self.marionette.execute_script(self.test_script,
                                           script_args=self.test_script_args)
            status = self.runner.wait()

            if status is None:
                # the runner has timed out
                status = 124

@@ -205,7 +187,6 @@ class B2GMochitest(MochitestUtilsMixin):
        self.stopServers()

        log.info("runtestsb2g.py | Running tests: end.")
        self.message_logger.finish()

        if manifest is not None:
            self.cleanup(manifest, options)
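The removed on_output closure above is the structured path: every raw line from the child process went through MessageLogger.write, which handed back a message dict the harness could inspect for test_start. A rough, self-contained sketch of that parse-and-track pattern (assumptions: structured lines are one JSON object per line; this is not the actual MessageLogger implementation, whose details are not shown here):

    import json

    class OutputTracker(object):
        """Hypothetical stand-in for the message_logger wiring removed above."""
        def __init__(self):
            self.last_test = None

        def write(self, line):
            try:
                message = json.loads(line)
            except ValueError:
                return None  # plain, unstructured output line
            if isinstance(message, dict) and message.get('action') == 'test_start':
                self.last_test = message.get('test')
            return message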

@@ -2,36 +2,33 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import base64
import json
import math
import os
import re
import shutil
import sys
import os
import time
import tempfile
import re
import traceback
import shutil
import math
import base64

sys.path.insert(0, os.path.abspath(os.path.realpath(os.path.dirname(__file__))))

from automation import Automation
from remoteautomation import RemoteAutomation, fennecLogcatFilters
from runtests import Mochitest, MessageLogger, MochitestFormatter
from runtests import Mochitest
from runtests import MochitestServer
from mochitest_options import MochitestOptions

import devicemanager
import droid
import manifestparser
import mozinfo
import mozlog
import moznetwork
from mozlog.structured.handlers import StreamHandler
from mozlog.structured.structuredlog import StructuredLogger

log = StructuredLogger('Mochi-Remote')
stream_handler = StreamHandler(stream=sys.stdout, formatter=MochitestFormatter())
log.add_handler(stream_handler)

SCRIPT_DIR = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
log = mozlog.getLogger('Mochi-Remote')

class RemoteOptions(MochitestOptions):

@@ -185,19 +182,19 @@ class RemoteOptions(MochitestOptions):
        # Robocop specific options
        if options.robocopIni != "":
            if not os.path.exists(options.robocopIni):
                log.error("Unable to find specified robocop .ini manifest '%s'" % options.robocopIni)
                log.error("Unable to find specified robocop .ini manifest '%s'", options.robocopIni)
                return None
            options.robocopIni = os.path.abspath(options.robocopIni)

        if options.robocopApk != "":
            if not os.path.exists(options.robocopApk):
                log.error("Unable to find robocop APK '%s'" % options.robocopApk)
                log.error("Unable to find robocop APK '%s'", options.robocopApk)
                return None
            options.robocopApk = os.path.abspath(options.robocopApk)

        if options.robocopIds != "":
            if not os.path.exists(options.robocopIds):
                log.error("Unable to find specified robocop IDs file '%s'" % options.robocopIds)
                log.error("Unable to find specified robocop IDs file '%s'", options.robocopIds)
                return None
            options.robocopIds = os.path.abspath(options.robocopIds)

@@ -228,9 +225,9 @@ class MochiRemote(Mochitest):
    _automation = None
    _dm = None
    localProfile = None
    logMessages = []
    logLines = []

    def __init__(self, automation, devmgr, options, message_logger=None):
    def __init__(self, automation, devmgr, options):
        self._automation = automation
        Mochitest.__init__(self)
        self._dm = devmgr

@@ -242,15 +239,13 @@ class MochiRemote(Mochitest):
        self._automation.deleteANRs()
        self.certdbNew = True

        # structured logging
        self.message_logger = message_logger or MessageLogger(logger=log)

    def cleanup(self, options):
        if self._dm.fileExists(self.remoteLog):
            self._dm.getFile(self.remoteLog, self.localLog)
            self._dm.removeFile(self.remoteLog)
        else:
            log.warning("Unable to retrieve log file (%s) from remote device" % self.remoteLog)
            log.warn("Unable to retrieve log file (%s) from remote device",
                     self.remoteLog)
        self._dm.removeDir(self.remoteProfile)
        Mochitest.cleanup(self, options)

@@ -300,7 +295,7 @@ class MochiRemote(Mochitest):
        ]
        options.xrePath = self.findPath(paths)
        if options.xrePath == None:
            log.error("unable to find xulrunner path for %s, please specify with --xre-path" % os.name)
            log.error("unable to find xulrunner path for %s, please specify with --xre-path", os.name)
            sys.exit(1)

        xpcshell = "xpcshell"

@@ -314,7 +309,7 @@ class MochiRemote(Mochitest):
        options.utilityPath = self.findPath(paths, xpcshell)

        if options.utilityPath == None:
            log.error("unable to find utility path for %s, please specify with --utility-path" % os.name)
            log.error("unable to find utility path for %s, please specify with --utility-path", os.name)
            sys.exit(1)

        xpcshell_path = os.path.join(options.utilityPath, xpcshell)

@@ -414,28 +409,26 @@ class MochiRemote(Mochitest):
    def addLogData(self):
        with open(self.localLog) as currentLog:
            data = currentLog.readlines()

        restart = re.compile('0 INFO SimpleTest START.*')
        reend = re.compile('([0-9]+) INFO TEST-START . Shutdown.*')
        refail = re.compile('([0-9]+) INFO TEST-UNEXPECTED-FAIL.*')
        start_found = False
        end_found = False
        fail_found = False
        for line in data:
            try:
                message = json.loads(line)
                if not isinstance(message, dict) or not 'action' in message:
                    continue
            except ValueError:
                continue

            if message['action'] == 'test_end':
            if reend.match(line):
                end_found = True
                start_found = False
                break

            if start_found and not end_found:
                self.logMessages.append(message)
                # Append the line without the number to increment
                self.logLines.append(' '.join(line.split(' ')[1:]))

            if message['action'] == 'test_start':
            if restart.match(line):
                start_found = True
            if 'expected' in message:
            if refail.match(line):
                fail_found = True
        result = 0
        if fail_found:

@@ -452,15 +445,12 @@ class MochiRemote(Mochitest):
        incr = 1
        logFile = []
        logFile.append("0 INFO SimpleTest START")
        for message in self.logMessages:
            if 'status' not in message:
                continue

            if 'expected' in message:
                failed += 1
            elif message['status'] == 'PASS':
        for line in self.logLines:
            if line.startswith("INFO TEST-PASS"):
                passed += 1
            elif message['status'] == 'FAIL':
            elif line.startswith("INFO TEST-UNEXPECTED"):
                failed += 1
            elif line.startswith("INFO TEST-KNOWN"):
                todo += 1
            incr += 1

@@ -492,25 +482,25 @@ class MochiRemote(Mochitest):
        printed = 0
        for name in self._dm.listFiles(screenShotDir):
            fullName = screenShotDir + "/" + name
            log.info("SCREENSHOT: FOUND: [%s]" % fullName)
            log.info("SCREENSHOT: FOUND: [%s]", fullName)
            try:
                image = self._dm.pullFile(fullName)
                encoded = base64.b64encode(image)
                log.info("SCREENSHOT: data:image/jpg;base64,%s" % encoded)
                log.info("SCREENSHOT: data:image/jpg;base64,%s", encoded)
                printed += 1
            except:
                log.info("SCREENSHOT: Could not be parsed")
                pass

        log.info("SCREENSHOT: TOTAL PRINTED: [%s]" % printed)
        log.info("SCREENSHOT: TOTAL PRINTED: [%s]", printed)

    def printDeviceInfo(self, printLogcat=False):
        try:
            if printLogcat:
                logcat = self._dm.getLogcat(filterOutRegexps=fennecLogcatFilters)
                log.info('\n' + ''.join(logcat).decode('utf-8', 'replace'))
            log.info("Device info: %s" % self._dm.getInfo())
            log.info("Test root: %s" % self._dm.getDeviceRoot())
                log.info('\n'+(''.join(logcat)))
            log.info("Device info: %s", self._dm.getInfo())
            log.info("Test root: %s", self._dm.getDeviceRoot())
        except devicemanager.DMError:
            log.warn("Error getting device information")

@@ -531,9 +521,9 @@ class MochiRemote(Mochitest):
        for key, value in browserEnv.items():
            try:
                value.index(',')
                log.error("buildRobotiumConfig: browserEnv - Found a ',' in our value, unable to process value. key=%s,value=%s" % (key, value))
                log.error("browserEnv=%s" % browserEnv)
            except ValueError:
                log.error("buildRobotiumConfig: browserEnv - Found a ',' in our value, unable to process value. key=%s,value=%s", key, value)
                log.error("browserEnv=%s", browserEnv)
            except ValueError, e:
                envstr += "%s%s=%s" % (delim, key, value)
                delim = ","

@@ -561,85 +551,10 @@ class MochiRemote(Mochitest):
        # it trying to set up ssltunnel as well
        kwargs['runSSLTunnel'] = False

        if 'quiet' in kwargs:
            kwargs.pop('quiet')

        return self._automation.runApp(*args, **kwargs)

class RobocopMochiRemote(MochiRemote):
    """This class maintains compatibility with the robocop logging system
    that is still unstructured."""

    def addLogData(self):
        with open(self.localLog) as currentLog:
            data = currentLog.readlines()

        restart = re.compile('SimpleTest START.*')
        reend = re.compile('TEST-START . Shutdown.*')
        refail = re.compile('TEST-UNEXPECTED-FAIL.*')
        start_found = False
        end_found = False
        fail_found = False
        for line in data:
            if reend.match(line):
                end_found = True
                start_found = False
                break

            if start_found and not end_found:
                self.logMessages.append(line)

            if restart.match(line):
                start_found = True
            if refail.match(line):
                fail_found = True
        result = 0
        if fail_found:
            result = 1
        if not end_found:
            log.error("Automation Error: Missing end of test marker (process crashed?)")
            result = 1
        return result

    def printLog(self):
        passed = 0
        failed = 0
        todo = 0
        incr = 1
        logFile = []
        logFile.append("0 INFO SimpleTest START")
        for line in self.logMessages:
            if line.startswith("TEST-PASS"):
                passed += 1
            elif line.startswith("TEST-UNEXPECTED"):
                failed += 1
            elif line.startswith("TEST-KNOWN"):
                todo += 1
            incr += 1

        logFile.append("%s INFO TEST-START | Shutdown" % incr)
        incr += 1
        logFile.append("%s INFO Passed: %s" % (incr, passed))
        incr += 1
        logFile.append("%s INFO Failed: %s" % (incr, failed))
        incr += 1
        logFile.append("%s INFO Todo: %s" % (incr, todo))
        incr += 1
        logFile.append("%s INFO SimpleTest FINISHED" % incr)

        # TODO: Consider not printing to stdout because we might be duplicating output
        print '\n'.join(logFile)
        with open(self.localLog, 'w') as localLog:
            localLog.write('\n'.join(logFile))

        if failed > 0:
            return 1
        return 0

def main():
    message_logger = MessageLogger(logger=log)
    process_args = {'messageLogger': message_logger}
    auto = RemoteAutomation(None, "fennec", processArgs=process_args)
    auto = RemoteAutomation(None, "fennec")
    parser = RemoteOptions(auto)
    options, args = parser.parse_args()

@@ -663,8 +578,7 @@ def main():
    auto.setProduct(options.remoteProductName)
    auto.setAppName(options.remoteappname)

    mochitest_cls = RobocopMochiRemote if options.robocopIni != "" else MochiRemote
    mochitest = mochitest_cls(auto, dm, options, message_logger)
    mochitest = MochiRemote(auto, dm, options)

    options = parser.verifyOptions(options, mochitest)
    if (options == None):

@@ -707,8 +621,6 @@ def main():
        my_tests = tests
        for test in robocop_tests:
            tests.append(test['name'])
        # suite_start message when running robocop tests
        log.suite_start(tests)

    if options.totalChunks:
        tests_per_chunk = math.ceil(len(tests) / (options.totalChunks * 1.0))

@@ -717,7 +629,7 @@ def main():
        if end > len(tests):
            end = len(tests)
        my_tests = tests[start:end]
        log.info("Running tests %d-%d/%d" % (start+1, end, len(tests)))
        log.info("Running tests %d-%d/%d", start+1, end, len(tests))

    dm.removeFile(os.path.join(deviceRoot, "fennec_ids.txt"))
    fennec_ids = os.path.abspath(os.path.join(SCRIPT_DIR, "fennec_ids.txt"))

@@ -786,7 +698,7 @@ def main():
    dm.recordLogcat()
    result = mochitest.runTests(options)
    if result != 0:
        log.error("runTests() exited with code %s" % result)
        log.error("runTests() exited with code %s", result)
    log_result = mochitest.addLogData()
    if result != 0 or log_result != 0:
        mochitest.printDeviceInfo(printLogcat=True)

@@ -815,7 +727,7 @@ def main():
    if (options.dm_trans == "sut"):
        dm._runCmds([{"cmd": " ".join(cmd_del)}])
    if retVal is None:
        log.warning("No tests run. Did you pass an invalid TEST_PATH?")
        log.warn("No tests run. Did you pass an invalid TEST_PATH?")
        retVal = 1
    else:
        # if we didn't have some kind of error running the tests, make
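Both addLogData variants above scan the pulled log for SimpleTest markers, and printLog rebuilds the numbered summary by counting them. The counting idea in isolation, as a sketch (illustrative, not the harness code itself):

    def summarize(log_lines):
        # Count result markers the way printLog above does.
        passed = failed = todo = 0
        for line in log_lines:
            if line.startswith("TEST-PASS"):
                passed += 1
            elif line.startswith("TEST-UNEXPECTED"):
                failed += 1
            elif line.startswith("TEST-KNOWN"):
                todo += 1
        return passed, failed, todo

    # summarize(["TEST-PASS | a", "TEST-UNEXPECTED-FAIL | b"]) -> (1, 1, 0)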
@@ -53,7 +53,7 @@ MemoryStats.constructPathname = function (directory, basename) {
    return d.path;
}

MemoryStats.dump = function (logger,
MemoryStats.dump = function (dumpFn,
                             testNumber,
                             testURL,
                             dumpOutputDirectory,

@@ -75,9 +75,9 @@ MemoryStats.dump = function (logger,
            MemoryStats._hasMemoryStatistics[stat] = supported;
        }
        if (supported == MEM_STAT_SUPPORTED) {
            logger.info("MEMORY STAT " + stat + " after test: " + mrm[stat]);
            dumpFn("TEST-INFO | MEMORY STAT " + stat + " after test: " + mrm[stat]);
        } else if (firstAccess) {
            logger.info("MEMORY STAT " + stat + " not supported in this build configuration.");
            dumpFn("TEST-INFO | MEMORY STAT " + stat + " not supported in this build configuration.");
        }
    }

@@ -85,19 +85,20 @@ MemoryStats.dump = function (logger,
        var basename = "about-memory-" + testNumber + ".json.gz";
        var dumpfile = MemoryStats.constructPathname(dumpOutputDirectory,
                                                     basename);
        logger.info(testURL + " | MEMDUMP-START " + dumpfile);
        dumpFn("TEST-INFO | " + testURL + " | MEMDUMP-START " + dumpfile);
        var md = MemoryStats._getService("@mozilla.org/memory-info-dumper;1",
                                         "nsIMemoryInfoDumper");
        md.dumpMemoryReportsToNamedFile(dumpfile, function () {
            logger.info("TEST-INFO | " + testURL + " | MEMDUMP-END");
            dumpFn("TEST-INFO | " + testURL + " | MEMDUMP-END");
        }, null, /* anonymize = */ false);

    }

    if (dumpDMD && typeof(DMDReportAndDump) != undefined) {
        var basename = "dmd-" + testNumber + ".txt";
        var dumpfile = MemoryStats.constructPathname(dumpOutputDirectory,
                                                     basename);
        logger.info(testURL + " | DMD-DUMP " + dumpfile);
        dumpFn("TEST-INFO | " + testURL + " | DMD-DUMP " + dumpfile);
        DMDReportAndDump(dumpfile);
    }
};
@@ -91,7 +91,7 @@ if (typeof(repr) == 'undefined') {
            }
            return ostring;
        };
    }
}

/* This returns a function that applies the previously given parameters.
 * This is used by SimpleTest.showReport

@@ -116,7 +116,7 @@ if (typeof(partial) == 'undefined') {
if (typeof(getElement) == 'undefined') {
    this.getElement = function(id) {
        return ((typeof(id) == "string") ?
            document.getElementById(id) : id);
                document.getElementById(id) : id);
    };
    this.$ = this.getElement;
}

@@ -190,10 +190,10 @@ if (typeof(computedStyle) == 'undefined') {
        if (typeof(style) == 'undefined' || style === null) {
            return undefined;
        }

        var selectorCase = cssProperty.replace(/([A-Z])/g, '-$1'
            ).toLowerCase();

        return style.getPropertyValue(selectorCase);
    };
}

@@ -235,9 +235,7 @@ SimpleTest._cleanupFunctions = [];
**/
SimpleTest.ok = function (condition, name, diag) {
    var test = {'result': !!condition, 'name': name, 'diag': diag};
    var successInfo = {status:"PASS", expected:"PASS", message:"TEST-PASS"};
    var failureInfo = {status:"FAIL", expected:"PASS", message:"TEST-UNEXPECTED-FAIL"};
    SimpleTest._logResult(test, successInfo, failureInfo);
    SimpleTest._logResult(test, "TEST-PASS", "TEST-UNEXPECTED-FAIL");
    SimpleTest._tests.push(test);
};

@@ -286,9 +284,7 @@ SimpleTest.doesThrow = function(fn, name) {

SimpleTest.todo = function(condition, name, diag) {
    var test = {'result': !!condition, 'name': name, 'diag': diag, todo: true};
    var successInfo = {status:"PASS", expected:"FAIL", message:"TEST-UNEXPECTED-PASS"};
    var failureInfo = {status:"FAIL", expected:"FAIL", message:"TEST-KNOWN-FAIL"};
    SimpleTest._logResult(test, successInfo, failureInfo);
    SimpleTest._logResult(test, "TEST-UNEXPECTED-PASS", "TEST-KNOWN-FAIL");
    SimpleTest._tests.push(test);
};

@@ -317,65 +313,130 @@ SimpleTest._getCurrentTestURL = function() {
        "unknown test url";
};

SimpleTest._forceLogMessageOutput = false;
SimpleTest._forceLogMessageOutput = parentRunner && !parentRunner.quiet;

/**
 * Force all test messages to be displayed. Only applies for the current test.
 */
SimpleTest.requestCompleteLog = function() {
    if (!parentRunner || SimpleTest._forceLogMessageOutput) {
    if (SimpleTest._forceLogMessageOutput)
        return;
    }

    parentRunner.structuredLogger.deactivateBuffering();
    SimpleTest._forceLogMessageOutput = true;

    SimpleTest.registerCleanupFunction(function() {
        parentRunner.structuredLogger.activateBuffering();
        SimpleTest._forceLogMessageOutput = false;
    });
};

SimpleTest._logResult = function (test, passInfo, failInfo) {
    var url = SimpleTest._getCurrentTestURL();
    var result = test.result ? passInfo : failInfo;
    var diagnostic = test.diag || null;
    // BUGFIX : coercing test.name to a string, because some a11y tests pass an xpconnect object
    var subtest = test.name ? String(test.name) : null;
    var isError = !test.result == !test.todo;
/**
 * A circular buffer, managed by _logResult. We explicitly manage the
 * circularness of the buffer, rather than resorting to .shift()/.push()
 * because explicit management is much faster.
 */
SimpleTest._bufferedMessages = [];
SimpleTest._logResult = (function () {
    var bufferingThreshold = 100;
    var outputIndex = 0;

    if (parentRunner) {
        if (!result.status || !result.expected) {
            if (diagnostic) {
                parentRunner.structuredLogger.info(diagnostic);
    function logResult(test, passString, failString) {
        var url = SimpleTest._getCurrentTestURL();
        var resultString = test.result ? passString : failString;
        var diagnostic = test.name + (test.diag ? " - " + test.diag : "");
        var msg = [resultString, url, diagnostic].join(" | ");
        var isError = !test.result == !test.todo;

        // Due to JavaScript's name lookup rules, it is important that
        // the second parameter here be named identically to the isError
        // variable declared above.
        function dumpMessage(msg, isError) {
            if (parentRunner) {
                if (isError) {
                    parentRunner.addFailedTest(url);
                    parentRunner.error(msg);
                } else {
                    parentRunner.log(msg);
                }
            } else if (typeof dump === "function") {
                dump(msg + "\n");
            } else {
                // Non-Mozilla browser? Just do nothing.
            }
        }

        // Detect when SimpleTest.reset() has been called, so we can
        // reset outputIndex. We store outputIndex as local state to
        // avoid adding even more state to SimpleTest.
        if (SimpleTest._bufferedMessages.length == 0) {
            outputIndex = 0;
        }

        // We want to eliminate mundane TEST-PASS/TEST-KNOWN-FAIL
        // output, since some tests produce tens of thousands of such
        // messages. These messages can consume a lot of memory to
        // generate and take a significant amount of time to output.
        // However, the reality is that TEST-PASS messages can also be
        // used as a form of logging via constructs like:
        //
        // SimpleTest.ok(true, "some informative message");
        //
        // And eliding the logging can be very confusing when trying to
        // debug test failures.
        //
        // Hence the compromise adopted here: We buffer messages up to
        // some limit and dump the buffer when a test failure happens.
        // This behavior ought to provide enough context for developers
        // looking to understand where in the test things failed.
        if (isError) {
            // Display this message and all the messages we have buffered.
            if (SimpleTest._bufferedMessages.length > 0) {
                dumpMessage("TEST-INFO | dumping last " + SimpleTest._bufferedMessages.length + " message(s)");
                dumpMessage("TEST-INFO | if you need more context, please use SimpleTest.requestCompleteLog() in your test");

                function dumpBufferedMessage(m) {
                    dumpMessage(m, false);
                }
                // The latest message is just before outputIndex.
                // The earliest message is located at outputIndex.
                var earliest = SimpleTest._bufferedMessages.slice(outputIndex);
                var latest = SimpleTest._bufferedMessages.slice(0, outputIndex);
                earliest.map(dumpBufferedMessage);
                latest.map(dumpBufferedMessage);

                SimpleTest._bufferedMessages = [];
            }

            dumpMessage(msg);
            return;
        }

        var runningSingleTest = ((parentRunner &&
                                  parentRunner._urls.length == 1) ||
                                 isSingleTestRun);
        var shouldLogImmediately = (runningSingleTest ||
                                    SimpleTest._forceLogMessageOutput);

        if (!shouldLogImmediately) {
            // Buffer the message for possible later output.
            if (SimpleTest._bufferedMessages.length >= bufferingThreshold) {
                if (outputIndex >= bufferingThreshold) {
                    outputIndex = 0;
                }
                SimpleTest._bufferedMessages[outputIndex] = msg;
                outputIndex++;
            } else {
                SimpleTest._bufferedMessages.push(msg);
            }
            return;
        }

        if (isError) {
            parentRunner.addFailedTest(url);
        }

        parentRunner.structuredLogger.testStatus(url,
                                                 subtest,
                                                 result.status,
                                                 result.expected,
                                                 diagnostic);
    } else if (typeof dump === "function") {
        var debugMsg = [url, result.message, diagnostic].join(' | ');
        dump(debugMsg + "\n");
    } else {
        // Non-Mozilla browser? Just do nothing.
        dumpMessage(msg);
    }
};

    return logResult;
})();
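The comment above justifies managing the buffer index by hand: Array.shift() is O(n) per message, while overwriting a slot through an index is O(1). The same ring-buffer idea in a compact Python sketch (illustrative only; the real buffer is the JavaScript SimpleTest._bufferedMessages above):

    class RingBuffer(object):
        """Fixed-capacity buffer that overwrites the oldest entry in O(1)."""
        def __init__(self, capacity=100):
            self.capacity = capacity
            self.items = []
            self.index = 0  # next slot to overwrite once the buffer is full

        def append(self, item):
            if len(self.items) >= self.capacity:
                if self.index >= self.capacity:
                    self.index = 0
                self.items[self.index] = item
                self.index += 1
            else:
                self.items.append(item)

        def in_order(self):
            # Oldest entries start at index; newest end just before it.
            return self.items[self.index:] + self.items[:self.index]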

SimpleTest.info = function(name, message) {
    var log = name + ' | ' + message;
    if (parentRunner) {
        parentRunner.structuredLogger.info(log);
    } else {
        dump(log + '\n');
    }
    SimpleTest._logResult({result:true, name:name, diag:message}, "TEST-INFO");
};

/**

@@ -470,14 +531,14 @@ SimpleTest.toggleByClass = function (cls, evt) {
            var clsName = child.className;
            if (!clsName) {
                continue;
            }
        }
            var classNames = clsName.split(' ');
            for (var j = 0; j < classNames.length; j++) {
                if (classNames[j] == cls) {
                    elements.push(child);
                    break;
                }
            }
        }
    }
}
    for (var t=0; t<elements.length; t++) {
        //TODO: again, for-in loop over elems seems to break this

@@ -803,12 +864,7 @@ SimpleTest.registerCleanupFunction = function(aFunc) {
**/
SimpleTest.finish = function() {
    if (SimpleTest._alreadyFinished) {
        var err = "[SimpleTest.finish()] this test already called finish!";
        if (parentRunner) {
            parentRunner.structuredLogger.error(err);
        } else {
            dump(err + '\n');
        }
        SimpleTest.ok(false, "[SimpleTest.finish()] this test already called finish!");
    }

    SimpleTest._alreadyFinished = true;

@@ -65,158 +65,6 @@ function flattenArguments(lst/* ...*/) {
    return res;
}


/**
 * StructuredLogger: Structured logger class following the mozlog.structured protocol
 *
 *
 **/
var VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];

function StructuredLogger(name) {
    this.name = name;
    this.testsStarted = [];

    /* test logs */

    this.testStart = function(test) {
        var data = {test: test};
        this._logData("test_start", data);
    };

    this.testStatus = function(test, subtest, status, expected="PASS", message=null) {
        var data = {test: test, subtest: subtest, status: status};

        if (message !== null) {
            data.message = message;
        }
        if (expected != status && status != 'SKIP') {
            data.expected = expected;
        }

        this._logData("test_status", data);
    };

    this.testEnd = function(test, status, expected="OK", message=null, extra=null) {
        var data = {test: test, status: status};

        if (message !== null) {
            data.message = message;
        }
        if (expected != status) {
            data.expected = expected;
        }
        if (extra !== null) {
            data.extra = extra;
        }

        if (!this.testsStarted.pop(test)) {
            this.error("Test '" + test + "' was ended more than once or never started. " +
                       "Ended with this data: " + JSON.stringify(data));
            return;
        }

        this._logData("test_end", data);
    };

    this.suiteStart = function(tests, runinfo) {
        runinfo = typeof runinfo !== "undefined" ? runinfo : null;

        var data = {tests: tests};
        if (runinfo !== null) {
            data.runinfo = runinfo;
        }

        this._logData("suite_start", data);
    };

    this.suiteEnd = function() {
        this._logData("suite_end");
    };

    this.testStart = function(test) {
        this.testsStarted.push(test);
        var data = {test: test};
        this._logData("test_start", data);
    };

    /* log action: human readable logs */

    this._log = function(level, message) {
        // Coercing the message parameter to a string, in case an invalid value is passed.
        message = String(message);
        var data = {level: level, message: message};
        this._logData('log', data);
    };

    this.debug = function(message) {
        this._log('DEBUG', message);
    };

    this.info = function(message) {
        this._log('INFO', message);
    };

    this.warning = function(message) {
        this._log('WARNING', message);
    };

    this.error = function(message) {
        this._log("ERROR", message);
    };

    this.critical = function(message) {
        this._log('CRITICAL', message);
    };

    /* Special mochitest messages for deactivating/activating buffering */

    this.deactivateBuffering = function() {
        this._logData("buffering_off");
    };
    this.activateBuffering = function() {
        this._logData("buffering_on");
    };

    /* dispatches a log to handlers */

    this._logData = function(action, data) {
        data = typeof data !== "undefined" ? data : null;

        if (data === null) {
            data = {};
        }

        var allData = {action: action,
                       time: new Date().getTime(),
                       thread: null,
                       pid: null,
                       source: this.name};

        for (var attrname in data) {
            allData[attrname] = data[attrname];
        }

        this._dumpMessage(allData);
    };

    this._dumpMessage = function(message) {
        var str = JSON.stringify(message);
        // BUGFIX: browser-chrome tests don't use LogController
        if (Object.keys(LogController.listeners).length !== 0) {
            LogController.log(str);
        } else {
            dump('\n' + str + '\n');
        }
    };

    /* Message validation. Only checking the action for now */
    this.validMessage = function(message) {
        return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
    };

}
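For reference, _logData above wraps every action in a fixed envelope (action, time, thread, pid, source) before serializing it, and that envelope is what the Python side parses back out of the log. An equivalent sketch in Python, with example values (field set taken from the code above; this is an illustration, not the shipped logger):

    import json
    import time

    def make_message(action, source, **data):
        # Build the envelope _logData produces, then merge the
        # action-specific fields on top of it.
        message = {"action": action,
                   "time": int(time.time() * 1000),
                   "thread": None,
                   "pid": None,
                   "source": source}
        message.update(data)
        return json.dumps(message)

    # make_message("test_status", "mochitest",
    #              test="test_a.html", subtest="clicked", status="PASS")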

/**
 * TestRunner: A test runner for SimpleTest
 * TODO:

@@ -243,6 +91,7 @@ TestRunner.runSlower = false;
TestRunner.dumpOutputDirectory = "";
TestRunner.dumpAboutMemoryAfterTest = false;
TestRunner.dumpDMDAfterTest = false;
TestRunner.quiet = false;
TestRunner.slowestTestTime = 0;
TestRunner.slowestTestURL = "";

@@ -277,7 +126,6 @@ TestRunner._checkForHangs = function() {
        if (runtime >= TestRunner.timeout * TestRunner._timeoutFactor) {
            var frameWindow = $('testframe').contentWindow.wrappedJSObject ||
                              $('testframe').contentWindow;
            // TODO : Do this in a way that reports that the test ended with a status "TIMEOUT"
            reportError(frameWindow, "Test timed out.");

            // If we have too many timeouts, give up. We don't want to wait hours

@@ -357,11 +205,11 @@ TestRunner.generateFailureList = function () {
/**
 * If logEnabled is true, this is the logger that will be used.
 **/
TestRunner.structuredLogger = new StructuredLogger('mochitest');
TestRunner.logger = LogController;

TestRunner.log = function(msg) {
    if (TestRunner.logEnabled) {
        TestRunner.structuredLogger.info(msg);
        TestRunner.logger.log(msg);
    } else {
        dump(msg + "\n");
    }

@@ -369,7 +217,7 @@ TestRunner.log = function(msg) {

TestRunner.error = function(msg) {
    if (TestRunner.logEnabled) {
        TestRunner.structuredLogger.error(msg);
        TestRunner.logger.error(msg);
    } else {
        dump(msg + "\n");
    }

@@ -416,7 +264,7 @@ TestRunner._makeIframe = function (url, retry) {
                return;
            }

            TestRunner.structuredLogger.error("Unable to restore focus, expect failures and timeouts.");
            TestRunner.log("Error: Unable to restore focus, expect failures and timeouts.");
        }
        window.scrollTo(0, $('indicator').offsetTop);
        iframe.src = url;

@@ -446,13 +294,12 @@ TestRunner.getLoadedTestURL = function () {
 *
 **/
TestRunner.runTests = function (/*url...*/) {
    TestRunner.structuredLogger.info("SimpleTest START");
    TestRunner.log("SimpleTest START");
    TestRunner.originalTestURL = $("current-test").innerHTML;

    SpecialPowers.registerProcessCrashObservers();

    TestRunner._urls = flattenArguments(arguments);

    $('testframe').src="";
    TestRunner._checkForHangs();
    TestRunner.runNextTest();

@@ -467,7 +314,7 @@ TestRunner.resetTests = function(listURLs) {
    // Reset our "Current-test" line - functionality depends on it
    $("current-test").innerHTML = TestRunner.originalTestURL;
    if (TestRunner.logEnabled)
        TestRunner.structuredLogger.info("SimpleTest START Loop " + TestRunner._currentLoop);
        TestRunner.log("SimpleTest START Loop " + TestRunner._currentLoop);

    TestRunner._urls = listURLs;
    $('testframe').src="";

@@ -493,49 +340,41 @@ TestRunner.runNextTest = function() {
        TestRunner._expectedMinAsserts = 0;
        TestRunner._expectedMaxAsserts = 0;

        TestRunner.structuredLogger.testStart(url);
        TestRunner.log("TEST-START | " + url); // used by automation.py

        TestRunner._makeIframe(url, 0);
    } else {
        $("current-test").innerHTML = "<b>Finished</b>";
        TestRunner._makeIframe("about:blank", 0);

        var passCount = parseInt($("pass-count").innerHTML, 10);
        var failCount = parseInt($("fail-count").innerHTML, 10);
        var todoCount = parseInt($("todo-count").innerHTML, 10);

        if (passCount === 0 &&
            failCount === 0 &&
            todoCount === 0)
        if (parseInt($("pass-count").innerHTML) == 0 &&
            parseInt($("fail-count").innerHTML) == 0 &&
            parseInt($("todo-count").innerHTML) == 0)
        {
            // No |$('testframe').contentWindow|, so manually update: ...
            // ... the log,
            TestRunner.structuredLogger.testEnd('SimpleTest/TestRunner.js',
                                                "ERROR",
                                                "OK",
                                                "No checks actually run");
            // ... the count,
            $("fail-count").innerHTML = 1;
            // ... the indicator.
            var indicator = $("indicator");
            indicator.innerHTML = "Status: Fail (No checks actually run)";
            indicator.style.backgroundColor = "red";
            // No |$('testframe').contentWindow|, so manually update: ...
            // ... the log,
            TestRunner.error("TEST-UNEXPECTED-FAIL | (SimpleTest/TestRunner.js) | No checks actually run.");
            // ... the count,
            $("fail-count").innerHTML = 1;
            // ... the indicator.
            var indicator = $("indicator");
            indicator.innerHTML = "Status: Fail (No checks actually run)";
            indicator.style.backgroundColor = "red";
        }

        SpecialPowers.unregisterProcessCrashObservers();

        TestRunner.structuredLogger.info("TEST-START | Shutdown");
        TestRunner.structuredLogger.info("Passed: " + passCount);
        TestRunner.structuredLogger.info("Failed: " + failCount);
        TestRunner.structuredLogger.info("Todo: " + todoCount);
        TestRunner.structuredLogger.info("Slowest: " + TestRunner.slowestTestTime + 'ms - ' + TestRunner.slowestTestURL);

        TestRunner.log("TEST-START | Shutdown"); // used by automation.py
        TestRunner.log("Passed: " + $("pass-count").innerHTML);
        TestRunner.log("Failed: " + $("fail-count").innerHTML);
        TestRunner.log("Todo: " + $("todo-count").innerHTML);
        TestRunner.log("Slowest: " + TestRunner.slowestTestTime + 'ms - ' + TestRunner.slowestTestURL);
        // If we are looping, don't send this cause it closes the log file
        if (TestRunner.repeat === 0) {
            TestRunner.structuredLogger.info("SimpleTest FINISHED");
        if (TestRunner.repeat == 0) {
            TestRunner.log("SimpleTest FINISHED");
        }

        if (TestRunner.repeat === 0 && TestRunner.onComplete) {
        if (TestRunner.repeat == 0 && TestRunner.onComplete) {
            TestRunner.onComplete();
        }

@@ -546,8 +385,8 @@ TestRunner.runNextTest = function() {
    } else {
        // Loops are finished
        if (TestRunner.logEnabled) {
            TestRunner.structuredLogger.info("TEST-INFO | Ran " + TestRunner._currentLoop + " Loops");
            TestRunner.structuredLogger.info("SimpleTest FINISHED");
            TestRunner.log("TEST-INFO | Ran " + TestRunner._currentLoop + " Loops");
            TestRunner.log("SimpleTest FINISHED");
        }

        if (TestRunner.onComplete)

@@ -569,19 +408,16 @@ TestRunner.testFinished = function(tests) {
    // have a chance to unload it.
    if (TestRunner._currentTest == TestRunner._lastTestFinished &&
        !TestRunner._loopIsRestarting) {
        TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
                                            "ERROR",
                                            "OK",
                                            "called finish() multiple times");
        TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                         TestRunner.currentTestURL +
                         " | called finish() multiple times");
        TestRunner.updateUI([{ result: false }]);
        return;
    }
    TestRunner._lastTestFinished = TestRunner._currentTest;
    TestRunner._loopIsRestarting = false;

    // TODO : replace this by a function that returns the mem data as an object
    // that's dumped later with the test_end message
    MemoryStats.dump(TestRunner.structuredLogger, TestRunner._currentTest,
    MemoryStats.dump(TestRunner.log, TestRunner._currentTest,
                     TestRunner.currentTestURL,
                     TestRunner.dumpOutputDirectory,
                     TestRunner.dumpAboutMemoryAfterTest,

@@ -589,51 +425,41 @@ TestRunner.testFinished = function(tests) {

    function cleanUpCrashDumpFiles() {
        if (!SpecialPowers.removeExpectedCrashDumpFiles(TestRunner._expectingProcessCrash)) {
            TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
                                                "ERROR",
                                                "OK",
                                                "This test did not leave any crash dumps behind, but we were expecting some!");
            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                             TestRunner.currentTestURL +
                             " | This test did not leave any crash dumps behind, but we were expecting some!");
            tests.push({ result: false });
        }
        var unexpectedCrashDumpFiles =
            SpecialPowers.findUnexpectedCrashDumpFiles();
        TestRunner._expectingProcessCrash = false;
        if (unexpectedCrashDumpFiles.length) {
            TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
                                                "ERROR",
                                                "OK",
                                                "This test left crash dumps behind, but we " +
                                                "weren't expecting it to!",
                                                {unexpected_crashdump_files: unexpectedCrashDumpFiles});
            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                             TestRunner.currentTestURL +
                             " | This test left crash dumps behind, but we " +
                             "weren't expecting it to!");
            tests.push({ result: false });
            unexpectedCrashDumpFiles.sort().forEach(function(aFilename) {
                TestRunner.structuredLogger.info("Found unexpected crash dump file " +
                                                 aFilename + ".");
                TestRunner.log("TEST-INFO | Found unexpected crash dump file " +
                               aFilename + ".");
            });
        }
    }

    function runNextTest() {
        if (TestRunner.currentTestURL != TestRunner.getLoadedTestURL()) {
            TestRunner.structuredLogger.testStatus(TestRunner.currentTestURL,
                                                   TestRunner.getLoadedTestURL(),
                                                   "ERROR",
                                                   "OK",
                                                   "finished in a non-clean fashion, probably" +
                                                   " because it didn't call SimpleTest.finish()",
                                                   {loaded_test_url: TestRunner.getLoadedTestURL()});
            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                             TestRunner.currentTestURL +
                             " | " + TestRunner.getLoadedTestURL() +
                             " finished in a non-clean fashion, probably" +
                             " because it didn't call SimpleTest.finish()");
            tests.push({ result: false });
        }

        var runtime = new Date().valueOf() - TestRunner._currentTestStartTime;

        TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
                                            "OK",
                                            undefined,
                                            "Finished in " + runtime + "ms",
                                            {runtime: runtime}
                                            );

        TestRunner.log("TEST-END | " +
                       TestRunner.currentTestURL +
                       " | finished in " + runtime + "ms");
        if (TestRunner.slowestTestTime < runtime && TestRunner._timeoutFactor == 1) {
            TestRunner.slowestTestTime = runtime;
            TestRunner.slowestTestURL = TestRunner.currentTestURL;

@@ -670,28 +496,13 @@ TestRunner.testUnloaded = function() {
    var max = TestRunner._expectedMaxAsserts;
    var min = TestRunner._expectedMinAsserts;
    if (numAsserts > max) {
        TestRunner.structuredLogger.testEnd(url,
                                            "ERROR",
                                            "OK",
                                            "Assertion count " + numAsserts + " is greater than expected range " +
                                            min + "-" + max + " assertions.",
                                            {assertions: numAsserts, min_asserts: min, max_asserts: max});
        TestRunner.error("TEST-UNEXPECTED-FAIL | " + url + " | Assertion count " + numAsserts + " is greater than expected range " + min + "-" + max + " assertions.");
        TestRunner.updateUI([{ result: false }]);
    } else if (numAsserts < min) {
        TestRunner.structuredLogger.testEnd(url,
                                            "OK",
                                            "ERROR",
                                            "Assertion count " + numAsserts + " is less than expected range " +
                                            min + "-" + max + " assertions.",
                                            {assertions: numAsserts, min_asserts: min, max_asserts: max});
        TestRunner.error("TEST-UNEXPECTED-PASS | " + url + " | Assertion count " + numAsserts + " is less than expected range " + min + "-" + max + " assertions.");
        TestRunner.updateUI([{ result: false }]);
    } else if (numAsserts > 0) {
        TestRunner.structuredLogger.testEnd(url,
                                            "ERROR",
                                            "ERROR",
                                            "Assertion count " + numAsserts + " within expected range " +
                                            min + "-" + max + " assertions.",
                                            {assertions: numAsserts, min_asserts: min, max_asserts: max});
        TestRunner.log("TEST-KNOWN-FAIL | " + url + " | Assertion count " + numAsserts + " within expected range " + min + "-" + max + " assertions.");
        }
    }
    TestRunner._currentTest++;
@@ -10,7 +10,7 @@ TestRunner.logger = LogController;
/* Helper function */
parseQueryString = function(encodedString, useArrays) {
    // strip a leading '?' from the encoded string
    var qstr = (encodedString[0] == "?") ? encodedString.substring(1) :
    var qstr = (encodedString[0] == "?") ? encodedString.substring(1) :
               encodedString;
    var pairs = qstr.replace(/\+/g, "%20").split(/(\&\;|\&\#38\;|\&|\&)/);
    var o = {};

@@ -92,7 +92,7 @@ var consoleLevel = params.consoleLevel || null;
// repeat tells us how many times to repeat the tests
if (params.repeat) {
    TestRunner.repeat = params.repeat;
}
}

if (params.runUntilFailure) {
    TestRunner.runUntilFailure = true;

@@ -135,13 +135,17 @@ if (params.dumpDMDAfterTest) {
    TestRunner.dumpDMDAfterTest = true;
}

if (params.quiet) {
    TestRunner.quiet = true;
}

// Log things to the console if appropriate.
TestRunner.logger.addListener("dumpListener", consoleLevel + "", function(msg) {
    dump('\n' + msg.info.join(' ').trim() + "\n");
    dump(msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n");
});

var gTestList = [];
var RunSet = {};
var RunSet = {}
RunSet.runall = function(e) {
    // Filter tests to include|exclude tests based on data in params.filter.
    // This allows for including or excluding tests from the gTestList

@@ -186,7 +190,7 @@ RunSet.reloadAndRunAll = function(e) {
            window.location.href += "&autorun=1";
        } else {
            window.location.href += "?autorun=1";
        }
    }
};

// UI Stuff

@@ -241,7 +245,7 @@ function hookupTests(testList) {
    }

    document.getElementById('runtests').onclick = RunSet.reloadAndRunAll;
    document.getElementById('toggleNonTests').onclick = toggleNonTests;
    document.getElementById('toggleNonTests').onclick = toggleNonTests;
    // run automatically if autorun specified
    if (params.autorun) {
        RunSet.runall();

@@ -33,7 +33,15 @@ if (params.logFile) {
    TestRunner.logger.addListener("mozLogger", fileLevel + "", spl.getLogCallback());
}

var RunSet = {};
// if we get a quiet param, don't log to the console
if (!params.quiet) {
    function dumpListener(msg) {
        dump("*** " + msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n");
    }
    TestRunner.logger.addListener("dumpListener", consoleLevel + "", dumpListener);
}

var RunSet = {}
RunSet.runall = function() {
    TestRunner.runTests(
        'test_bug362788.xhtml'

@@ -49,7 +57,7 @@ RunSet.reloadAndRunAll = function() {
    }
};

// run automatically if
// run automatically if
if (params.autorun) {
    RunSet.runall();
}

@@ -6,6 +6,7 @@ import time

import base


def format_seconds(total):
    """Format number of seconds to MM:SS.DD form."""
    minutes, seconds = divmod(total, 60)

@@ -63,7 +63,7 @@ class StreamHandler(BaseHandler):
        with self._lock:
            #XXX Should encoding be the formatter's responsibility?
            try:
                self.stream.write(formatted.encode("utf8", "replace"))
                self.stream.write(formatted.encode("utf8"))
            except:
                raise
            self.stream.flush()

@@ -113,23 +113,13 @@ class StructuredLogger(object):
        message is logged from this logger"""
        return self._handlers[self.name]

    def log_raw(self, data):
        if "action" not in data:
            raise ValueError
        data = self._make_log_data(data['action'], data)
        self._handle_log(data)

    def _log_data(self, action, data=None):
        if data is None:
            data = {}

        log_data = self._make_log_data(action, data)
        self._handle_log(log_data)

    def _handle_log(self, data):
        with self._lock:
            log_data = self._make_log_data(action, data)
            for handler in self.handlers:
                handler(data)
                handler(log_data)

    def _make_log_data(self, action, data):
        all_data = {"action": action,
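Note the split of responsibilities the hunk above restores on the removed side: log_raw validates that a record carries an "action" before dispatch, while _handle_log takes the lock and fans the record out to every handler. Condensed into a runnable sketch (illustrative; the real class carries more state than this):

    import threading

    class MiniLogger(object):
        def __init__(self):
            self._lock = threading.Lock()
            self.handlers = []

        def log_raw(self, data):
            if "action" not in data:
                raise ValueError("structured log data needs an 'action' field")
            self._handle_log(data)

        def _handle_log(self, data):
            with self._lock:
                for handler in self.handlers:
                    handler(data)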
@@ -20,11 +20,10 @@ class DeviceRunner(BaseRunner):
    remote devices (or emulators), such as B2G.
    """
    def __init__(self, device_class, device_args=None, **kwargs):
        process_args = {'stream': sys.stdout,
                        'processOutputLine': self.on_output,
                        'onTimeout': self.on_timeout }
        process_args.update(kwargs.get('process_args') or {})

        process_args = kwargs.get('process_args', {})
        process_args.update({ 'stream': sys.stdout,
                              'processOutputLine': self.on_output,
                              'onTimeout': self.on_timeout })
        kwargs['process_args'] = process_args
        BaseRunner.__init__(self, **kwargs)

@@ -8,12 +8,12 @@ function MozillaLogger(aPath) {
MozillaLogger.prototype = {

    init : function(path) {},


    getLogCallback : function() {
        return function (msg) {
            var data = msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n";
            dump(data);
        };
    }
    },

    log : function(msg) {

@@ -42,13 +42,13 @@ SpecialPowersLogger.prototype = {

    getLogCallback : function () {
        return function (msg) {
            var data = '\n' + msg.info.join(' ').trim() + '\n';
            var data = msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n";
            SpecialPowers.log(data);

            if (data.indexOf("SimpleTest FINISH") >= 0) {
                SpecialPowers.closeLogFile();
            }
        };
    }
    },

    log : function (msg) {

@@ -76,7 +76,7 @@ function MozillaFileLogger(aPath) {
}

MozillaFileLogger.prototype = {


    init : function (path) {
        var PR_WRITE_ONLY = 0x02; // Open for writing only.
        var PR_CREATE_FILE = 0x08;

@@ -85,40 +85,33 @@ MozillaFileLogger.prototype = {
                     createInstance(Components.interfaces.nsILocalFile);
        this._file.initWithPath(path);
        this._foStream = Components.classes["@mozilla.org/network/file-output-stream;1"].
                         createInstance(Components.interfaces.nsIFileOutputStream);

        this._foStream.init(this._file, PR_WRITE_ONLY | PR_CREATE_FILE | PR_APPEND, 0664, 0);
        this._converter = Components.classes["@mozilla.org/intl/converter-output-stream;1"].
                          createInstance(Components.interfaces.nsIConverterOutputStream);
        this._converter.init(this._foStream, "UTF-8", 0, 0);
                         createInstance(Components.interfaces.nsIFileOutputStream);
        this._foStream.init(this._file, PR_WRITE_ONLY | PR_CREATE_FILE | PR_APPEND,
                            0664, 0);
    },

    getLogCallback : function() {
        return function (msg) {
            var data = '\n' + msg.info.join(' ').trim() + '\n';
            if (MozillaFileLogger._converter) {
                this._converter.writeString(data);
            }
            var data = msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n";
            if (MozillaFileLogger._foStream)
                this._foStream.write(data, data.length);

            if (data.indexOf("SimpleTest FINISH") >= 0) {
                MozillaFileLogger.close();
            }
        };
    }
    },

    log : function(msg) {
        if (this._converter) {
            this._converter.writeString(msg);
        }
        if (this._foStream)
            this._foStream.write(msg, msg.length);
    },
    close : function() {
        if (this._converter) {
            this._converter.flush();
            this._converter.close();
        }

    close : function() {
        if(this._foStream)
            this._foStream.close();

        this._foStream = null;
        this._converter = null;
        this._file = null;
    }
};

@@ -49,7 +49,7 @@ function runTests() {
    DebuggerServer.init();
}

function TestActor() {dump("instantiate test actor\n");}
function TestActor() {dump("instantiate test actor");}
TestActor.prototype = {
    actorPrefix: "test",

@@ -357,7 +357,7 @@ function close_manager(aManagerWindow, aCallback, aLongerTimeout) {

    aManagerWindow.addEventListener("unload", function() {
        try {
            dump("Manager window unload handler\n");
            dump("Manager window unload handler");
            this.removeEventListener("unload", arguments.callee, false);
            resolve();
        } catch(e) {

@@ -1103,7 +1103,7 @@ MockAddon.prototype = {
    get applyBackgroundUpdates() {
        return this._applyBackgroundUpdates;
    },


    set applyBackgroundUpdates(val) {
        if (val != AddonManager.AUTOUPDATE_DEFAULT &&
            val != AddonManager.AUTOUPDATE_DISABLE &&