Mirror of https://github.com/mozilla/gecko-dev.git

Bug 886570 - Initial integration of structured logs in mochitest; r=ahal,Ms2ger

Parent: 722005748f
Commit: 4b03d2c025
@@ -78,7 +78,7 @@ DEBUGGER_INFO = {
 # semi-deliberately leaked, so we set '--show-possibly-lost=no' to avoid
 # uninteresting output from those objects. We set '--smc-check==all-non-file'
 # and '--vex-iropt-register-updates=allregs-at-mem-access' so that valgrind
-# deals properly with JIT'd JavaScript code.
+# deals properly with JIT'd JavaScript code.
 "valgrind": {
   "interactive": False,
   "args": " ".join(["--leak-check=full",
@@ -602,29 +602,31 @@ class ShutdownLeaks(object):
     self.currentTest = None
     self.seenShutdown = False

-  def log(self, line):
-    if line[2:11] == "DOMWINDOW":
-      self._logWindow(line)
-    elif line[2:10] == "DOCSHELL":
-      self._logDocShell(line)
-    elif line.startswith("TEST-START"):
-      fileName = line.split(" ")[-1].strip().replace("chrome://mochitests/content/browser/", "")
+  def log(self, message):
+    if message['action'] == 'log':
+      line = message['message']
+      if line[2:11] == "DOMWINDOW":
+        self._logWindow(line)
+      elif line[2:10] == "DOCSHELL":
+        self._logDocShell(line)
+    elif message['action'] == 'test_start':
+      fileName = message['test'].replace("chrome://mochitests/content/browser/", "")
       self.currentTest = {"fileName": fileName, "windows": set(), "docShells": set()}
-    elif line.startswith("INFO TEST-END"):
+    elif message['action'] == 'test_end':
       # don't track a test if no windows or docShells leaked
       if self.currentTest and (self.currentTest["windows"] or self.currentTest["docShells"]):
         self.tests.append(self.currentTest)
       self.currentTest = None
-    elif line.startswith("INFO TEST-START | Shutdown"):
+    elif message['action'] == 'suite_end':
       self.seenShutdown = True

   def process(self):
     for test in self._parseLeakingTests():
       for url, count in self._zipLeakedWindows(test["leakedWindows"]):
-        self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d window(s) until shutdown [url = %s]", test["fileName"], count, url)
+        self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d window(s) until shutdown [url = %s]" % (test["fileName"], count, url))

       if test["leakedDocShells"]:
-        self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d docShell(s) until shutdown", test["fileName"], len(test["leakedDocShells"]))
+        self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d docShell(s) until shutdown" % (test["fileName"], len(test["leakedDocShells"])))

   def _logWindow(self, line):
     created = line[:2] == "++"
@@ -633,7 +635,7 @@ class ShutdownLeaks(object):

     # log line has invalid format
     if not pid or not serial:
-      self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>", line)
+      self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>" % line)
       return

     key = pid + "." + serial
@@ -654,7 +656,7 @@ class ShutdownLeaks(object):

     # log line has invalid format
     if not pid or not id:
-      self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>", line)
+      self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>" % line)
       return

     key = pid + "." + id

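ShutdownLeaks.log now consumes structured message dicts rather than raw TBPL lines. A minimal sketch of the message shapes it switches on (the leak line and test URL below are illustrative, not taken from the commit):

```python
# Sketch: the dict shapes ShutdownLeaks.log now receives (illustrative data).
messages = [
    {'action': 'test_start',
     'test': 'chrome://mochitests/content/browser/browser_example.js'},
    {'action': 'log',
     'message': '++DOMWINDOW == 42 (0xf00) [pid = 1234] [serial = 7]'},
    {'action': 'test_end',
     'test': 'chrome://mochitests/content/browser/browser_example.js'},
    {'action': 'suite_end'},
]

for message in messages:
    if message['action'] == 'log':
        line = message['message']
        if line[2:11] == "DOMWINDOW":
            print("window created/destroyed: " + line)
    elif message['action'] == 'test_start':
        print("tracking test: " + message['test'])
    elif message['action'] == 'suite_end':
        print("shutdown phase reached")
```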
@@ -21,11 +21,13 @@ fennecLogcatFilters = [ "The character encoding of the HTML document was not dec
 class RemoteAutomation(Automation):
     _devicemanager = None

-    def __init__(self, deviceManager, appName = '', remoteLog = None):
+    def __init__(self, deviceManager, appName = '', remoteLog = None,
+                 processArgs=None):
         self._devicemanager = deviceManager
         self._appName = appName
         self._remoteProfile = None
         self._remoteLog = remoteLog
+        self._processArgs = processArgs or {};

         # Default our product to fennec
         self._product = "fennec"
@@ -184,17 +186,21 @@ class RemoteAutomation(Automation):
         if stdout == None or stdout == -1 or stdout == subprocess.PIPE:
             stdout = self._remoteLog

-        return self.RProcess(self._devicemanager, cmd, stdout, stderr, env, cwd, self._appName)
+        return self.RProcess(self._devicemanager, cmd, stdout, stderr, env, cwd, self._appName,
+                             **self._processArgs)

     # be careful here as this inner class doesn't have access to outer class members
     class RProcess(object):
         # device manager process
         dm = None
-        def __init__(self, dm, cmd, stdout = None, stderr = None, env = None, cwd = None, app = None):
+        def __init__(self, dm, cmd, stdout=None, stderr=None, env=None, cwd=None, app=None,
+                     messageLogger=None):
             self.dm = dm
             self.stdoutlen = 0
             self.lastTestSeen = "remoteautomation.py"
             self.proc = dm.launchProcess(cmd, stdout, cwd, env, True)
+            self.messageLogger = messageLogger

             if (self.proc is None):
                 if cmd[0] == 'am':
                     self.proc = stdout
@@ -210,6 +216,9 @@ class RemoteAutomation(Automation):
             # The benefit of the following sleep is unclear; it was formerly 15 seconds
             time.sleep(1)

+            # Used to buffer log messages until we meet a line break
+            self.logBuffer = ""
+
         @property
         def pid(self):
             pid = self.dm.processExist(self.procName)
@@ -221,29 +230,49 @@ class RemoteAutomation(Automation):
                 return 0
             return pid

-        @property
-        def stdout(self):
+        def read_stdout(self):
             """ Fetch the full remote log file using devicemanager and return just
-            the new log entries since the last call (as a multi-line string).
+            the new log entries since the last call (as a list of messages or lines).
             """
-            if self.dm.fileExists(self.proc):
-                try:
-                    newLogContent = self.dm.pullFile(self.proc, self.stdoutlen)
-                except DMError:
-                    # we currently don't retry properly in the pullFile
-                    # function in dmSUT, so an error here is not necessarily
-                    # the end of the world
-                    return ''
-                self.stdoutlen += len(newLogContent)
-                # Match the test filepath from the last TEST-START line found in the new
-                # log content. These lines are in the form:
-                # 1234 INFO TEST-START | /filepath/we/wish/to/capture.html\n
+            if not self.dm.fileExists(self.proc):
+                return []
+            try:
+                newLogContent = self.dm.pullFile(self.proc, self.stdoutlen)
+            except DMError:
+                # we currently don't retry properly in the pullFile
+                # function in dmSUT, so an error here is not necessarily
+                # the end of the world
+                return []
+            if not newLogContent:
+                return []
+
+            self.stdoutlen += len(newLogContent)
+
+            if self.messageLogger is None:
                 testStartFilenames = re.findall(r"TEST-START \| ([^\s]*)", newLogContent)
                 if testStartFilenames:
                     self.lastTestSeen = testStartFilenames[-1]
-                return newLogContent.strip('\n').strip()
-            else:
-                return ''
+                print newLogContent
+                return [newLogContent]
+
+            self.logBuffer += newLogContent
+            lines = self.logBuffer.split('\n')
+            if not lines:
+                return
+
+            # We only keep the last (unfinished) line in the buffer
+            self.logBuffer = lines[-1]
+            del lines[-1]
+            messages = []
+            for line in lines:
+                # This passes the line to the logger (to be logged or buffered)
+                # and returns a list of structured messages (dict) or None, depending on the log
+                parsed_messages = self.messageLogger.write(line)
+                for message in parsed_messages:
+                    if message['action'] == 'test_start':
+                        self.lastTestSeen = message['test']
+                messages += parsed_messages
+            return messages

         @property
         def getLastTestSeen(self):
@@ -258,7 +287,7 @@ class RemoteAutomation(Automation):
         def wait(self, timeout = None, noOutputTimeout = None):
             timer = 0
             noOutputTimer = 0
-            interval = 20
+            interval = 20

             if timeout == None:
                 timeout = self.timeout
@@ -266,10 +295,9 @@ class RemoteAutomation(Automation):
             status = 0
             while (self.dm.getTopActivity() == self.procName):
                 # retrieve log updates every 60 seconds
-                if timer % 60 == 0:
-                    t = self.stdout
-                    if t != '':
-                        print t
+                if timer % 60 == 0:
+                    messages = self.read_stdout()
+                    if messages:
                         noOutputTimer = 0

                 time.sleep(interval)
@@ -283,7 +311,7 @@ class RemoteAutomation(Automation):
                     break

             # Flush anything added to stdout during the sleep
-            print self.stdout
+            self.read_stdout()

             return status

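read_stdout above pulls the remote log in chunks that can end mid-line, so only complete lines are handed to the message logger and the unfinished tail stays in logBuffer. A standalone sketch of that buffering (the class and data are illustrative):

```python
# Sketch of the logBuffer handling in RProcess.read_stdout (illustrative).
class LineBuffer(object):
    def __init__(self):
        self.log_buffer = ""

    def feed(self, chunk):
        self.log_buffer += chunk
        lines = self.log_buffer.split('\n')
        # Keep only the last (possibly unfinished) line in the buffer
        self.log_buffer = lines[-1]
        return lines[:-1]

buf = LineBuffer()
assert buf.feed('{"action": "log", "mess') == []
assert buf.feed('age": "hi"}\nnext') == ['{"action": "log", "message": "hi"}']
assert buf.log_buffer == 'next'
```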
@@ -12,7 +12,6 @@ public class FennecMochitestAssert implements Assert {
     private LinkedList<testInfo> mTestList = new LinkedList<testInfo>();

     // Internal state variables to make logging match up with existing mochitests
-    private int mLineNumber = 0;
     private int mPassed = 0;
     private int mFailed = 0;
     private int mTodo = 0;
@@ -45,13 +44,13 @@ public class FennecMochitestAssert implements Assert {

         String message;
         if (!mLogStarted) {
-            dumpLog(Integer.toString(mLineNumber++) + " INFO SimpleTest START");
+            dumpLog("SimpleTest START");
             mLogStarted = true;
         }

         if (mLogTestName != "") {
             long diff = SystemClock.uptimeMillis() - mStartTime;
-            message = Integer.toString(mLineNumber++) + " INFO TEST-END | " + mLogTestName;
+            message = "TEST-END | " + mLogTestName;
             message += " | finished in " + diff + "ms";
             dumpLog(message);
             mLogTestName = "";
@@ -63,7 +62,7 @@ public class FennecMochitestAssert implements Assert {
         mLogTestName = nameParts[nameParts.length - 1];
         mStartTime = SystemClock.uptimeMillis();

-        dumpLog(Integer.toString(mLineNumber++) + " INFO TEST-START | " + mLogTestName);
+        dumpLog("TEST-START | " + mLogTestName);
     }

     class testInfo {
@@ -95,7 +94,7 @@ public class FennecMochitestAssert implements Assert {
         String diag = test.mName;
         if (test.mDiag != null) diag += " - " + test.mDiag;

-        String message = Integer.toString(mLineNumber++) + " INFO " + resultString + " | " + mLogTestName + " | " + diag;
+        String message = resultString + " | " + mLogTestName + " | " + diag;
         dumpLog(message);

         if (test.mInfo) {
@@ -117,21 +116,21 @@ public class FennecMochitestAssert implements Assert {

         if (mLogTestName != "") {
             long diff = SystemClock.uptimeMillis() - mStartTime;
-            message = Integer.toString(mLineNumber++) + " INFO TEST-END | " + mLogTestName;
+            message = "TEST-END | " + mLogTestName;
             message += " | finished in " + diff + "ms";
             dumpLog(message);
             mLogTestName = "";
         }

-        message = Integer.toString(mLineNumber++) + " INFO TEST-START | Shutdown";
+        message = "TEST-START | Shutdown";
         dumpLog(message);
-        message = Integer.toString(mLineNumber++) + " INFO Passed: " + Integer.toString(mPassed);
+        message = "Passed: " + Integer.toString(mPassed);
         dumpLog(message);
-        message = Integer.toString(mLineNumber++) + " INFO Failed: " + Integer.toString(mFailed);
+        message = "Failed: " + Integer.toString(mFailed);
         dumpLog(message);
-        message = Integer.toString(mLineNumber++) + " INFO Todo: " + Integer.toString(mTodo);
+        message = "Todo: " + Integer.toString(mTodo);
         dumpLog(message);
-        message = Integer.toString(mLineNumber++) + " INFO SimpleTest FINISHED";
+        message = "SimpleTest FINISHED";
         dumpLog(message);
     }

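The Java-side mLineNumber bookkeeping can go away because numbering now happens centrally: the MochitestFormatter added to runtests.py further down in this commit prepends an incrementing "n INFO" prefix to every formatted line. A sketch of that behaviour (the class here is a stand-in, not the committed code):

```python
# Sketch of the harness-side numbering that replaces mLineNumber (see the
# MochitestFormatter added to runtests.py below); illustrative stand-in class.
class NumberingFormatter(object):
    log_num = 0

    def __call__(self, text):
        output = '%d INFO %s' % (NumberingFormatter.log_num, text)
        NumberingFormatter.log_num += 1
        return output

fmt = NumberingFormatter()
print(fmt("TEST-START | testLoad"))   # 0 INFO TEST-START | testLoad
print(fmt("SimpleTest FINISHED"))     # 1 INFO SimpleTest FINISHED
```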
@@ -149,7 +149,6 @@ is(r.result, null,

 // Test loading an empty file works (and doesn't crash!)
 var emptyFile = createFileWithData("");
-dump("hello nurse");
 r = new FileReader();
 r.onload = getLoadHandler("", 0, "empty no encoding reading");
 r.readAsText(emptyFile, "");
@@ -427,8 +426,8 @@ function testHasRun() {
 //alert(testRanCounter);
 ++testRanCounter;
 if (testRanCounter == expectedTestCount) {
-  is(onloadHasRunText, true, "onload text should have fired by now");
-  is(onloadHasRunBinary, true, "onload binary should have fired by now");
+  is(onloadHasRunText, true, "onload text should have fired by now");
+  is(onloadHasRunBinary, true, "onload binary should have fired by now");
   SimpleTest.finish();
 }
 }

@@ -37,34 +37,30 @@
       oldComplete();
     }
   };

-  let oldLog = TestRunner.log;
-  TestRunner.log = function(msg) {
+  TestRunner.structuredLogger._dumpMessage = function(msg) {
     sendAsyncMessage("test:SpeechSynthesis:ipcTestMessage", { msg: msg });
   }
 }

-let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/;
-
+let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
+function validStructuredMessage(message) {
+  return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
+}
 function onTestMessage(data) {
-  let message = SpecialPowers.wrap(data).json.msg;
-  let match = regex.exec(message);
-  if (match) {
-    let state = match[1];
-    let details = match[2] + " | " + match[3];
+  let message = SpecialPowers.wrap(data).data.msg;

-    switch (state) {
-      case "TEST-PASS":
-      case "TEST-KNOWN-FAIL":
-        ok(true, details);
+  if (validStructuredMessage(message)) {
+    if (message.test === undefined || message.message === undefined) {
+      return;
+    }
+
+    let details = message.test + " | " + message.message;
+
+    switch(message.action) {
+      case "test_status":
+      case "test_end":
+        ok(message.expected === undefined, message.test, message.message);
         break;

-      case "TEST-UNEXPECTED-FAIL":
-      case "TEST-UNEXPECTED-PASS":
-        ok(false, details);
-        break;
-
-      case "TEST-DEBUG-INFO":
       default:
         info(details);
     }

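The ipc test wrappers above stop regex-matching TBPL strings and instead validate forwarded structured messages. A Python transcription of that validation logic (a sketch under the field names used in the diff; 'expected' is only present on unexpected results):

```python
# Sketch of the validStructuredMessage() check the ipc harnesses now do in JS.
VALID_ACTIONS = {'suite_start', 'suite_end', 'test_start', 'test_end',
                 'test_status', 'process_output', 'log'}

def valid_structured_message(message):
    return isinstance(message, dict) and message.get('action') in VALID_ACTIONS

msg = {'action': 'test_status', 'test': 'test_ipc.html',
       'status': 'PASS', 'message': 'iframe loaded'}
assert valid_structured_message(msg)
assert 'expected' not in msg  # a PASS that was expected to pass is not an error
```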
@@ -36,34 +36,30 @@
     }
   };

-  let oldLog = TestRunner.log;
-  TestRunner.log = function(msg) {
+  TestRunner.structuredLogger._dumpMessage = function(msg) {
     sendAsyncMessage("test:DeviceStorage:ipcTestMessage", { msg: msg });
   }
 }

-let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/;
-
+let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
+function validStructuredMessage(message) {
+  return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
+}
 function onTestMessage(data) {
-  let message = SpecialPowers.wrap(data).json.msg;
-  let match = regex.exec(message);
-  if (match) {
-    let state = match[1];
-    let details = match[2] + " | " + match[3];
+  let message = SpecialPowers.wrap(data).data.msg;

-    switch (state) {
-      case "TEST-PASS":
-      case "TEST-KNOWN-FAIL":
-        ok(true, details);
+  if (validStructuredMessage(message)) {
+    if (message.test === undefined || message.message === undefined) {
+      return;
+    }
+
+    switch (message.action) {
+      case "test_status":
+      case "test_end":
+        ok(message.expected === undefined, message.test, message.message);
         break;

-      case "TEST-UNEXPECTED-FAIL":
-      case "TEST-UNEXPECTED-PASS":
-        ok(false, details);
-        break;
-
-      case "TEST-DEBUG-INFO":
       default:
+        let details = message.test + " | " + message.message;
         info(details);
     }
   }

@@ -42,11 +42,17 @@ var W3CTest = {

   /**
    * Prefixes for the error logging. Indexed first by int(todo) and second by
-   * int(result).
+   * int(result). Also contains the test's status, and expected status.
    */
   "prefixes": [
-    ["TEST-UNEXPECTED-FAIL", "TEST-PASS"],
-    ["TEST-KNOWN-FAIL", "TEST-UNEXPECTED-PASS"]
+    [
+      {status: 'FAIL', expected: 'PASS', message: "TEST-UNEXPECTED-FAIL"},
+      {status: 'PASS', expected: 'PASS', message: "TEST-PASS"}
+    ],
+    [
+      {status: 'FAIL', expected: 'FAIL', message: "TEST-KNOWN-FAIL"},
+      {status: 'PASS', expected: 'FAIL', message: "TEST-UNEXPECTED-PASS"}
+    ]
   ],

   /**
@@ -133,14 +139,21 @@ var W3CTest = {
    */
   "_log": function(test) {
     var url = this.getURL();
-    var msg = this.prefixes[+test.todo][+test.result] + " | ";
-    if (url) {
-      msg += url;
-    }
-    msg += " | " + this.formatTestMessage(test);
+    var message = this.formatTestMessage(test);
+    var result = this.prefixes[+test.todo][+test.result];
+
     if (this.runner) {
-      this.runner[(test.result === !test.todo) ? "log" : "error"](msg);
+      this.runner.structuredLogger.testStatus(url,
+                                              test.name,
+                                              result.status,
+                                              result.expected,
+                                              message);
     } else {
+      var msg = result.message + " | ";
+      if (url) {
+        msg += url;
+      }
+      msg += " | " + this.formatTestMessage(test);
       dump(msg + "\n");
     }
   },

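The reworked W3CTest.prefixes table is a two-level lookup keyed by int(todo) then int(result), now carrying the status/expected pair for structuredLogger.testStatus alongside the legacy TBPL string. The same table, transcribed to Python as a sketch:

```python
# Sketch of the W3CTest.prefixes lookup above, transcribed from the JS.
PREFIXES = [
    [{'status': 'FAIL', 'expected': 'PASS', 'message': "TEST-UNEXPECTED-FAIL"},
     {'status': 'PASS', 'expected': 'PASS', 'message': "TEST-PASS"}],
    [{'status': 'FAIL', 'expected': 'FAIL', 'message': "TEST-KNOWN-FAIL"},
     {'status': 'PASS', 'expected': 'FAIL', 'message': "TEST-UNEXPECTED-PASS"}],
]

def classify(todo, result):
    # todo: test marked as known-failing; result: test actually passed
    return PREFIXES[int(todo)][int(result)]

assert classify(todo=False, result=True)['message'] == "TEST-PASS"
assert classify(todo=True, result=True)['message'] == "TEST-UNEXPECTED-PASS"
```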
@@ -49,44 +49,33 @@
     }
   };

-  function sendTestMessage(msg) {
+  TestRunner.structuredLogger._dumpMessage = function(msg) {
     sendAsyncMessage("test:indexedDB:ipcTestMessage", { msg: msg });
   }

-  TestRunner.log = sendTestMessage;
-  TestRunner.error = sendTestMessage;
 }

-let regexString =
-  "^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL" +
-  "|TEST-DEBUG-INFO|TEST-INFO) \\| ([^\\|]+) \\|(.*)";
-
-let regex = new RegExp(regexString);
-
 let seenTestMessage = false;

+let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
+function validStructuredMessage(message) {
+  return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
+}
 function onTestMessage(data) {
   seenTestMessage = true;
   let message = SpecialPowers.wrap(data).data.msg;
-  let match = regex.exec(message);
-  if (match) {
-    let state = match[1];
-    let details = match[2] + " | " + match[3];
-
-    switch (state) {
-      case "TEST-PASS":
-      case "TEST-KNOWN-FAIL":
-        ok(true, details);
+  if (validStructuredMessage(message)) {
+    if (message.test === undefined || message.message === undefined) {
+      return;
+    }
+
+    switch (message.action) {
+      case "test_status":
+      case "test_end":
+        ok(message.expected === undefined, message.test, message.message);
         break;

-      case "TEST-UNEXPECTED-FAIL":
-      case "TEST-UNEXPECTED-PASS":
-        ok(false, details);
-        break;
-
-      case "TEST-INFO":
-      case "TEST-DEBUG-INFO":
       default:
+        let details = message.test + " | " + message.message;
         info(details);
     }
   }

@@ -30,7 +30,7 @@ inputmethod_setup(function() {
 function appFrameScript() {
   let input = content.document.getElementById('test-input');
   input.onkeydown = input.onkeypress = input.onkeyup = function(event) {
-    dump('key event was fired in file_test_backspace_event.html.');
+    dump('key event was fired in file_test_backspace_event.html.\n');
     sendAsyncMessage('test:KeyBoard:keyEvent', {'type':event.type});
   };
 }

@@ -24,8 +24,8 @@
   SpecialPowers.prototype.unregisterProcessCrashObservers = function() { };

   content.wrappedJSObject.RunSet.reloadAndRunAll({
-    preventDefault: function() { },
-    __exposedProps__: { preventDefault: 'r' }
+    preventDefault: function() { },
+    __exposedProps__: { preventDefault: 'r' }
   });
 }

@@ -44,37 +44,31 @@
       oldComplete();
     }
   };
-  let oldLog = TestRunner.log;
-  TestRunner.log = function(msg) {
+
+  TestRunner.structuredLogger._dumpMessage = function(msg) {
     sendAsyncMessage("test:PeerConnection:ipcTestMessage", { msg: msg });
   };
-  TestRunner.error = function(msg) {
-    sendAsyncMessage("test:PeerConnection:ipcTestMessage", { msg: msg });
-  };
   }
 }

-let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/;
-
+let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
+function validStructuredMessage(message) {
+  return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
+}
 function onTestMessage(data) {
-  let message = SpecialPowers.wrap(data).json.msg;
-  let match = regex.exec(message);
-  if (match) {
-    let state = match[1];
-    let details = match[2] + " | " + match[3];
+  let message = SpecialPowers.wrap(data).data.msg;

-    switch (state) {
-      case "TEST-PASS":
-      case "TEST-KNOWN-FAIL":
-        ok(true, details);
+  if (validStructuredMessage(message)) {
+    if (message.test === undefined || message.message === undefined) {
+      return;
+    }
+
+    switch (message.action) {
+      case "test_status":
+      case "test_end":
+        ok(message.expected === undefined, message.test, message.message);
         break;

-      case "TEST-UNEXPECTED-FAIL":
-      case "TEST-UNEXPECTED-PASS":
-        ok(false, details);
-        break;
-
-      case "TEST-DEBUG-INFO":
       default:
+        let details = message.test + " | " + message.message;
         info(details);
     }
   }

@@ -5,7 +5,7 @@

 <body onload="runTests()">
 <script class="testbody" type="application/javascript">
-dump('lastScript');
+dump('lastScript\n');

 SimpleTest.waitForExplicitFinish();
 setTestPluginEnabledState(SpecialPowers.Ci.nsIPluginTag.STATE_ENABLED);

@@ -7,7 +7,7 @@ onmessage = function(evt) {
   id = setTimeout(function() {}, 200);
   postMessage(clearTimeout(id) == undefined);

-  postMessage(dump(42) == undefined);
+  postMessage(dump(42 + '\n') == undefined);

   postMessage('finished');
 }

@@ -12,7 +12,7 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=661980
 <a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=661980">Mozilla Bug 661980</a>
 <p id="display"></p>
 <div id="content" style="display: none">
-
+
 </div>
 <pre id="test">
 <script type="application/javascript">
@@ -30,7 +30,7 @@ var fakeTarget = {

 var mouseevent = document.createEvent("MouseEvent");
 var didThrow = false;
-dump("hello nurse");
+dump("hello nurse\n");
 try {
   mouseevent.initMouseEvent("mouseover",
                             false, false,

@@ -33,11 +33,11 @@ function runTest() {
   // Let parent process sent message.
   // Wait for onLoad event from 'inner' iframe.
   yield;

   postPos();

   inner.onload = null;
-  dump("hi");
+  dump("hi\n");
   inner.contentWindow.location = "bug583889_inner2.html#id2"
   waitAsync();
   // Let parent process sent message.

@@ -74,7 +74,7 @@ addLoadEvent(function() {

   try {
     var sheet = $("cross-origin-sheet").sheet;
-    dump('aaa');
+    dump('aaa\n');
     is(sheet.cssRules.length, 2,
        "Should be able to get length of list of rules");
     is(sheet.cssRules[0].style.color, "green",

@@ -462,7 +462,7 @@ class Marionette(object):
                  emulator_binary=None, emulator_res=None, connect_to_running_emulator=False,
                  gecko_log=None, homedir=None, baseurl=None, no_window=False, logdir=None,
                  busybox=None, symbols_path=None, timeout=None, socket_timeout=360,
-                 device_serial=None, adb_path=None):
+                 device_serial=None, adb_path=None, process_args=None):
         self.host = host
         self.port = self.local_port = port
         self.bin = bin
@@ -519,7 +519,8 @@ class Marionette(object):
                                                  userdata=emulator_img,
                                                  resolution=emulator_res,
                                                  profile=profile,
-                                                 adb_path=adb_path)
+                                                 adb_path=adb_path,
+                                                 process_args=process_args)
             self.emulator = self.runner.device
             self.emulator.start()
             self.port = self.emulator.setup_port_forwarding(self.port)
@@ -527,7 +528,8 @@ class Marionette(object):

         if connect_to_running_emulator:
             self.runner = B2GEmulatorRunner(b2g_home=homedir,
-                                            logdir=logdir)
+                                            logdir=logdir,
+                                            process_args=process_args)
             self.emulator = self.runner.device
             self.emulator.connect()
             self.port = self.emulator.setup_port_forwarding(self.port)

@@ -11,6 +11,7 @@
         width="1024">
+  <script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/MozillaLogger.js"/>
   <script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/LogController.js"/>
   <script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/TestRunner.js"/>
   <script type="application/javascript" src="chrome://mochikit/content/chrome-harness.js"/>
   <script type="application/javascript" src="chrome://mochikit/content/manifestLibrary.js" />
   <script type="application/javascript" src="chrome://mochikit/content/chunkifyTests.js"/>
@@ -65,7 +66,7 @@
       var Cc = Components.classes;
       var Ci = Components.interfaces;
     }
-
+
     var gConfig;

     var gDumper = {
@@ -82,12 +83,12 @@
       delete this.fileLogger;
       return this.fileLogger = logger;
     },

+    structuredLogger: new StructuredLogger(),
     dump: function (str) {
-      dump(str);
+      this.structuredLogger.info(str);

       if (this.fileLogger)
-        this.fileLogger._foStream.write(str, str.length);
+        this.fileLogger.log(str);
     },

     done: function () {
@@ -141,7 +142,18 @@
       this.lastOutputTime = Date.now();
       this.results.push(result);

-      this.dumper.dump(result.result + " | " + this.path + " | " + result.msg + "\n");
+      if (result.info) {
+        if (result.msg) {
+          this.dumper.structuredLogger.info(result.msg);
+        }
+        return;
+      }
+
+      this.dumper.structuredLogger.testStatus(this.path,
+                                              result.msg,
+                                              result.status,
+                                              result.expected,
+                                              result.msg);
     },

     setDuration: function setDuration(duration) {

@@ -59,7 +59,7 @@ function testInit() {
     messageManager.removeMessageListener("chromeEvent", messageHandler);
     var url = m.json.data;

-    // Window is the [ChromeWindow] for messageManager, so we need content.window
+    // Window is the [ChromeWindow] for messageManager, so we need content.window
     // Currently chrome tests are run in a content window instead of a ChromeWindow
     var webNav = content.window.QueryInterface(Components.interfaces.nsIInterfaceRequestor)
                  .getInterface(Components.interfaces.nsIWebNavigation);
@@ -158,7 +158,7 @@ Tester.prototype = {
     if (gConfig.repeat)
       this.repeat = gConfig.repeat;

-    this.dumper.dump("*** Start BrowserChrome Test Results ***\n");
+    this.dumper.structuredLogger.info("*** Start BrowserChrome Test Results ***");
     Services.console.registerListener(this);
     Services.obs.addObserver(this, "chrome-document-global-created", false);
     Services.obs.addObserver(this, "content-document-global-created", false);
@@ -204,7 +204,7 @@ Tester.prototype = {
     }

     // Remove stale windows
-    this.dumper.dump("TEST-INFO | checking window state\n");
+    this.dumper.structuredLogger.info("checking window state");
     let windowsEnum = Services.wm.getEnumerator(null);
     while (windowsEnum.hasMoreElements()) {
       let win = windowsEnum.getNext();
@@ -223,7 +223,10 @@ Tester.prototype = {
         if (this.currentTest)
           this.currentTest.addResult(new testResult(false, msg, "", false));
         else
-          this.dumper.dump("TEST-UNEXPECTED-FAIL | (browser-test.js) | " + msg + "\n");
+          this.dumper.structuredLogger.testEnd("browser-test.js",
+                                               "FAIL",
+                                               "PASS",
+                                               msg);

         win.close();
       }
@@ -250,22 +253,23 @@ Tester.prototype = {
     Services.obs.removeObserver(this, "chrome-document-global-created");
     Services.obs.removeObserver(this, "content-document-global-created");
     this.Promise.Debugging.clearUncaughtErrorObservers();
-    this.dumper.dump("\nINFO TEST-START | Shutdown\n");
+    this.dumper.structuredLogger.info("TEST-START | Shutdown");

     if (this.tests.length) {
-      this.dumper.dump("Browser Chrome Test Summary\n");
-
-      this.dumper.dump("\tPassed: " + passCount + "\n" +
-                       "\tFailed: " + failCount + "\n" +
-                       "\tTodo: " + todoCount + "\n");
+      this.dumper.structuredLogger.info("Browser Chrome Test Summary");
+      this.dumper.structuredLogger.info("Passed: " + passCount);
+      this.dumper.structuredLogger.info("Failed: " + failCount);
+      this.dumper.structuredLogger.info("Todo: " + todoCount);
     } else {
-      this.dumper.dump("TEST-UNEXPECTED-FAIL | (browser-test.js) | " +
-                       "No tests to run. Did you pass an invalid --test-path?\n");
+      this.dumper.structuredLogger.testEnd("browser-test.js",
+                                           "FAIL",
+                                           "PASS",
+                                           "No tests to run. Did you pass an invalid --test-path?");
     }
-    this.dumper.dump("\n*** End BrowserChrome Test Results ***\n");
+    this.dumper.structuredLogger.info("*** End BrowserChrome Test Results ***");

     this.dumper.done();

     // Tests complete, notify the callback and return
     this.callback(this.tests);
     this.callback = null;
@@ -414,7 +418,7 @@ Tester.prototype = {
           .getService(Ci.nsIXULRuntime)
           .processType == Ci.nsIXULRuntime.PROCESS_TYPE_DEFAULT)
       {
-        this.MemoryStats.dump((l) => { this.dumper.dump(l + "\n"); },
+        this.MemoryStats.dump(this.dumper.structuredLogger,
                               this.currentTestIndex,
                               this.currentTest.path,
                               gConfig.dumpOutputDirectory,
@@ -424,7 +428,10 @@ Tester.prototype = {

       // Note the test run time
       let time = Date.now() - this.lastStartTime;
-      this.dumper.dump("INFO TEST-END | " + this.currentTest.path + " | finished in " + time + "ms\n");
+      this.dumper.structuredLogger.testEnd(this.currentTest.path,
+                                           "OK",
+                                           undefined,
+                                           "finished in " + time + "ms");
       this.currentTest.setDuration(time);

       if (this.runUntilFailure && this.currentTest.failCount > 0) {
@@ -530,7 +537,7 @@ Tester.prototype = {
   }),

   execTest: function Tester_execTest() {
-    this.dumper.dump("TEST-START | " + this.currentTest.path + "\n");
+    this.dumper.structuredLogger.testStart(this.currentTest.path);

     this.SimpleTest.reset();

@@ -699,10 +706,14 @@ function testResult(aCondition, aName, aDiag, aIsTodo, aStack) {
   this.todo = aIsTodo;

   if (this.pass) {
-    if (aIsTodo)
-      this.result = "TEST-KNOWN-FAIL";
-    else
-      this.result = "TEST-PASS";
+    if (aIsTodo) {
+      this.status = "FAIL";
+      this.expected = "FAIL";
+    } else {
+      this.status = "PASS";
+      this.expected = "PASS";
+    }

   } else {
     if (aDiag) {
       if (typeof aDiag == "object" && "fileName" in aDiag) {
@@ -725,10 +736,13 @@ function testResult(aCondition, aName, aDiag, aIsTodo, aStack) {
       }
       this.msg += Task.Debugging.generateReadableStack(normalized, "    ");
     }
-    if (aIsTodo)
-      this.result = "TEST-UNEXPECTED-PASS";
-    else
-      this.result = "TEST-UNEXPECTED-FAIL";
+    if (aIsTodo) {
+      this.status = "PASS";
+      this.expected = "FAIL";
+    } else {
+      this.status = "FAIL";
+      this.expected = "PASS";
+    }

     if (gConfig.debugOnFailure) {
       // You've hit this line because you requested to break into the
@@ -741,7 +755,6 @@ function testResult(aCondition, aName, aDiag, aIsTodo, aStack) {
 function testMessage(aName) {
   this.msg = aName || "";
   this.info = true;
-  this.result = "TEST-INFO";
 }

 // Need to be careful adding properties to this object, since its properties

@@ -9,7 +9,7 @@ Components.utils.import("resource://gre/modules/NetUtil.jsm");

 /*
  * getChromeURI converts a URL to a URI
- *
+ *
  * url: string of a URL (http://mochi.test/test.html)
  * returns: a nsiURI object representing the given URL
  *
@@ -22,7 +22,7 @@ function getChromeURI(url) {

 /*
  * Convert a URL (string) into a nsIURI or NSIJARURI
- * This is intended for URL's that are on a file system
+ * This is intended for URL's that are on a file system
  * or in packaged up in an extension .jar file
  *
  * url: a string of a url on the local system(http://localhost/blah.html)
@@ -64,7 +64,7 @@ function getChromeDir(resolvedURI) {
 * aTestPath: passed in testPath value from command line such as: dom/tests/mochitest
 * aDir: the test dir to append to the baseURL after getting a directory interface
 *
- * As a note, this is hardcoded to the .jar structure we use for mochitest.
+ * As a note, this is hardcoded to the .jar structure we use for mochitest.
 * Please don't assume this works for all jar files.
 */
 function getMochitestJarListing(aBasePath, aTestPath, aDir)
@@ -122,11 +122,11 @@ function zList(base, zReader, baseJarName, recurse) {
   var links = {};
   var count = 0;
   var fileArray = [];

   while(dirs.hasMore()) {
     var entryName = dirs.getNext();
     if (entryName.substr(-1) == '/' && entryName.split('/').length == (base.split('/').length + 1) ||
-        (entryName.substr(-1) != '/' && entryName.split('/').length == (base.split('/').length))) {
+        (entryName.substr(-1) != '/' && entryName.split('/').length == (base.split('/').length))) {
       fileArray.push(entryName);
     }
   }
@@ -178,7 +178,7 @@ function getFileListing(basePath, testPath, dir, srvScope)

   if (testPath != undefined) {
     var extraPath = testPath;
-
+
     var fileNameRegexp = /(browser|test)_.+\.(xul|html|js)$/;

     // Invalid testPath...
@@ -341,7 +341,7 @@ function getTestFilePath(path) {
 }

 /*
- * Simple utility function to take the directory structure in jarentryname and
+ * Simple utility function to take the directory structure in jarentryname and
 * translate that to a path of a nsILocalFile.
 */
 function buildRelativePath(jarentryname, destdir, basepath)

@@ -24,6 +24,7 @@ import re
 import shutil
 import signal
+import subprocess
 import sys
 import tempfile
 import time
 import traceback
@@ -38,22 +39,14 @@ from mochitest_options import MochitestOptions
 from mozprofile import Profile, Preferences
 from mozprofile.permissions import ServerLocations
 from urllib import quote_plus as encodeURIComponent
+from mozlog.structured.formatters import TbplFormatter
+from mozlog.structured.handlers import StreamHandler
+from mozlog.structured.structuredlog import StructuredLogger

 # This should use the `which` module already in tree, but it is
 # not yet present in the mozharness environment
 from mozrunner.utils import findInPath as which

-# set up logging handler a la automation.py.in for compatability
-import logging
-log = logging.getLogger()
-def resetGlobalLog():
-  while log.handlers:
-    log.removeHandler(log.handlers[0])
-  handler = logging.StreamHandler(sys.stdout)
-  log.setLevel(logging.INFO)
-  log.addHandler(handler)
-resetGlobalLog()
-
 ###########################
 # Option for NSPR logging #
 ###########################
@@ -64,6 +57,142 @@ resetGlobalLog()

 NSPR_LOG_MODULES = ""

+####################
+# LOG HANDLING     #
+####################
+
+### output processing
+class MochitestFormatter(TbplFormatter):
+    log_num = 0
+
+    def __init__(self):
+        super(MochitestFormatter, self).__init__()
+
+    def __call__(self, data):
+        tbpl_output = super(MochitestFormatter, self).__call__(data)
+        output = '%d INFO %s' % (MochitestFormatter.log_num, tbpl_output)
+        MochitestFormatter.log_num += 1
+        return output
+
+### output processing
+class MessageLogger(object):
+    """File-like object for logging messages (structured logs)"""
+    BUFFERING_THRESHOLD = 100
+    # This is a delimiter used by the JS side to avoid logs interleaving
+    DELIMITER = u'\ue175\uee31\u2c32\uacbf'
+    BUFFERED_ACTIONS = set(['test_status', 'log'])
+    VALID_ACTIONS = set(['suite_start', 'suite_end', 'test_start', 'test_end',
+                         'test_status', 'log',
+                         'buffering_on', 'buffering_off'])
+
+    def __init__(self, logger, buffering=True, name='mochitest'):
+        self.logger = logger
+        self.buffering = buffering
+        self.tests_started = False
+
+        # Message buffering
+        self.buffered_messages = []
+
+    def valid_message(self, obj):
+        """True if the given object is a valid structured message (only does a superficial validation)"""
+        return isinstance(obj, dict) and 'action' in obj and obj['action'] in MessageLogger.VALID_ACTIONS
+
+    def parse_line(self, line):
+        """Takes a given line of input (structured or not) and returns a list of structured messages"""
+        line = line.rstrip().decode("UTF-8", "replace")
+
+        messages = []
+        for fragment in line.split(MessageLogger.DELIMITER):
+            if not fragment:
+                continue
+            try:
+                message = json.loads(fragment)
+                if not self.valid_message(message):
+                    message = dict(action='log', level='info', message=fragment, unstructured=True)
+            except ValueError:
+                message = dict(action='log', level='info', message=fragment, unstructured=True)
+            messages.append(message)
+
+        return messages
+
+    def process_message(self, message):
+        """Processes a structured message. Takes into account buffering, errors, ..."""
+        if not self.tests_started and message['action'] == 'test_start':
+            self.tests_started = True
+
+        # Activation/deactivating message buffering from the JS side
+        if message['action'] == 'buffering_on':
+            self.buffering = True
+            return
+        if message['action'] == 'buffering_off':
+            self.buffering = False
+            return
+
+        unstructured = False
+        if 'unstructured' in message:
+            unstructured = True
+            message.pop('unstructured')
+
+        # If we don't do any buffering, or the tests haven't started, or the message was unstructured, it is directly logged
+        if not self.buffering or unstructured or not self.tests_started:
+            self.logger.log_raw(message)
+            return
+
+        # If a test ended, we clean the buffer
+        if message['action'] == 'test_end':
+            self.buffered_messages = []
+
+        # Buffering logic; Also supports "raw" errors (in log messages) because some tests manually dump 'TEST-UNEXPECTED-FAIL'
+        is_error = 'expected' in message or (message['action'] == 'log' and message['message'].startswith('TEST-UNEXPECTED'))
+        if not is_error and message['action'] not in self.BUFFERED_ACTIONS:
+            self.logger.log_raw(message)
+            return
+
+        # test_status messages buffering
+        if is_error:
+            if self.buffered_messages:
+                number_messages = min(self.BUFFERING_THRESHOLD, len(self.buffered_messages))
+                self.logger.info("dumping last {0} message(s)".format(number_messages))
+                self.logger.info("if you need more context, please use SimpleTest.requestCompleteLog() in your test")
+                # Dumping previously buffered messages
+                self.dump_buffered(limit=True)
+
+            # Logging the error message
+            self.logger.log_raw(message)
+        else:
+            # Buffering the message
+            self.buffered_messages.append(message)
+
+    def write(self, line):
+        messages = self.parse_line(line)
+        for message in messages:
+            self.process_message(message)
+        return messages
+
+    def flush(self):
+        sys.stdout.flush()
+
+    def dump_buffered(self, limit=False):
+        if limit:
+            dumped_messages = self.buffered_messages[-self.BUFFERING_THRESHOLD:]
+        else:
+            dumped_messages = self.buffered_messages
+
+        for buf_msg in dumped_messages:
+            self.logger.log_raw(buf_msg)
+        # Cleaning the list of buffered messages
+        self.buffered_messages = []
+
+    def finish(self):
+        self.dump_buffered()
+        self.buffering = False
+        self.logger.suite_end()
+
+# Global logger
+log = StructuredLogger('mochitest')
+stream_handler = StreamHandler(stream=sys.stdout, formatter=MochitestFormatter())
+log.add_handler(stream_handler)
+
 ####################
 # PROCESS HANDLING #
 ####################

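Usage sketch for the MessageLogger added above, assuming the class is importable as written: a single physical stdout line can carry several DELIMITER-separated JSON fragments, and anything that fails to parse or validate is wrapped as an unstructured 'log' message. StubLogger is a hypothetical stand-in for the real StructuredLogger (this is Python 2 era code, so parse_line expects bytes):

```python
# Illustrative driver for MessageLogger.parse_line / process_message.
import json

class StubLogger(object):
    def log_raw(self, message):
        print(json.dumps(message))
    def info(self, message):
        print(message)
    def suite_end(self):
        pass

logger = MessageLogger(logger=StubLogger(), buffering=False)
line = (u'{"action": "test_start", "test": "test_a.html"}'
        + MessageLogger.DELIMITER
        + u'plain TBPL-style output').encode('utf-8')
# Yields one structured test_start message and one unstructured log message
for message in logger.parse_line(line):
    logger.process_message(message)
```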
@@ -81,7 +210,7 @@ def killPid(pid):
   try:
     os.kill(pid, getattr(signal, "SIGKILL", signal.SIGTERM))
   except Exception, e:
-    log.info("Failed to kill process %d: %s", pid, str(e))
+    log.info("Failed to kill process %d: %s" % (pid, str(e)))

 if mozinfo.isWin:
   import ctypes, ctypes.wintypes, time, msvcrt
@@ -173,9 +302,9 @@ class MochitestServer(object):
     command = [xpcshell] + args
     self._process = mozprocess.ProcessHandler(command, cwd=SCRIPT_DIR, env=env)
     self._process.run()
-    log.info("%s : launching %s", self.__class__.__name__, command)
+    log.info("%s : launching %s" % (self.__class__.__name__, command))
     pid = self._process.pid
-    log.info("runtests.py | Server pid: %d", pid)
+    log.info("runtests.py | Server pid: %d" % pid)

   def ensureReady(self, timeout):
     assert timeout >= 0
@@ -239,7 +368,7 @@ class WebSocketServer(object):
     self._process = mozprocess.ProcessHandler(cmd, cwd=SCRIPT_DIR)
     self._process.run()
     pid = self._process.pid
-    log.info("runtests.py | Websocket server pid: %d", pid)
+    log.info("runtests.py | Websocket server pid: %d" % pid)

   def stop(self):
     self._process.kill()
@@ -378,8 +507,6 @@ class MochitestUtilsMixin(object):
       self.urlOpts.append("dumpAboutMemoryAfterTest=true")
     if options.dumpDMDAfterTest:
       self.urlOpts.append("dumpDMDAfterTest=true")
-    if options.quiet:
-      self.urlOpts.append("quiet=true")

   def getTestFlavor(self, options):
     if options.browserChrome:
@@ -471,6 +598,10 @@ class MochitestUtilsMixin(object):
         continue
       paths.append(test)

+    # suite_start message
+    flat_paths = [p['path'] for p in paths]
+    self.message_logger.logger.suite_start(flat_paths)
+
     # Bug 883865 - add this functionality into manifestparser
     with open(os.path.join(SCRIPT_DIR, 'tests.json'), 'w') as manifestFile:
       manifestFile.write(json.dumps({'tests': paths}))
@@ -530,21 +661,21 @@ class MochitestUtilsMixin(object):
         log.info('Stopping web server')
         self.server.stop()
       except Exception:
-        log.exception('Exception when stopping web server')
+        log.critical('Exception when stopping web server')

     if self.wsserver is not None:
       try:
         log.info('Stopping web socket server')
         self.wsserver.stop()
       except Exception:
-        log.exception('Exception when stopping web socket server');
+        log.critical('Exception when stopping web socket server');

     if self.sslTunnel is not None:
       try:
         log.info('Stopping ssltunnel')
         self.sslTunnel.stop()
       except Exception:
-        log.exception('Exception stopping ssltunnel');
+        log.critical('Exception stopping ssltunnel');

   def copyExtraFilesToProfile(self, options):
     "Copy extra files or dirs specified on the command line to the testing profile."
@@ -556,7 +687,7 @@ class MochitestUtilsMixin(object):
         dest = os.path.join(options.profilePath, os.path.basename(abspath))
         shutil.copytree(abspath, dest)
       else:
-        log.warning("runtests.py | Failed to copy %s to profile", abspath)
+        log.warning("runtests.py | Failed to copy %s to profile" % abspath)

   def installChromeJar(self, chrome, options):
     """
@@ -601,7 +732,7 @@ toolbar#nav-bar {

     # Call installChromeJar().
     if not os.path.isdir(os.path.join(SCRIPT_DIR, self.jarDir)):
-      log.testFail("invalid setup: missing mochikit extension")
+      log.error(message="TEST-UNEXPECTED-FAIL | invalid setup: missing mochikit extension")
       return None

     # Support Firefox (browser), B2G (shell), SeaMonkey (navigator), and Webapp
@@ -707,7 +838,7 @@ class SSLTunnel:
     self.process = mozprocess.ProcessHandler([ssltunnel, self.configFile],
                                              env=env)
     self.process.run()
-    log.info("INFO | runtests.py | SSL tunnel pid: %d", self.process.pid)
+    log.info("runtests.py | SSL tunnel pid: %d" % self.process.pid)

   def stop(self):
     """ Stops the SSL Tunnel and cleans up """
@@ -841,6 +972,9 @@ class Mochitest(MochitestUtilsMixin):
   def __init__(self):
     super(Mochitest, self).__init__()

+    # Structured logs parser
+    self.message_logger = MessageLogger(logger=log)
+
     # environment function for browserEnv
     self.environment = environment

@@ -984,7 +1118,7 @@ class Mochitest(MochitestUtilsMixin):
     # TODO: this should really be upstreamed somewhere, maybe mozprofile
     certificateStatus = self.fillCertificateDB(options)
     if certificateStatus:
-      log.info("TEST-UNEXPECTED-FAIL | runtests.py | Certificate integration failed")
+      log.error("TEST-UNEXPECTED-FAIL | runtests.py | Certificate integration failed")
       return None

     return manifest
@@ -1040,7 +1174,7 @@ class Mochitest(MochitestUtilsMixin):
       if os.path.exists(options.pidFile + ".xpcshell.pid"):
         os.remove(options.pidFile + ".xpcshell.pid")
     except:
-      log.warn("cleaning up pidfile '%s' was unsuccessful from the test harness", options.pidFile)
+      log.warning("cleaning up pidfile '%s' was unsuccessful from the test harness" % options.pidFile)
     options.manifestFile = None

   def dumpScreen(self, utilityPath):
@@ -1083,12 +1217,12 @@ class Mochitest(MochitestUtilsMixin):
     """Look for hung processes"""

     if not os.path.exists(processLog):
-      log.info('Automation Error: PID log not found: %s', processLog)
+      log.info('Automation Error: PID log not found: %s' % processLog)
       # Whilst no hung process was found, the run should still display as a failure
       return True

     # scan processLog for zombies
-    log.info('INFO | zombiecheck | Reading PID log: %s', processLog)
+    log.info('zombiecheck | Reading PID log: %s' % processLog)
     processList = []
     pidRE = re.compile(r'launched child process (\d+)$')
     with open(processLog) as processLogFD:
@@ -1101,10 +1235,10 @@ class Mochitest(MochitestUtilsMixin):
     # kill zombies
     foundZombie = False
     for processPID in processList:
-      log.info("INFO | zombiecheck | Checking for orphan process with PID: %d", processPID)
+      log.info("zombiecheck | Checking for orphan process with PID: %d" % processPID)
       if isPidAlive(processPID):
         foundZombie = True
-        log.info("TEST-UNEXPECTED-FAIL | zombiecheck | child process %d still alive after shutdown", processPID)
+        log.error("TEST-UNEXPECTED-FAIL | zombiecheck | child process %d still alive after shutdown" % processPID)
         self.killAndGetStack(processPID, utilityPath, debuggerInfo, dump_screen=not debuggerInfo)

     return foundZombie
@@ -1154,12 +1288,16 @@ class Mochitest(MochitestUtilsMixin):
               webapprtChrome=False,
               screenshotOnFail=False,
               testPath=None,
-              bisectChunk=None):
+              bisectChunk=None,
+              quiet=False):
     """
     Run the app, log the duration it took to execute, return the status code.
     Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing for |timeout| seconds.
     """

+    # configure the message logger buffering
+    self.message_logger.buffering = quiet
+
     # debugger information
     interactive = False
     debug_args = None
@@ -1253,7 +1391,7 @@ class Mochitest(MochitestUtilsMixin):
                                        interactive=interactive,
                                        outputTimeout=timeout)
       proc = runner.process_handler
-      log.info("INFO | runtests.py | Application pid: %d", proc.pid)
+      log.info("runtests.py | Application pid: %d" % proc.pid)

       if onLaunch is not None:
         # Allow callers to specify an onLaunch callback to be fired after the
@@ -1281,11 +1419,12 @@ class Mochitest(MochitestUtilsMixin):

       # record post-test information
       if status:
-        log.info("TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s", self.lastTestSeen, status)
+        self.message_logger.dump_buffered()
+        log.error("TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s" % (self.lastTestSeen, status))
       else:
         self.lastTestSeen = 'Main app process exited normally'

-      log.info("INFO | runtests.py | Application ran for: %s", str(datetime.now() - startTime))
+      log.info("runtests.py | Application ran for: %s" % str(datetime.now() - startTime))

       # Do a final check for zombie child processes.
       zombieProcesses = self.checkForZombies(processLog, utilityPath, debuggerInfo)
@@ -1567,7 +1706,8 @@ class Mochitest(MochitestUtilsMixin):
                              webapprtChrome=options.webapprtChrome,
                              screenshotOnFail=options.screenshotOnFail,
                              testPath=options.testPath,
-                             bisectChunk=options.bisectChunk
+                             bisectChunk=options.bisectChunk,
+                             quiet=options.quiet
                              )
     except KeyboardInterrupt:
       log.info("runtests.py | Received keyboard interrupt.\n");
@@ -1592,6 +1732,8 @@ class Mochitest(MochitestUtilsMixin):

     log.info("runtests.py | Running tests: end.")

+    self.message_logger.finish()
+
     if self.manifest is not None:
       self.cleanup(options)

@@ -1601,13 +1743,18 @@ class Mochitest(MochitestUtilsMixin):
     """handle process output timeout"""
     # TODO: bug 913975 : _processOutput should call self.processOutputLine one more time one timeout (I think)
     if testPath:
-      log.info("TEST-UNEXPECTED-FAIL | %s | application timed out after %d seconds with no output on %s", self.lastTestSeen, int(timeout), testPath)
+      error_message = "TEST-UNEXPECTED-TIMEOUT | %s | application timed out after %d seconds with no output on %s" % (self.lastTestSeen, int(timeout), testPath)
     else:
-      log.info("TEST-UNEXPECTED-FAIL | %s | application timed out after %d seconds with no output", self.lastTestSeen, int(timeout))
+      error_message = "TEST-UNEXPECTED-TIMEOUT | %s | application timed out after %d seconds with no output" % (self.lastTestSeen, int(timeout))
+
+    self.message_logger.dump_buffered()
+    self.message_logger.buffering = False
+    log.error(error_message)

     browserProcessId = browserProcessId or proc.pid
     self.killAndGetStack(browserProcessId, utilityPath, debuggerInfo, dump_screen=not debuggerInfo)

   ### output processing

   class OutputHandler(object):
     """line output handler for mozrunner"""
@@ -1638,26 +1785,34 @@ class Mochitest(MochitestUtilsMixin):

     def processOutputLine(self, line):
       """per line handler of output for mozprocess"""
-      for handler in self.outputHandlers():
-        line = handler(line)
-      if self.bisectChunk:
-        self.record_result(line)
-        self.first_error(line)
+      # Parsing the line (by the structured messages logger).
+      messages = self.harness.message_logger.parse_line(line)
+
+      for message in messages:
+        # Passing the message to the handlers
+        for handler in self.outputHandlers():
+          message = handler(message)
+
+        # Processing the message by the logger
+        self.harness.message_logger.process_message(message)

     __call__ = processOutputLine

     def outputHandlers(self):
       """returns ordered list of output handlers"""
-      return [self.fix_stack,
-              self.format,
-              self.record_last_test,
-              self.dumpScreenOnTimeout,
-              self.dumpScreenOnFail,
-              self.metro_subprocess_id,
-              self.trackShutdownLeaks,
-              self.trackLSANLeaks,
-              self.log,
-              self.countline,
-              ]
+      handlers = [self.fix_stack,
+                  self.record_last_test,
+                  self.dumpScreenOnTimeout,
+                  self.dumpScreenOnFail,
+                  self.trackShutdownLeaks,
+                  self.trackLSANLeaks,
+                  self.countline,
+                  ]
+      if self.bisectChunk:
+        handlers.append(self.record_result)
+        handlers.append(self.first_error)
+
+      return handlers

     def stackFixer(self):
       """

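The rewritten processOutputLine pipeline above chains handlers that each take a message dict and return it, possibly modified, before the logger consumes it. A condensed sketch of that composition (the handlers here are trivial stand-ins, not the committed ones):

```python
# Sketch of the handler-chaining pattern in OutputHandler (illustrative).
def fix_stack(message):
    if message['action'] == 'log':
        message['message'] = message['message'].replace('0xdeadbeef', '<addr>')
    return message

def record_last_test(message):
    if message['action'] == 'test_start':
        print('last test seen: ' + message['test'])
    return message

def process(message, handlers=(fix_stack, record_last_test)):
    # Each handler returns the (possibly modified) message for the next one
    for handler in handlers:
        message = handler(message)
    return message

process({'action': 'test_start', 'test': 'test_b.html'})
```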
@ -1701,7 +1856,7 @@ class Mochitest(MochitestUtilsMixin):
|
|||
self.stackFixerProcess.communicate()
|
||||
status = self.stackFixerProcess.returncode
|
||||
if status and not didTimeout:
|
||||
log.info("TEST-UNEXPECTED-FAIL | runtests.py | Stack fixer process exited with code %d during test run", status)
|
||||
log.info("TEST-UNEXPECTED-FAIL | runtests.py | Stack fixer process exited with code %d during test run" % status)
|
||||
|
||||
if self.shutdownLeaks:
|
||||
self.shutdownLeaks.process()
|
||||
|
@ -1709,34 +1864,38 @@ class Mochitest(MochitestUtilsMixin):
|
|||
if self.lsanLeaks:
|
||||
self.lsanLeaks.process()
|
||||
|
||||
# output message handlers:
|
||||
# these take a message and return a message
|
||||
|
||||
# output line handlers:
|
||||
# these take a line and return a line
|
||||
def record_result(self, line):
|
||||
if "TEST-START" in line: #by default make the result key equal to pass.
|
||||
key = line.split('|')[-1].split('/')[-1].strip()
|
||||
def record_result(self, message):
|
||||
if message['action'] == 'test_start': #by default make the result key equal to pass.
|
||||
key = message['test'].split('/')[-1].strip()
|
||||
self.harness.result[key] = "PASS"
|
||||
elif "TEST-UNEXPECTED" in line:
|
||||
key = line.split('|')[-2].split('/')[-1].strip()
|
||||
self.harness.result[key] = "FAIL"
|
||||
elif "TEST-KNOWN-FAIL" in line:
|
||||
key = line.split('|')[-2].split('/')[-1].strip()
|
||||
self.harness.result[key] = "TODO"
|
||||
return line
|
||||
elif message['action'] in ['test_end', 'test_status']:
|
||||
if 'expected' in message:
|
||||
key = message['test'].split('/')[-1].strip()
|
||||
self.harness.result[key] = "FAIL"
|
||||
elif message['status'] == 'FAIL':
|
||||
key = message['test'].split('/')[-1].strip()
|
||||
self.harness.result[key] = "TODO"
|
||||
return message
|
||||
|
||||
def first_error(self, line):
|
||||
if "TEST-UNEXPECTED-FAIL" in line:
|
||||
key = line.split('|')[-2].split('/')[-1].strip()
|
||||
def first_error(self, message):
|
||||
if 'expected' in message and message['status'] == 'FAIL':
|
||||
key = message['test'].split('/')[-1].strip()
|
||||
if key not in self.harness.expectedError:
|
||||
self.harness.expectedError[key] = line.split('|')[-1].strip()
|
||||
return line
|
||||
self.harness.expectedError[key] = message['message'].strip()
|
||||
return message
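
A note on the convention both handlers rely on: mozlog only attaches an
'expected' field to test_status/test_end messages when the outcome differs
from the expectation, so its mere presence marks an unexpected result. An
illustrative sketch (field values are made up, not captured output):

    unexpected = {'action': 'test_status', 'test': 'dom/test_a.html',
                  'subtest': 'check', 'status': 'FAIL', 'expected': 'PASS'}
    known_fail = {'action': 'test_status', 'test': 'dom/test_b.html',
                  'subtest': 'check', 'status': 'FAIL'}  # failed as expected

    def is_unexpected(message):
        # Same test that record_result/first_error use above.
        return 'expected' in message

    assert is_unexpected(unexpected) and not is_unexpected(known_fail)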

        def countline(self, line):
        def countline(self, message):
            if message['action'] != 'log':
                return message
            line = message['message']
            val = 0
            try:
                val = int(line.split(':')[-1].strip())
            except ValueError, e:
                return line
            except ValueError:
                return message

            if "Passed:" in line:
                self.harness.countpass += val

@ -1744,56 +1903,42 @@ class Mochitest(MochitestUtilsMixin):
                self.harness.countfail += val
            elif "Todo:" in line:
                self.harness.counttodo += val
            return line
            return message

        def fix_stack(self, line):
            if self.stackFixerFunction:
                return self.stackFixerFunction(line)
            return line
        def fix_stack(self, message):
            if message['action'] == 'log' and self.stackFixerFunction:
                message['message'] = self.stackFixerFunction(message['message'])
            return message

        def format(self, line):
            """format the line"""
            return line.rstrip().decode("UTF-8", "ignore")

        def record_last_test(self, line):
        def record_last_test(self, message):
            """record last test on harness"""
            if "TEST-START" in line and "|" in line:
                self.harness.lastTestSeen = line.split("|")[1].strip()
            return line
            if message['action'] == 'test_start':
                self.harness.lastTestSeen = message['test']
            return message

        def dumpScreenOnTimeout(self, line):
            if not self.dump_screen_on_fail and self.dump_screen_on_timeout and "TEST-UNEXPECTED-FAIL" in line and "Test timed out" in line:
        def dumpScreenOnTimeout(self, message):
            if (not self.dump_screen_on_fail
                    and self.dump_screen_on_timeout
                    and 'expected' in message and message['status'] == 'FAIL'
                    and 'message' in message
                    and "Test timed out" in message['message']):
                self.harness.dumpScreen(self.utilityPath)
            return line
            return message

        def dumpScreenOnFail(self, line):
            if self.dump_screen_on_fail and "TEST-UNEXPECTED-FAIL" in line:
        def dumpScreenOnFail(self, message):
            if self.dump_screen_on_fail and 'expected' in message and message['status'] == 'FAIL':
                self.harness.dumpScreen(self.utilityPath)
            return line
            return message

        def metro_subprocess_id(self, line):
            """look for metro browser subprocess id"""
            if "METRO_BROWSER_PROCESS" in line:
                index = line.find("=")
                if index != -1:
                    self.browserProcessId = line[index+1:].rstrip()
                    log.info("INFO | runtests.py | metro browser sub process id detected: %s", self.browserProcessId)
            return line
        def trackLSANLeaks(self, message):
            if self.lsanLeaks and message['action'] == 'log':
                self.lsanLeaks.log(message['message'])
            return message

        def trackShutdownLeaks(self, line):
        def trackShutdownLeaks(self, message):
            if self.shutdownLeaks:
                self.shutdownLeaks.log(line)
            return line

        def trackLSANLeaks(self, line):
            if self.lsanLeaks:
                self.lsanLeaks.log(line)
            return line

        def log(self, line):
            log.info(line)
            return line

                self.shutdownLeaks.log(message)
            return message

    def makeTestConfig(self, options):
        "Creates a test configuration file for customizing test execution."

@ -16,17 +16,18 @@ sys.path.insert(0, here)
from runtests import Mochitest
from runtests import MochitestUtilsMixin
from runtests import MochitestServer
from runtests import MessageLogger
from runtests import MochitestFormatter
from mochitest_options import B2GOptions, MochitestOptions

from marionette import Marionette

from mozdevice import DeviceManagerADB
from mozprofile import Profile, Preferences
import mozlog
import mozinfo
from mozlog.structured.handlers import StreamHandler
from mozlog.structured.structuredlog import StructuredLogger

log = mozlog.getLogger('Mochitest')
log = StructuredLogger('Mochitest')
stream_handler = StreamHandler(stream=sys.stdout, formatter=MochitestFormatter())
log.add_handler(stream_handler)

class B2GMochitest(MochitestUtilsMixin):
    marionette = None

@ -45,6 +46,9 @@ class B2GMochitest(MochitestUtilsMixin):
        self.test_script_args = [self.out_of_process]
        self.product = 'b2g'

        # structured logging
        self.message_logger = MessageLogger(logger=log)

        if profile_data_dir:
            self.preferences = [os.path.join(profile_data_dir, f)
                                for f in os.listdir(profile_data_dir) if f.startswith('pref')]

@ -119,6 +123,9 @@ class B2GMochitest(MochitestUtilsMixin):
        manifest = self.build_profile(options)
        self.leak_report_file = os.path.join(options.profilePath, "runtests_leaks.log")

        # configuring the message logger's buffering
        self.message_logger.buffering = options.quiet

        if options.debugger or not options.autorun:
            timeout = None
        else:

@ -132,7 +139,18 @@ class B2GMochitest(MochitestUtilsMixin):
        log.info("runtestsb2g.py | Running tests: start.")
        status = 0
        try:
            def on_output(line):
                messages = self.message_logger.write(line)
                for message in messages:
                    if message['action'] == 'test_start':
                        self.runner.last_test = message['test']

            # The logging will be handled by on_output, so we set the stream to None
            process_args = {'processOutputLine': on_output,
                            'stream': None}
            self.marionette_args['process_args'] = process_args
            self.marionette_args['profile'] = self.profile

            self.marionette = Marionette(**self.marionette_args)
            self.runner = self.marionette.runner
            self.app_ctx = self.runner.app_ctx

@ -171,6 +189,7 @@ class B2GMochitest(MochitestUtilsMixin):
            self.marionette.execute_script(self.test_script,
                                           script_args=self.test_script_args)
            status = self.runner.wait()

            if status is None:
                # the runner has timed out
                status = 124

@ -187,6 +206,7 @@ class B2GMochitest(MochitestUtilsMixin):
            self.stopServers()

            log.info("runtestsb2g.py | Running tests: end.")
            self.message_logger.finish()
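
The MessageLogger wiring above follows a small lifecycle: buffer low-value
'log' messages while options.quiet is set, flush that context when something
unexpected happens, and drop whatever is left in finish(). A minimal sketch
of the idea, assuming a log_raw-style sink (the real MessageLogger differs
in details):

    class BufferingLogger(object):
        def __init__(self, logger, buffering=False):
            self.logger = logger          # sink with a log_raw(dict) method
            self.buffering = buffering
            self._buffer = []

        def process_message(self, message):
            if self.buffering and message['action'] == 'log':
                self._buffer.append(message)   # hold routine output
                return
            if 'expected' in message:
                # Unexpected result: replay buffered context first.
                for buffered in self._buffer:
                    self.logger.log_raw(buffered)
                self._buffer = []
            self.logger.log_raw(message)

        def finish(self):
            self._buffer = []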

        if manifest is not None:
            self.cleanup(manifest, options)

@ -2,33 +2,36 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import sys
import os
import time
import tempfile
import re
import traceback
import shutil
import math
import base64
import json
import math
import os
import re
import shutil
import sys
import tempfile
import traceback

sys.path.insert(0, os.path.abspath(os.path.realpath(os.path.dirname(__file__))))

from automation import Automation
from remoteautomation import RemoteAutomation, fennecLogcatFilters
from runtests import Mochitest
from runtests import MochitestServer
from runtests import Mochitest, MessageLogger, MochitestFormatter
from mochitest_options import MochitestOptions

import devicemanager
import droid
import manifestparser
import mozinfo
import mozlog
import moznetwork
from mozlog.structured.handlers import StreamHandler
from mozlog.structured.structuredlog import StructuredLogger

log = StructuredLogger('Mochi-Remote')
stream_handler = StreamHandler(stream=sys.stdout, formatter=MochitestFormatter())
log.add_handler(stream_handler)

SCRIPT_DIR = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
log = mozlog.getLogger('Mochi-Remote')

class RemoteOptions(MochitestOptions):

@ -182,19 +185,19 @@ class RemoteOptions(MochitestOptions):
        # Robocop specific options
        if options.robocopIni != "":
            if not os.path.exists(options.robocopIni):
                log.error("Unable to find specified robocop .ini manifest '%s'", options.robocopIni)
                log.error("Unable to find specified robocop .ini manifest '%s'" % options.robocopIni)
                return None
            options.robocopIni = os.path.abspath(options.robocopIni)

        if options.robocopApk != "":
            if not os.path.exists(options.robocopApk):
                log.error("Unable to find robocop APK '%s'", options.robocopApk)
                log.error("Unable to find robocop APK '%s'" % options.robocopApk)
                return None
            options.robocopApk = os.path.abspath(options.robocopApk)

        if options.robocopIds != "":
            if not os.path.exists(options.robocopIds):
                log.error("Unable to find specified robocop IDs file '%s'", options.robocopIds)
                log.error("Unable to find specified robocop IDs file '%s'" % options.robocopIds)
                return None
            options.robocopIds = os.path.abspath(options.robocopIds)

@ -225,9 +228,9 @@ class MochiRemote(Mochitest):
    _automation = None
    _dm = None
    localProfile = None
    logLines = []
    logMessages = []

    def __init__(self, automation, devmgr, options):
    def __init__(self, automation, devmgr, options, message_logger=None):
        self._automation = automation
        Mochitest.__init__(self)
        self._dm = devmgr

@ -239,13 +242,15 @@ class MochiRemote(Mochitest):
        self._automation.deleteANRs()
        self.certdbNew = True

        # structured logging
        self.message_logger = message_logger or MessageLogger(logger=log)

    def cleanup(self, options):
        if self._dm.fileExists(self.remoteLog):
            self._dm.getFile(self.remoteLog, self.localLog)
            self._dm.removeFile(self.remoteLog)
        else:
            log.warn("Unable to retrieve log file (%s) from remote device",
                     self.remoteLog)
            log.warning("Unable to retrieve log file (%s) from remote device" % self.remoteLog)
        self._dm.removeDir(self.remoteProfile)
        Mochitest.cleanup(self, options)

@ -295,7 +300,7 @@ class MochiRemote(Mochitest):
        ]
        options.xrePath = self.findPath(paths)
        if options.xrePath == None:
            log.error("unable to find xulrunner path for %s, please specify with --xre-path", os.name)
            log.error("unable to find xulrunner path for %s, please specify with --xre-path" % os.name)
            sys.exit(1)

        xpcshell = "xpcshell"

@ -309,7 +314,7 @@ class MochiRemote(Mochitest):
        options.utilityPath = self.findPath(paths, xpcshell)

        if options.utilityPath == None:
            log.error("unable to find utility path for %s, please specify with --utility-path", os.name)
            log.error("unable to find utility path for %s, please specify with --utility-path" % os.name)
            sys.exit(1)

        xpcshell_path = os.path.join(options.utilityPath, xpcshell)

@ -416,26 +421,28 @@ class MochiRemote(Mochitest):
    def addLogData(self):
        with open(self.localLog) as currentLog:
            data = currentLog.readlines()

        restart = re.compile('0 INFO SimpleTest START.*')
        reend = re.compile('([0-9]+) INFO TEST-START . Shutdown.*')
        refail = re.compile('([0-9]+) INFO TEST-UNEXPECTED-FAIL.*')
        start_found = False
        end_found = False
        fail_found = False
        for line in data:
            if reend.match(line):
            try:
                message = json.loads(line)
                if not isinstance(message, dict) or not 'action' in message:
                    continue
            except ValueError:
                continue

            if message['action'] == 'test_end':
                end_found = True
                start_found = False
                break

            if start_found and not end_found:
                # Append the line without the number to increment
                self.logLines.append(' '.join(line.split(' ')[1:]))
                self.logMessages.append(message)

            if restart.match(line):
            if message['action'] == 'test_start':
                start_found = True
            if refail.match(line):
            if 'expected' in message:
                fail_found = True
        result = 0
        if fail_found:

@ -452,12 +459,15 @@ class MochiRemote(Mochitest):
        incr = 1
        logFile = []
        logFile.append("0 INFO SimpleTest START")
        for line in self.logLines:
            if line.startswith("INFO TEST-PASS"):
                passed += 1
            elif line.startswith("INFO TEST-UNEXPECTED"):
        for message in self.logMessages:
            if 'status' not in message:
                continue

            if 'expected' in message:
                failed += 1
            elif line.startswith("INFO TEST-KNOWN"):
            elif message['status'] == 'PASS':
                passed += 1
            elif message['status'] == 'FAIL':
                todo += 1
            incr += 1

@ -489,25 +499,25 @@ class MochiRemote(Mochitest):
        printed = 0
        for name in self._dm.listFiles(screenShotDir):
            fullName = screenShotDir + "/" + name
            log.info("SCREENSHOT: FOUND: [%s]", fullName)
            log.info("SCREENSHOT: FOUND: [%s]" % fullName)
            try:
                image = self._dm.pullFile(fullName)
                encoded = base64.b64encode(image)
                log.info("SCREENSHOT: data:image/jpg;base64,%s", encoded)
                log.info("SCREENSHOT: data:image/jpg;base64,%s" % encoded)
                printed += 1
            except:
                log.info("SCREENSHOT: Could not be parsed")
                pass

        log.info("SCREENSHOT: TOTAL PRINTED: [%s]", printed)
        log.info("SCREENSHOT: TOTAL PRINTED: [%s]" % printed)

    def printDeviceInfo(self, printLogcat=False):
        try:
            if printLogcat:
                logcat = self._dm.getLogcat(filterOutRegexps=fennecLogcatFilters)
                log.info('\n'+(''.join(logcat)))
            log.info("Device info: %s", self._dm.getInfo())
            log.info("Test root: %s", self._dm.deviceRoot)
                log.info('\n' + ''.join(logcat).decode('utf-8', 'replace'))
            log.info("Device info: %s" % self._dm.getInfo())
            log.info("Test root: %s" % self._dm.deviceRoot)
        except devicemanager.DMError:
            log.warn("Error getting device information")

@ -528,9 +538,9 @@ class MochiRemote(Mochitest):
        for key, value in browserEnv.items():
            try:
                value.index(',')
                log.error("buildRobotiumConfig: browserEnv - Found a ',' in our value, unable to process value. key=%s,value=%s", key, value)
                log.error("browserEnv=%s", browserEnv)
            except ValueError, e:
                log.error("buildRobotiumConfig: browserEnv - Found a ',' in our value, unable to process value. key=%s,value=%s" % (key, value))
                log.error("browserEnv=%s" % browserEnv)
            except ValueError:
                envstr += "%s%s=%s" % (delim, key, value)
                delim = ","

@ -558,10 +568,85 @@ class MochiRemote(Mochitest):
        # it trying to set up ssltunnel as well
        kwargs['runSSLTunnel'] = False

        if 'quiet' in kwargs:
            kwargs.pop('quiet')

        return self._automation.runApp(*args, **kwargs)

class RobocopMochiRemote(MochiRemote):
    """This class maintains compatibility with the robocop logging system
    that is still unstructured."""

    def addLogData(self):
        with open(self.localLog) as currentLog:
            data = currentLog.readlines()

        restart = re.compile('SimpleTest START.*')
        reend = re.compile('TEST-START . Shutdown.*')
        refail = re.compile('TEST-UNEXPECTED-FAIL.*')
        start_found = False
        end_found = False
        fail_found = False
        for line in data:
            if reend.match(line):
                end_found = True
                start_found = False
                break

            if start_found and not end_found:
                self.logMessages.append(line)

            if restart.match(line):
                start_found = True
            if refail.match(line):
                fail_found = True
        result = 0
        if fail_found:
            result = 1
        if not end_found:
            log.error("Automation Error: Missing end of test marker (process crashed?)")
            result = 1
        return result

    def printLog(self):
        passed = 0
        failed = 0
        todo = 0
        incr = 1
        logFile = []
        logFile.append("0 INFO SimpleTest START")
        for line in self.logMessages:
            if line.startswith("TEST-PASS"):
                passed += 1
            elif line.startswith("TEST-UNEXPECTED"):
                failed += 1
            elif line.startswith("TEST-KNOWN"):
                todo += 1
            incr += 1

        logFile.append("%s INFO TEST-START | Shutdown" % incr)
        incr += 1
        logFile.append("%s INFO Passed: %s" % (incr, passed))
        incr += 1
        logFile.append("%s INFO Failed: %s" % (incr, failed))
        incr += 1
        logFile.append("%s INFO Todo: %s" % (incr, todo))
        incr += 1
        logFile.append("%s INFO SimpleTest FINISHED" % incr)

        # TODO: Consider not printing to stdout because we might be duplicating output
        print '\n'.join(logFile)
        with open(self.localLog, 'w') as localLog:
            localLog.write('\n'.join(logFile))

        if failed > 0:
            return 1
        return 0

def main():
    auto = RemoteAutomation(None, "fennec")
    message_logger = MessageLogger(logger=log)
    process_args = {'messageLogger': message_logger}
    auto = RemoteAutomation(None, "fennec", processArgs=process_args)
    parser = RemoteOptions(auto)
    options, args = parser.parse_args()

@ -585,7 +670,8 @@ def main():
    auto.setProduct(options.remoteProductName)
    auto.setAppName(options.remoteappname)

    mochitest = MochiRemote(auto, dm, options)
    mochitest_cls = RobocopMochiRemote if options.robocopIni != "" else MochiRemote
    mochitest = mochitest_cls(auto, dm, options, message_logger)

    options = parser.verifyOptions(options, mochitest)
    if (options == None):

@ -628,6 +714,8 @@ def main():
    my_tests = tests
    for test in robocop_tests:
        tests.append(test['name'])
    # suite_start message when running robocop tests
    log.suite_start(tests)
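
For reference, suite_start is one of the structured actions: the call above
serializes a single JSON object naming every test in the run. An illustrative
shape (timestamp and test name are made up, not captured output):

    example = {"action": "suite_start",
               "tests": ["testExample"],      # hypothetical robocop test name
               "time": 1385000000000,
               "thread": None, "pid": None,
               "source": "Mochi-Remote"}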

    if options.totalChunks:
        tests_per_chunk = math.ceil(len(tests) / (options.totalChunks * 1.0))

@ -636,7 +724,7 @@ def main():
        if end > len(tests):
            end = len(tests)
        my_tests = tests[start:end]
        log.info("Running tests %d-%d/%d", start+1, end, len(tests))
        log.info("Running tests %d-%d/%d" % (start+1, end, len(tests)))

    dm.removeFile(os.path.join(deviceRoot, "fennec_ids.txt"))
    fennec_ids = os.path.abspath(os.path.join(SCRIPT_DIR, "fennec_ids.txt"))

@ -705,7 +793,7 @@ def main():
    dm.recordLogcat()
    result = mochitest.runTests(options)
    if result != 0:
        log.error("runTests() exited with code %s", result)
        log.error("runTests() exited with code %s" % result)
    log_result = mochitest.addLogData()
    if result != 0 or log_result != 0:
        mochitest.printDeviceInfo(printLogcat=True)

@ -734,7 +822,7 @@ def main():
    if (options.dm_trans == "sut"):
        dm._runCmds([{"cmd": " ".join(cmd_del)}])
    if retVal is None:
        log.warn("No tests run. Did you pass an invalid TEST_PATH?")
        log.warning("No tests run. Did you pass an invalid TEST_PATH?")
        retVal = 1
    else:
        # if we didn't have some kind of error running the tests, make

@ -53,7 +53,7 @@ MemoryStats.constructPathname = function (directory, basename) {
    return d.path;
}

MemoryStats.dump = function (dumpFn,
MemoryStats.dump = function (logger,
                             testNumber,
                             testURL,
                             dumpOutputDirectory,

@ -75,9 +75,9 @@ MemoryStats.dump = function (dumpFn,
            MemoryStats._hasMemoryStatistics[stat] = supported;
        }
        if (supported == MEM_STAT_SUPPORTED) {
            dumpFn("TEST-INFO | MEMORY STAT " + stat + " after test: " + mrm[stat]);
            logger.info("MEMORY STAT " + stat + " after test: " + mrm[stat]);
        } else if (firstAccess) {
            dumpFn("TEST-INFO | MEMORY STAT " + stat + " not supported in this build configuration.");
            logger.info("MEMORY STAT " + stat + " not supported in this build configuration.");
        }
    }

@ -85,20 +85,19 @@ MemoryStats.dump = function (dumpFn,
        var basename = "about-memory-" + testNumber + ".json.gz";
        var dumpfile = MemoryStats.constructPathname(dumpOutputDirectory,
                                                     basename);
        dumpFn("TEST-INFO | " + testURL + " | MEMDUMP-START " + dumpfile);
        logger.info(testURL + " | MEMDUMP-START " + dumpfile);
        var md = MemoryStats._getService("@mozilla.org/memory-info-dumper;1",
                                         "nsIMemoryInfoDumper");
        md.dumpMemoryReportsToNamedFile(dumpfile, function () {
            dumpFn("TEST-INFO | " + testURL + " | MEMDUMP-END");
            logger.info("TEST-INFO | " + testURL + " | MEMDUMP-END");
        }, null, /* anonymize = */ false);

    }

    if (dumpDMD && typeof(DMDReportAndDump) != undefined) {
        var basename = "dmd-" + testNumber + ".txt";
        var dumpfile = MemoryStats.constructPathname(dumpOutputDirectory,
                                                     basename);
        dumpFn("TEST-INFO | " + testURL + " | DMD-DUMP " + dumpfile);
        logger.info(testURL + " | DMD-DUMP " + dumpfile);
        DMDReportAndDump(dumpfile);
    }
};

@ -91,7 +91,7 @@ if (typeof(repr) == 'undefined') {
        }
        return ostring;
    };
}
}

/* This returns a function that applies the previously given parameters.
 * This is used by SimpleTest.showReport

@ -116,7 +116,7 @@ if (typeof(partial) == 'undefined') {
if (typeof(getElement) == 'undefined') {
    this.getElement = function(id) {
        return ((typeof(id) == "string") ?
            document.getElementById(id) : id);
            document.getElementById(id) : id);
    };
    this.$ = this.getElement;
}

@ -190,10 +190,10 @@ if (typeof(computedStyle) == 'undefined') {
    if (typeof(style) == 'undefined' || style === null) {
        return undefined;
    }

    var selectorCase = cssProperty.replace(/([A-Z])/g, '-$1'
        ).toLowerCase();

    return style.getPropertyValue(selectorCase);
};
}

@ -235,7 +235,9 @@ SimpleTest._cleanupFunctions = [];
**/
SimpleTest.ok = function (condition, name, diag) {
    var test = {'result': !!condition, 'name': name, 'diag': diag};
    SimpleTest._logResult(test, "TEST-PASS", "TEST-UNEXPECTED-FAIL");
    var successInfo = {status:"PASS", expected:"PASS", message:"TEST-PASS"};
    var failureInfo = {status:"FAIL", expected:"PASS", message:"TEST-UNEXPECTED-FAIL"};
    SimpleTest._logResult(test, successInfo, failureInfo);
    SimpleTest._tests.push(test);
};

@ -284,7 +286,9 @@ SimpleTest.doesThrow = function(fn, name) {

SimpleTest.todo = function(condition, name, diag) {
    var test = {'result': !!condition, 'name': name, 'diag': diag, todo: true};
    SimpleTest._logResult(test, "TEST-UNEXPECTED-PASS", "TEST-KNOWN-FAIL");
    var successInfo = {status:"PASS", expected:"FAIL", message:"TEST-UNEXPECTED-PASS"};
    var failureInfo = {status:"FAIL", expected:"FAIL", message:"TEST-KNOWN-FAIL"};
    SimpleTest._logResult(test, successInfo, failureInfo);
    SimpleTest._tests.push(test);
};

@ -313,130 +317,65 @@ SimpleTest._getCurrentTestURL = function() {
        "unknown test url";
};

SimpleTest._forceLogMessageOutput = parentRunner && !parentRunner.quiet;
SimpleTest._forceLogMessageOutput = false;

/**
 * Force all test messages to be displayed. Only applies for the current test.
 */
SimpleTest.requestCompleteLog = function() {
    if (SimpleTest._forceLogMessageOutput)
    if (!parentRunner || SimpleTest._forceLogMessageOutput) {
        return;
    }

    parentRunner.structuredLogger.deactivateBuffering();
    SimpleTest._forceLogMessageOutput = true;

    SimpleTest.registerCleanupFunction(function() {
        parentRunner.structuredLogger.activateBuffering();
        SimpleTest._forceLogMessageOutput = false;
    });
};

/**
 * A circular buffer, managed by _logResult. We explicitly manage the
 * circularness of the buffer, rather than resorting to .shift()/.push()
 * because explicit management is much faster.
 */
SimpleTest._bufferedMessages = [];
SimpleTest._logResult = (function () {
    var bufferingThreshold = 100;
    var outputIndex = 0;
SimpleTest._logResult = function (test, passInfo, failInfo) {
    var url = SimpleTest._getCurrentTestURL();
    var result = test.result ? passInfo : failInfo;
    var diagnostic = test.diag || null;
    // BUGFIX : coercing test.name to a string, because some a11y tests pass an xpconnect object
    var subtest = test.name ? String(test.name) : null;
    var isError = !test.result == !test.todo;

    function logResult(test, passString, failString) {
        var url = SimpleTest._getCurrentTestURL();
        var resultString = test.result ? passString : failString;
        var diagnostic = test.name + (test.diag ? " - " + test.diag : "");
        var msg = [resultString, url, diagnostic].join(" | ");
        var isError = !test.result == !test.todo;

        // Due to JavaScript's name lookup rules, it is important that
        // the second parameter here be named identically to the isError
        // variable declared above.
        function dumpMessage(msg, isError) {
            if (parentRunner) {
                if (isError) {
                    parentRunner.addFailedTest(url);
                    parentRunner.error(msg);
                } else {
                    parentRunner.log(msg);
                }
            } else if (typeof dump === "function") {
                dump(msg + "\n");
            } else {
                // Non-Mozilla browser? Just do nothing.
    if (parentRunner) {
        if (!result.status || !result.expected) {
            if (diagnostic) {
                parentRunner.structuredLogger.info(diagnostic);
            }
            return;
        }

        // Detect when SimpleTest.reset() has been called, so we can
        // reset outputIndex. We store outputIndex as local state to
        // avoid adding even more state to SimpleTest.
        if (SimpleTest._bufferedMessages.length == 0) {
            outputIndex = 0;
        }

        // We want to eliminate mundane TEST-PASS/TEST-KNOWN-FAIL
        // output, since some tests produce tens of thousands of of such
        // messages. These messages can consume a lot of memory to
        // generate and take a significant amount of time to output.
        // However, the reality is that TEST-PASS messages can also be
        // used as a form of logging via constructs like:
        //
        //   SimpleTest.ok(true, "some informative message");
        //
        // And eliding the logging can be very confusing when trying to
        // debug test failures.
        //
        // Hence the compromise adopted here: We buffer messages up to
        // some limit and dump the buffer when a test failure happens.
        // This behavior ought to provide enough context for developers
        // looking to understand where in the test things failed.
        if (isError) {
            // Display this message and all the messages we have buffered.
            if (SimpleTest._bufferedMessages.length > 0) {
                dumpMessage("TEST-INFO | dumping last " + SimpleTest._bufferedMessages.length + " message(s)");
                dumpMessage("TEST-INFO | if you need more context, please use SimpleTest.requestCompleteLog() in your test");

                function dumpBufferedMessage(m) {
                    dumpMessage(m, false);
                }
                // The latest message is just before outputIndex.
                // The earliest message is located at outputIndex.
                var earliest = SimpleTest._bufferedMessages.slice(outputIndex);
                var latest = SimpleTest._bufferedMessages.slice(0, outputIndex);
                earliest.map(dumpBufferedMessage);
                latest.map(dumpBufferedMessage);

                SimpleTest._bufferedMessages = [];
            }

            dumpMessage(msg);
            return;
            parentRunner.addFailedTest(url);
        }

        var runningSingleTest = ((parentRunner &&
                                  parentRunner._urls.length == 1) ||
                                 isSingleTestRun);
        var shouldLogImmediately = (runningSingleTest ||
                                    SimpleTest._forceLogMessageOutput);

        if (!shouldLogImmediately) {
            // Buffer the message for possible later output.
            if (SimpleTest._bufferedMessages.length >= bufferingThreshold) {
                if (outputIndex >= bufferingThreshold) {
                    outputIndex = 0;
                }
                SimpleTest._bufferedMessages[outputIndex] = msg;
                outputIndex++;
            } else {
                SimpleTest._bufferedMessages.push(msg);
            }
            return;
        }

        dumpMessage(msg);
        parentRunner.structuredLogger.testStatus(url,
                                                 subtest,
                                                 result.status,
                                                 result.expected,
                                                 diagnostic);
    } else if (typeof dump === "function") {
        var debugMsg = [url, result.message, diagnostic].join(' | ');
        dump(debugMsg + "\n");
    } else {
        // Non-Mozilla browser? Just do nothing.
    }

    return logResult;
})();
};
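
The removed comment above explains why the old buffer was managed as an
explicit ring rather than with .shift()/.push(). For illustration, the same
ring-buffer idea rendered as a small Python sketch (names are hypothetical,
not part of the harness):

    class RingBuffer(object):
        """Mirror of SimpleTest._bufferedMessages plus outputIndex:
        overwrite the oldest slot once the threshold is reached."""
        def __init__(self, threshold=100):
            self.threshold = threshold
            self.items = []
            self.index = 0  # next slot to overwrite once full

        def push(self, item):
            if len(self.items) >= self.threshold:
                if self.index >= self.threshold:
                    self.index = 0
                self.items[self.index] = item
                self.index += 1
            else:
                self.items.append(item)

        def drain_in_order(self):
            # Earliest entries start at index; latest end just before it.
            ordered = self.items[self.index:] + self.items[:self.index]
            self.items, self.index = [], 0
            return ordered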

SimpleTest.info = function(name, message) {
    SimpleTest._logResult({result:true, name:name, diag:message}, "TEST-INFO");
    var log = name + ' | ' + message;
    if (parentRunner) {
        parentRunner.structuredLogger.info(log);
    } else {
        dump(log + '\n');
    }
};

/**

@ -531,14 +470,14 @@ SimpleTest.toggleByClass = function (cls, evt) {
            var clsName = child.className;
            if (!clsName) {
                continue;
            }
        }
            var classNames = clsName.split(' ');
            for (var j = 0; j < classNames.length; j++) {
                if (classNames[j] == cls) {
                    elements.push(child);
                    break;
                }
            }
        }
    }
}
    for (var t=0; t<elements.length; t++) {
        //TODO: again, for-in loop over elems seems to break this

@ -864,7 +803,12 @@ SimpleTest.registerCleanupFunction = function(aFunc) {
**/
SimpleTest.finish = function() {
    if (SimpleTest._alreadyFinished) {
        SimpleTest.ok(false, "[SimpleTest.finish()] this test already called finish!");
        var err = "[SimpleTest.finish()] this test already called finish!";
        if (parentRunner) {
            parentRunner.structuredLogger.error(err);
        } else {
            dump(err + '\n');
        }
    }

    SimpleTest._alreadyFinished = true;

@ -65,6 +65,158 @@ function flattenArguments(lst/* ...*/) {
    return res;
}

/**
 * StructuredLogger: Structured logger class following the mozlog.structured protocol
 *
 *
**/
var VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];

function StructuredLogger(name) {
    this.name = name;
    this.testsStarted = [];

    /* test logs */

    this.testStart = function(test) {
        var data = {test: test};
        this._logData("test_start", data);
    };

    this.testStatus = function(test, subtest, status, expected="PASS", message=null) {
        var data = {test: test, subtest: subtest, status: status};

        if (message !== null) {
            data.message = message;
        }
        if (expected != status && status != 'SKIP') {
            data.expected = expected;
        }

        this._logData("test_status", data);
    };

    this.testEnd = function(test, status, expected="OK", message=null, extra=null) {
        var data = {test: test, status: status};

        if (message !== null) {
            data.message = message;
        }
        if (expected != status) {
            data.expected = expected;
        }
        if (extra !== null) {
            data.extra = extra;
        }

        if (!this.testsStarted.pop(test)) {
            this.error("Test '" + test + "' was ended more than once or never started. " +
                       "Ended with this data: " + JSON.stringify(data));
            return;
        }

        this._logData("test_end", data);
    };

    this.suiteStart = function(tests, runinfo) {
        runinfo = typeof runinfo !== "undefined" ? runinfo : null;

        var data = {tests: tests};
        if (runinfo !== null) {
            data.runinfo = runinfo;
        }

        this._logData("suite_start", data);
    };

    this.suiteEnd = function() {
        this._logData("suite_end");
    };

    this.testStart = function(test) {
        this.testsStarted.push(test);
        var data = {test: test};
        this._logData("test_start", data);
    };

    /* log action: human readable logs */

    this._log = function(level, message) {
        // Coercing the message parameter to a string, in case an invalid value is passed.
        message = String(message);
        var data = {level: level, message: message};
        this._logData('log', data);
    };

    this.debug = function(message) {
        this._log('DEBUG', message);
    };

    this.info = function(message) {
        this._log('INFO', message);
    };

    this.warning = function(message) {
        this._log('WARNING', message);
    };

    this.error = function(message) {
        this._log("ERROR", message);
    };

    this.critical = function(message) {
        this._log('CRITICAL', message);
    };

    /* Special mochitest messages for deactivating/activating buffering */

    this.deactivateBuffering = function() {
        this._logData("buffering_off");
    };
    this.activateBuffering = function() {
        this._logData("buffering_on");
    };

    /* dispatches a log to handlers */

    this._logData = function(action, data) {
        data = typeof data !== "undefined" ? data : null;

        if (data === null) {
            data = {};
        }

        var allData = {action: action,
                       time: new Date().getTime(),
                       thread: null,
                       pid: null,
                       source: this.name};

        for (var attrname in data) {
            allData[attrname] = data[attrname];
        }

        this._dumpMessage(allData);
    };

    this._dumpMessage = function(message) {
        var str = JSON.stringify(message);
        // BUGFIX: browser-chrome tests doesn't use LogController
        if (Object.keys(LogController.listeners).length !== 0) {
            LogController.log(str);
        } else {
            dump('\n' + str + '\n');
        }
    };

    /* Message validation. Only checking the action for now */
    this.validMessage = function(message) {
        return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
    };

}
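
Each _dumpMessage call above emits exactly one JSON object per line, which is
what the Python harness parses back. A sketch of a consumer for that
JSON-lines protocol, assuming lines that fail to parse are plain process
output (names here are illustrative, not harness API):

    import json

    def read_structured(stream):
        for line in stream:
            line = line.strip()
            if not line:
                continue
            try:
                message = json.loads(line)
            except ValueError:
                # Not a structured message: surface it as raw output.
                yield {'action': 'process_output', 'data': line}
                continue
            if isinstance(message, dict) and 'action' in message:
                yield message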

/**
 * TestRunner: A test runner for SimpleTest
 * TODO:

@ -91,7 +243,6 @@ TestRunner.runSlower = false;
TestRunner.dumpOutputDirectory = "";
TestRunner.dumpAboutMemoryAfterTest = false;
TestRunner.dumpDMDAfterTest = false;
TestRunner.quiet = false;
TestRunner.slowestTestTime = 0;
TestRunner.slowestTestURL = "";

@ -126,6 +277,7 @@ TestRunner._checkForHangs = function() {
        if (runtime >= TestRunner.timeout * TestRunner._timeoutFactor) {
            var frameWindow = $('testframe').contentWindow.wrappedJSObject ||
                              $('testframe').contentWindow;
            // TODO : Do this in a way that reports that the test ended with a status "TIMEOUT"
            reportError(frameWindow, "Test timed out.");

            // If we have too many timeouts, give up. We don't want to wait hours

@ -205,11 +357,11 @@ TestRunner.generateFailureList = function () {
/**
 * If logEnabled is true, this is the logger that will be used.
 **/
TestRunner.logger = LogController;
TestRunner.structuredLogger = new StructuredLogger('mochitest');

TestRunner.log = function(msg) {
    if (TestRunner.logEnabled) {
        TestRunner.logger.log(msg);
        TestRunner.structuredLogger.info(msg);
    } else {
        dump(msg + "\n");
    }

@ -217,7 +369,7 @@ TestRunner.log = function(msg) {

TestRunner.error = function(msg) {
    if (TestRunner.logEnabled) {
        TestRunner.logger.error(msg);
        TestRunner.structuredLogger.error(msg);
    } else {
        dump(msg + "\n");
    }

@ -264,7 +416,7 @@ TestRunner._makeIframe = function (url, retry) {
            return;
        }

        TestRunner.log("Error: Unable to restore focus, expect failures and timeouts.");
        TestRunner.structuredLogger.error("Unable to restore focus, expect failures and timeouts.");
    }
    window.scrollTo(0, $('indicator').offsetTop);
    iframe.src = url;

@ -294,12 +446,13 @@ TestRunner.getLoadedTestURL = function () {
 *
 **/
TestRunner.runTests = function (/*url...*/) {
    TestRunner.log("SimpleTest START");
    TestRunner.structuredLogger.info("SimpleTest START");
    TestRunner.originalTestURL = $("current-test").innerHTML;

    SpecialPowers.registerProcessCrashObservers();

    TestRunner._urls = flattenArguments(arguments);

    $('testframe').src="";
    TestRunner._checkForHangs();
    TestRunner.runNextTest();

@ -314,7 +467,7 @@ TestRunner.resetTests = function(listURLs) {
    // Reset our "Current-test" line - functionality depends on it
    $("current-test").innerHTML = TestRunner.originalTestURL;
    if (TestRunner.logEnabled)
        TestRunner.log("SimpleTest START Loop " + TestRunner._currentLoop);
        TestRunner.structuredLogger.info("SimpleTest START Loop " + TestRunner._currentLoop);

    TestRunner._urls = listURLs;
    $('testframe').src="";

@ -340,41 +493,49 @@ TestRunner.runNextTest = function() {
        TestRunner._expectedMinAsserts = 0;
        TestRunner._expectedMaxAsserts = 0;

        TestRunner.log("TEST-START | " + url); // used by automation.py
        TestRunner.structuredLogger.testStart(url);

        TestRunner._makeIframe(url, 0);
    } else {
        $("current-test").innerHTML = "<b>Finished</b>";
        TestRunner._makeIframe("about:blank", 0);

        if (parseInt($("pass-count").innerHTML) == 0 &&
            parseInt($("fail-count").innerHTML) == 0 &&
            parseInt($("todo-count").innerHTML) == 0)
        var passCount = parseInt($("pass-count").innerHTML, 10);
        var failCount = parseInt($("fail-count").innerHTML, 10);
        var todoCount = parseInt($("todo-count").innerHTML, 10);

        if (passCount === 0 &&
            failCount === 0 &&
            todoCount === 0)
        {
            // No |$('testframe').contentWindow|, so manually update: ...
            // ... the log,
            TestRunner.error("TEST-UNEXPECTED-FAIL | (SimpleTest/TestRunner.js) | No checks actually run.");
            // ... the count,
            $("fail-count").innerHTML = 1;
            // ... the indicator.
            var indicator = $("indicator");
            indicator.innerHTML = "Status: Fail (No checks actually run)";
            indicator.style.backgroundColor = "red";
            // No |$('testframe').contentWindow|, so manually update: ...
            // ... the log,
            TestRunner.structuredLogger.testEnd('SimpleTest/TestRunner.js',
                                                "ERROR",
                                                "OK",
                                                "No checks actually run");
            // ... the count,
            $("fail-count").innerHTML = 1;
            // ... the indicator.
            var indicator = $("indicator");
            indicator.innerHTML = "Status: Fail (No checks actually run)";
            indicator.style.backgroundColor = "red";
        }

        SpecialPowers.unregisterProcessCrashObservers();

        TestRunner.log("TEST-START | Shutdown"); // used by automation.py
        TestRunner.log("Passed: " + $("pass-count").innerHTML);
        TestRunner.log("Failed: " + $("fail-count").innerHTML);
        TestRunner.log("Todo: " + $("todo-count").innerHTML);
        TestRunner.log("Slowest: " + TestRunner.slowestTestTime + 'ms - ' + TestRunner.slowestTestURL);
        TestRunner.structuredLogger.info("TEST-START | Shutdown");
        TestRunner.structuredLogger.info("Passed: " + passCount);
        TestRunner.structuredLogger.info("Failed: " + failCount);
        TestRunner.structuredLogger.info("Todo: " + todoCount);
        TestRunner.structuredLogger.info("Slowest: " + TestRunner.slowestTestTime + 'ms - ' + TestRunner.slowestTestURL);

        // If we are looping, don't send this cause it closes the log file
        if (TestRunner.repeat == 0) {
            TestRunner.log("SimpleTest FINISHED");
        if (TestRunner.repeat === 0) {
            TestRunner.structuredLogger.info("SimpleTest FINISHED");
        }

        if (TestRunner.repeat == 0 && TestRunner.onComplete) {
        if (TestRunner.repeat === 0 && TestRunner.onComplete) {
            TestRunner.onComplete();
        }

@ -385,8 +546,8 @@ TestRunner.runNextTest = function() {
    } else {
        // Loops are finished
        if (TestRunner.logEnabled) {
            TestRunner.log("TEST-INFO | Ran " + TestRunner._currentLoop + " Loops");
            TestRunner.log("SimpleTest FINISHED");
            TestRunner.structuredLogger.info("TEST-INFO | Ran " + TestRunner._currentLoop + " Loops");
            TestRunner.structuredLogger.info("SimpleTest FINISHED");
        }

        if (TestRunner.onComplete)

@ -408,16 +569,19 @@ TestRunner.testFinished = function(tests) {
    // have a chance to unload it.
    if (TestRunner._currentTest == TestRunner._lastTestFinished &&
        !TestRunner._loopIsRestarting) {
        TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                         TestRunner.currentTestURL +
                         " | called finish() multiple times");
        TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
                                            "ERROR",
                                            "OK",
                                            "called finish() multiple times");
        TestRunner.updateUI([{ result: false }]);
        return;
    }
    TestRunner._lastTestFinished = TestRunner._currentTest;
    TestRunner._loopIsRestarting = false;

    MemoryStats.dump(TestRunner.log, TestRunner._currentTest,
    // TODO : replace this by a function that returns the mem data as an object
    // that's dumped later with the test_end message
    MemoryStats.dump(TestRunner.structuredLogger, TestRunner._currentTest,
                     TestRunner.currentTestURL,
                     TestRunner.dumpOutputDirectory,
                     TestRunner.dumpAboutMemoryAfterTest,

@ -425,41 +589,51 @@ TestRunner.testFinished = function(tests) {

    function cleanUpCrashDumpFiles() {
        if (!SpecialPowers.removeExpectedCrashDumpFiles(TestRunner._expectingProcessCrash)) {
            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                             TestRunner.currentTestURL +
                             " | This test did not leave any crash dumps behind, but we were expecting some!");
            TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
                                                "ERROR",
                                                "OK",
                                                "This test did not leave any crash dumps behind, but we were expecting some!");
            tests.push({ result: false });
        }
        var unexpectedCrashDumpFiles =
            SpecialPowers.findUnexpectedCrashDumpFiles();
        TestRunner._expectingProcessCrash = false;
        if (unexpectedCrashDumpFiles.length) {
            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                             TestRunner.currentTestURL +
                             " | This test left crash dumps behind, but we " +
                             "weren't expecting it to!");
            TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
                                                "ERROR",
                                                "OK",
                                                "This test left crash dumps behind, but we " +
                                                "weren't expecting it to!",
                                                {unexpected_crashdump_files: unexpectedCrashDumpFiles});
            tests.push({ result: false });
            unexpectedCrashDumpFiles.sort().forEach(function(aFilename) {
                TestRunner.log("TEST-INFO | Found unexpected crash dump file " +
                               aFilename + ".");
                TestRunner.structuredLogger.info("Found unexpected crash dump file " +
                                                 aFilename + ".");
            });
        }
    }

    function runNextTest() {
        if (TestRunner.currentTestURL != TestRunner.getLoadedTestURL()) {
            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                             TestRunner.currentTestURL +
                             " | " + TestRunner.getLoadedTestURL() +
                             " finished in a non-clean fashion, probably" +
                             " because it didn't call SimpleTest.finish()");
            TestRunner.structuredLogger.testStatus(TestRunner.currentTestURL,
                                                   TestRunner.getLoadedTestURL(),
                                                   "ERROR",
                                                   "OK",
                                                   "finished in a non-clean fashion, probably" +
                                                   " because it didn't call SimpleTest.finish()",
                                                   {loaded_test_url: TestRunner.getLoadedTestURL()});
            tests.push({ result: false });
        }

        var runtime = new Date().valueOf() - TestRunner._currentTestStartTime;
        TestRunner.log("TEST-END | " +
                       TestRunner.currentTestURL +
                       " | finished in " + runtime + "ms");

        TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
                                            "OK",
                                            undefined,
                                            "Finished in " + runtime + "ms",
                                            {runtime: runtime}
                                            );

        if (TestRunner.slowestTestTime < runtime && TestRunner._timeoutFactor == 1) {
            TestRunner.slowestTestTime = runtime;
            TestRunner.slowestTestURL = TestRunner.currentTestURL;

@ -496,13 +670,28 @@ TestRunner.testUnloaded = function() {
        var max = TestRunner._expectedMaxAsserts;
        var min = TestRunner._expectedMinAsserts;
        if (numAsserts > max) {
            TestRunner.error("TEST-UNEXPECTED-FAIL | " + url + " | Assertion count " + numAsserts + " is greater than expected range " + min + "-" + max + " assertions.");
            TestRunner.structuredLogger.testEnd(url,
                                                "ERROR",
                                                "OK",
                                                "Assertion count " + numAsserts + " is greater than expected range " +
                                                min + "-" + max + " assertions.",
                                                {assertions: numAsserts, min_asserts: min, max_asserts: max});
            TestRunner.updateUI([{ result: false }]);
        } else if (numAsserts < min) {
            TestRunner.error("TEST-UNEXPECTED-PASS | " + url + " | Assertion count " + numAsserts + " is less than expected range " + min + "-" + max + " assertions.");
            TestRunner.structuredLogger.testEnd(url,
                                                "OK",
                                                "ERROR",
                                                "Assertion count " + numAsserts + " is less than expected range " +
                                                min + "-" + max + " assertions.",
                                                {assertions: numAsserts, min_asserts: min, max_asserts: max});
            TestRunner.updateUI([{ result: false }]);
        } else if (numAsserts > 0) {
            TestRunner.log("TEST-KNOWN-FAIL | " + url + " | Assertion count " + numAsserts + " within expected range " + min + "-" + max + " assertions.");
            TestRunner.structuredLogger.testEnd(url,
                                                "ERROR",
                                                "ERROR",
                                                "Assertion count " + numAsserts + " within expected range " +
                                                min + "-" + max + " assertions.",
                                                {assertions: numAsserts, min_asserts: min, max_asserts: max});
        }
    }
    TestRunner._currentTest++;

@ -10,7 +10,7 @@ TestRunner.logger = LogController;
/* Helper function */
parseQueryString = function(encodedString, useArrays) {
    // strip a leading '?' from the encoded string
    var qstr = (encodedString[0] == "?") ? encodedString.substring(1) :
    var qstr = (encodedString[0] == "?") ? encodedString.substring(1) :
                                           encodedString;
    var pairs = qstr.replace(/\+/g, "%20").split(/(\&\;|\&\#38\;|\&|\&)/);
    var o = {};

@ -92,7 +92,7 @@ var consoleLevel = params.consoleLevel || null;
// repeat tells us how many times to repeat the tests
if (params.repeat) {
    TestRunner.repeat = params.repeat;
}
}

if (params.runUntilFailure) {
    TestRunner.runUntilFailure = true;

@ -135,17 +135,14 @@ if (params.dumpDMDAfterTest) {
    TestRunner.dumpDMDAfterTest = true;
}

if (params.quiet) {
    TestRunner.quiet = true;
}

// Log things to the console if appropriate.
TestRunner.logger.addListener("dumpListener", consoleLevel + "", function(msg) {
    dump(msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n");
    var data = formatLogMessage(msg);
    dump(data);
});

var gTestList = [];
var RunSet = {}
var RunSet = {};
RunSet.runall = function(e) {
    // Filter tests to include|exclude tests based on data in params.filter.
    // This allows for including or excluding tests from the gTestList

@ -190,7 +187,7 @@ RunSet.reloadAndRunAll = function(e) {
        window.location.href += "&autorun=1";
    } else {
        window.location.href += "?autorun=1";
    }
}
};

// UI Stuff

@ -245,7 +242,7 @@ function hookupTests(testList) {
    }

    document.getElementById('runtests').onclick = RunSet.reloadAndRunAll;
    document.getElementById('toggleNonTests').onclick = toggleNonTests;
    document.getElementById('toggleNonTests').onclick = toggleNonTests;
    // run automatically if autorun specified
    if (params.autorun) {
        RunSet.runall();

@ -33,15 +33,7 @@ if (params.logFile) {
    TestRunner.logger.addListener("mozLogger", fileLevel + "", spl.getLogCallback());
}

// if we get a quiet param, don't log to the console
if (!params.quiet) {
    function dumpListener(msg) {
        dump("*** " + msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n");
    }
    TestRunner.logger.addListener("dumpListener", consoleLevel + "", dumpListener);
}

var RunSet = {}
var RunSet = {};
RunSet.runall = function() {
    TestRunner.runTests(
        'test_bug362788.xhtml'

@ -57,7 +49,7 @@ RunSet.reloadAndRunAll = function() {
    }
};

// run automatically if
// run automatically if
if (params.autorun) {
    RunSet.runall();
}

@ -6,7 +6,6 @@ import time

import base

def format_seconds(total):
    """Format number of seconds to MM:SS.DD form."""
    minutes, seconds = divmod(total, 60)

@ -63,7 +63,7 @@ class StreamHandler(BaseHandler):
        with self._lock:
            #XXX Should encoding be the formatter's responsibility?
            try:
                self.stream.write(formatted.encode("utf8"))
                self.stream.write(formatted.encode("utf8", "replace"))
            except:
                raise
            self.stream.flush()

@ -113,13 +113,23 @@ class StructuredLogger(object):
        message is logged from this logger"""
        return self._handlers[self.name]

    def log_raw(self, data):
        if "action" not in data:
            raise ValueError
        data = self._make_log_data(data['action'], data)
        self._handle_log(data)

    def _log_data(self, action, data=None):
        if data is None:
            data = {}

        log_data = self._make_log_data(action, data)
        self._handle_log(log_data)

    def _handle_log(self, data):
        with self._lock:
            log_data = self._make_log_data(action, data)
            for handler in self.handlers:
                handler(log_data)
                handler(data)
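
This refactor routes both log_raw and _log_data through a single _handle_log,
which also removes the old code path that referenced an undefined `action`
inside the lock. A hedged usage sketch of the new log_raw entry point
(handler setup omitted; the logger name is illustrative):

    logger = StructuredLogger('example')
    # Forward an already-formed message, e.g. one parsed from a child process:
    logger.log_raw({'action': 'log', 'level': 'INFO', 'message': 'hello'})
    # A message without 'action' is rejected:
    # logger.log_raw({'message': 'nope'})  # raises ValueError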

    def _make_log_data(self, action, data):
        all_data = {"action": action,

@ -20,10 +20,11 @@ class DeviceRunner(BaseRunner):
    remote devices (or emulators), such as B2G.
    """
    def __init__(self, device_class, device_args=None, **kwargs):
        process_args = kwargs.get('process_args', {})
        process_args.update({ 'stream': sys.stdout,
                              'processOutputLine': self.on_output,
                              'onTimeout': self.on_timeout })
        process_args = {'stream': sys.stdout,
                        'processOutputLine': self.on_output,
                        'onTimeout': self.on_timeout }
        process_args.update(kwargs.get('process_args') or {})

        kwargs['process_args'] = process_args
        BaseRunner.__init__(self, **kwargs)
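
The order of operations here is the point of the change: defaults are built
first and the caller's process_args are applied on top, so a caller-supplied
processOutputLine (like B2GMochitest's on_output) now wins over the defaults
instead of being clobbered. A self-contained sketch of that dict-precedence
behavior (handler names are hypothetical):

    import sys

    def default_handler(line): pass
    def custom_handler(line): pass

    process_args = {'stream': sys.stdout,
                    'processOutputLine': default_handler,
                    'onTimeout': lambda: None}
    process_args.update({'processOutputLine': custom_handler, 'stream': None})
    assert process_args['processOutputLine'] is custom_handler
    assert process_args['stream'] is None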

@ -6,16 +6,20 @@

function MozillaLogger(aPath) {
}
// This delimiter is used to avoid interleaving Mochitest/Gecko logs.
var LOG_DELIMITER = String.fromCharCode(0xe175) + String.fromCharCode(0xee31) + String.fromCharCode(0x2c32) + String.fromCharCode(0xacbf);
function formatLogMessage(msg) {
    return LOG_DELIMITER + msg.info.join(' ') + LOG_DELIMITER + "\n";
}
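
The delimiter is built from rarely used code points so a consumer can pull
mochitest messages back out of stdout even when Gecko output is interleaved
with them. For illustration, a hypothetical consumer-side split (the constant
mirrors the JS one above; this helper is not part of the harness):

    # -*- coding: utf-8 -*-
    LOG_DELIMITER = u'\ue175\uee31\u2c32\uacbf'

    def extract_messages(chunk):
        parts = chunk.split(LOG_DELIMITER)
        # Odd-numbered fields fall between a delimiter pair.
        return [part for i, part in enumerate(parts) if i % 2 == 1]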

MozillaLogger.prototype = {

    init : function(path) {},

    getLogCallback : function() {
        return function (msg) {
            var data = msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n";
            var data = formatLogMessage(msg);
            dump(data);
        }
    };
    },

    log : function(msg) {

@ -44,13 +48,13 @@ SpecialPowersLogger.prototype = {

    getLogCallback : function () {
        return function (msg) {
            var data = msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n";
            var data = formatLogMessage(msg);
            SpecialPowers.log(data);

            if (data.indexOf("SimpleTest FINISH") >= 0) {
                SpecialPowers.closeLogFile();
            }
        }
    };
    },

    log : function (msg) {

@ -78,7 +82,7 @@ function MozillaFileLogger(aPath) {
}

MozillaFileLogger.prototype = {

    init : function (path) {
        var PR_WRITE_ONLY = 0x02; // Open for writing only.
        var PR_CREATE_FILE = 0x08;

@ -90,30 +94,38 @@ MozillaFileLogger.prototype = {
                         createInstance(Components.interfaces.nsIFileOutputStream);
        this._foStream.init(this._file, PR_WRITE_ONLY | PR_CREATE_FILE | PR_APPEND,
                            436 /* 0664 */, 0);

        this._converter = Components.classes["@mozilla.org/intl/converter-output-stream;1"].
                          createInstance(Components.interfaces.nsIConverterOutputStream);
        this._converter.init(this._foStream, "UTF-8", 0, 0);
    },

    getLogCallback : function() {
        return function (msg) {
            var data = msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n";
            if (MozillaFileLogger._foStream)
                this._foStream.write(data, data.length);
            var data = formatLogMessage(msg);
            if (MozillaFileLogger._converter) {
                this._converter.writeString(data);
            }

            if (data.indexOf("SimpleTest FINISH") >= 0) {
                MozillaFileLogger.close();
            }
        }
    };
    },

    log : function(msg) {
        if (this._foStream)
            this._foStream.write(msg, msg.length);
        if (this._converter) {
            this._converter.writeString(msg);
        }
    },

    close : function() {
        if(this._foStream)
            this._foStream.close();

        if (this._converter) {
            this._converter.flush();
            this._converter.close();
        }

        this._foStream = null;
        this._converter = null;
        this._file = null;
    }
};

@ -49,7 +49,7 @@ function runTests() {
    DebuggerServer.init();
}

function TestActor() {dump("instanciate test actor");}
function TestActor() {dump("instanciate test actor\n");}
TestActor.prototype = {
    actorPrefix: "test",

@ -357,7 +357,7 @@ function close_manager(aManagerWindow, aCallback, aLongerTimeout) {

    aManagerWindow.addEventListener("unload", function() {
        try {
            dump("Manager window unload handler");
            dump("Manager window unload handler\n");
            this.removeEventListener("unload", arguments.callee, false);
            resolve();
        } catch(e) {

@ -1103,7 +1103,7 @@ MockAddon.prototype = {
    get applyBackgroundUpdates() {
        return this._applyBackgroundUpdates;
    },

    set applyBackgroundUpdates(val) {
        if (val != AddonManager.AUTOUPDATE_DEFAULT &&
            val != AddonManager.AUTOUPDATE_DISABLE &&