Bug 1321127 - Add an assertion_count action to mozlog, r=ahal

This is intended as a structured replacement for the assertion checks
that previously used unstructured logs. It adds a log action
assertion_count, which takes the actual number of assertions observed
during a test, the minimum expected number and the maximum expected
number. It also updates the reftest harness to use this logging.

MozReview-Commit-ID: JgjLlaYuvSG
This commit is contained in:
James Graham 2016-11-30 17:26:16 +00:00
Родитель f2d55370bb
Коммит d8c76e7543
7 изменённых файлов: 109 добавлений и 28 удалений

Просмотреть файл

@ -1850,28 +1850,7 @@ function DoAssertionCheck(numAsserts)
var minAsserts = gURLs[0].minAsserts;
var maxAsserts = gURLs[0].maxAsserts;
var expectedAssertions = "expected " + minAsserts;
if (minAsserts != maxAsserts) {
expectedAssertions += " to " + maxAsserts;
}
expectedAssertions += " assertions";
if (numAsserts < minAsserts) {
++gTestResults.AssertionUnexpectedFixed;
gDumpFn("REFTEST TEST-UNEXPECTED-PASS | " + gURLs[0].prettyPath +
" | assertion count " + numAsserts + " is less than " +
expectedAssertions + "\n");
} else if (numAsserts > maxAsserts) {
++gTestResults.AssertionUnexpected;
gDumpFn("REFTEST TEST-UNEXPECTED-FAIL | " + gURLs[0].prettyPath +
" | assertion count " + numAsserts + " is more than " +
expectedAssertions + "\n");
} else if (numAsserts != 0) {
++gTestResults.AssertionKnown;
gDumpFn("REFTEST TEST-KNOWN-FAIL | " + gURLs[0].prettyPath +
"assertion count " + numAsserts + " matches " +
expectedAssertions + "\n");
}
logger.assertionCount(gCurrentURL, numAsserts, minAsserts, maxAsserts);
}
if (gURLs[0].chaosMode) {

Просмотреть файл

@ -87,6 +87,15 @@ StructuredLogger.prototype = {
this._logData("test_end", data);
},
assertionCount: function (test, count, minExpected=0, maxExpected=0) {
var data = {test: test,
min_expected: minExpected,
max_expected: maxExpected,
count: count};
this._logData("assertion_count", data);
},
suiteStart: function (tests, runinfo=null, versioninfo=null, deviceinfo=null, extra=null) {
var data = {tests: tests.map(x => this._testId(x))};
if (runinfo !== null) {

Просмотреть файл

@ -54,6 +54,7 @@ class MachFormatter(base.BaseFormatter):
self.summary_values = {"tests": 0,
"subtests": 0,
"assertion_counts": 0,
"expected": 0,
"unexpected": defaultdict(int),
"skipped": 0}
@ -87,6 +88,10 @@ class MachFormatter(base.BaseFormatter):
color = self.terminal.yellow
elif data["action"] == "crash":
color = self.terminal.red
elif data["action"] == "assertion_count":
if (data["count"] > data["max_expected"] or
data["count"] < data["min_expected"]):
color = self.terminal.red
if color is not None:
action = color(action)
@ -111,6 +116,7 @@ class MachFormatter(base.BaseFormatter):
def suite_start(self, data):
self.summary_values = {"tests": 0,
"subtests": 0,
"assertion_counts": 0,
"expected": 0,
"unexpected": defaultdict(int),
"skipped": 0}
@ -124,14 +130,21 @@ class MachFormatter(base.BaseFormatter):
rv = ["", heading, "=" * len(heading), ""]
has_subtests = self.summary_values["subtests"] > 0
has_assert_counts = self.summary_values["assertion_counts"] > 0
test_count = self.summary_values["tests"]
components = ["%i parents" % self.summary_values["tests"]]
if has_subtests:
rv.append("Ran %i tests (%i parents, %i subtests)" %
(self.summary_values["tests"] + self.summary_values["subtests"],
self.summary_values["tests"],
self.summary_values["subtests"]))
else:
rv.append("Ran %i tests" % self.summary_values["tests"])
test_count += self.summary_values["subtests"]
components.append("%i subtests" % self.summary_values["subtests"])
if has_assert_counts:
test_count += self.summary_values["assertion_counts"]
components.append("%i assertion counts" % self.summary_values["assertion_counts"])
summary = "Ran %i tests" % test_count
if len(components) > 1:
summary += " (%s)" % ", ".join(components)
rv.append(summary)
rv.append("Expected results: %i" % self.summary_values["expected"])
@ -279,6 +292,24 @@ class MachFormatter(base.BaseFormatter):
message))
return rv
def assertion_count(self, data):
    """Format an assertion_count message for console output.

    Counts the message toward the suite summary.  Returns None when the
    observed count falls inside the expected range (nothing to print);
    otherwise records an unexpected PASS/FAIL status and returns a
    human-readable summary line.
    """
    self.summary_values["assertion_counts"] += 1

    count = data["count"]
    low = data["min_expected"]
    high = data["max_expected"]

    # In-range counts are expected and produce no output.
    if low <= count <= high:
        return None

    # Too many assertions is an unexpected failure; too few means an
    # expected assertion did not fire, i.e. an unexpected pass.
    expected = "%i" % low if low == high else "%i to %i" % (low, high)
    status = "FAIL" if count > high else "PASS"
    self.summary_values["unexpected"][status] += 1
    return "Assertion count %i, expected %s assertions\n" % (count, expected)
def _update_summary(self, data):
if "expected" in data:
self.summary_values["unexpected"][data["status"]] += 1

Просмотреть файл

@ -139,6 +139,25 @@ class TbplFormatter(BaseFormatter):
else:
return self._format_status(data)
def assertion_count(self, data):
    """Format an assertion_count message as a tbpl status line.

    Returns None when no assertions were produced and none were
    expected; otherwise returns a "STATUS | test | ..." line describing
    how the observed count compares to the expected range.
    """
    count = data["count"]
    low = data["min_expected"]
    high = data["max_expected"]

    if low == high:
        expected = "%i" % low
    else:
        expected = "%i to %i" % (low, high)

    # Fewer assertions than expected is an unexpected pass, more is an
    # unexpected failure, a nonzero in-range count is a known failure.
    if count < low:
        outcome = ("TEST-UNEXPECTED-PASS", "is less than")
    elif count > high:
        outcome = ("TEST-UNEXPECTED-FAIL", "is more than")
    elif count:
        outcome = ("TEST-KNOWN-FAIL", "matches")
    else:
        return None

    status, comparison = outcome
    return ("%s | %s | assertion count %i %s expected %s assertions\n" %
            (status, data["test"], count, comparison, expected))
def _format_status(self, data):
message = "- " + data["message"] if "message" in data else ""
if "stack" in data:

Просмотреть файл

@ -40,6 +40,21 @@ class UnittestFormatter(base.BaseFormatter):
char = "S"
return char
def assertion_count(self, data):
    """Return the progress character for an assertion_count message.

    "X" marks an unexpected pass (fewer assertions than expected),
    "F" marks an unexpected failure (more than expected, also recorded
    in ``self.fails``), and "." marks an in-range count.
    """
    count = data["count"]
    if count < data["min_expected"]:
        # Fewer assertions than the expected minimum: an assertion that
        # was expected to fire did not, i.e. an unexpected pass.
        char = "X"
    elif count > data["max_expected"]:
        char = "F"
        # Fixed typo in the user-visible message: "greated" -> "greater".
        self.fails.append({"test": data["test"],
                           "message": ("assertion count %i is greater than %i" %
                                       (count, data["max_expected"]))})
    else:
        # In-range counts, zero or nonzero, are expected; the original
        # duplicated "." branches are collapsed into one.
        char = "."
    return char
def suite_end(self, data):
self.end_time = data["time"]
summary = "\n".join([self.output_fails(),

Просмотреть файл

@ -43,6 +43,16 @@ class StatusHandler(object):
else:
self.expected_statuses[status] += 1
if action == "assertion_count":
if data["count"] < data["min_expected"]:
self.unexpected_statuses["PASS"] += 1
elif data["count"] > data["max_expected"]:
self.unexpected_statuses["FAIL"] += 1
elif data["count"]:
self.expected_statuses["FAIL"] += 1
else:
self.expected_statuses["PASS"] += 1
def summarize(self):
return RunSummary(
dict(self.unexpected_statuses),

Просмотреть файл

@ -46,6 +46,11 @@ Allowed actions, and subfields:
command - Command line of the process
data - Output data from the process
assertion_count
count - Number of assertions produced
min_expected - Minimum expected number of assertions
max_expected - Maximum expected number of assertions
log
level [CRITICAL | ERROR | WARNING |
INFO | DEBUG] - level of the logging message
@ -422,6 +427,19 @@ class StructuredLogger(object):
"""
self._log_data("process_exit", data)
@log_action(TestId("test"),
            Int("count"),
            Int("min_expected"),
            Int("max_expected"))
def assertion_count(self, data):
    """Log count of assertions produced when running a test.

    :param test: - Id of the test that produced the assertions
    :param count: - Number of assertions produced
    :param min_expected: - Minimum expected number of assertions
    :param max_expected: - Maximum expected number of assertions
    """
    self._log_data("assertion_count", data)
def _log_func(level_name):
@log_action(Unicode("message"),