Bug 1558954 - Update the expected test metadata format to include known intermittent statuses r=jgraham

Currently, the `expected` field in the test metadata accepts only one status. This patch adds
the ability to include known intermittent statuses in this metadata.

The existing metadata format is:
```
[test]
  [subtest]
    expected: PASS
```

This new format, if there is a known intermittent status to record, will be:
```
[test]
  [subtest]
    expected: [PASS, FAIL]
```

The first status will always be the "expected" status. The following statuses in the list are
"known intermittent" statuses. The statuses are ordered based on `tree.result_values` counts
during `build_tree()`.

Tests have been added to test_update.py to account for the following circumstances:
- an existing metadata file with only one expected status needs updating with a known intermittent
- a file with an existing known intermittent needs to be updated with an additional status
- an intermittent status is now the expected status, and the expected status is now intermittent
- a new metadata file needs to be created with a known intermittent status

testrunner.py has been updated to exclude tests with a `known_intermittent` status from the
`unexpected_count`.

A test has been added to test_expectedtree.py to ensure the `tree.result_values` is counting
correctly.

Differential Revision: https://phabricator.services.mozilla.com/D37729

--HG--
extra : moz-landing-system : lando
This commit is contained in:
Nikki Sharpley 2019-07-18 19:01:30 +00:00
Родитель 83b19f30bb
Коммит 75306e9450
9 изменённых файлов: 595 добавлений и 233 удалений

Просмотреть файл

@ -11,7 +11,7 @@ class Node(object):
# Populated for leaf nodes
self.run_info = set()
self.result_values = set()
self.result_values = defaultdict(int)
def add(self, node):
self.children.add(node)
@ -76,13 +76,15 @@ def build_tree(properties, dependent_props, results, tree=None):
prop_index = {prop: i for i, prop in enumerate(properties)}
all_results = set()
all_results = defaultdict(int)
for result_values in results.itervalues():
all_results |= result_values
for result_value, count in result_values.iteritems():
all_results[result_value] += count
# If there is only one result we are done
if not properties or len(all_results) == 1:
tree.result_values |= all_results
for value, count in all_results.iteritems():
tree.result_values[value] += count
tree.run_info |= set(results.keys())
return tree
@ -107,7 +109,8 @@ def build_tree(properties, dependent_props, results, tree=None):
# In the case that no properties partition the space
if not results_partitions:
tree.result_values |= all_results
for value, count in all_results.iteritems():
tree.result_values[value] += count
tree.run_info |= set(results.keys())
return tree

Просмотреть файл

@ -178,9 +178,12 @@ class ExpectedManifest(ManifestItem):
:param result: Total number of bytes leaked"""
self.update_properties.leak_threshold.set(run_info, result)
def update(self, stability, full_update):
def update(self, full_update, disable_intermittent, update_intermittent, remove_intermittent):
for prop_update in self.update_properties:
prop_update.update(stability, full_update)
prop_update.update(full_update,
disable_intermittent,
update_intermittent,
remove_intermittent)
class TestNode(ManifestItem):
@ -276,9 +279,12 @@ class TestNode(ManifestItem):
self.append(subtest)
return subtest
def update(self, stability, full_update):
def update(self, full_update, disable_intermittent, update_intermittent, remove_intermittent):
for prop_update in self.update_properties:
prop_update.update(stability, full_update)
prop_update.update(full_update,
disable_intermittent,
update_intermittent,
remove_intermittent)
class SubtestNode(TestNode):
@ -306,8 +312,9 @@ def build_conditional_tree(_, run_info_properties, results):
def build_unconditional_tree(_, run_info_properties, results):
root = expectedtree.Node(None, None)
for run_info, value in results.iteritems():
root.result_values |= value
for run_info, values in results.iteritems():
for value, count in values.iteritems():
root.result_values[value] += count
root.run_info.add(run_info)
return root
@ -322,7 +329,9 @@ class PropertyUpdate(object):
self.node = node
self.default_value = self.cls_default_value
self.has_result = False
self.results = defaultdict(set)
self.results = defaultdict(lambda: defaultdict(int))
self.update_intermittent = False
self.remove_intermittent = False
def run_info_by_condition(self, run_info_index, conditions):
run_info_by_condition = defaultdict(list)
@ -340,7 +349,7 @@ class PropertyUpdate(object):
self.node.has_result = True
self.check_default(value)
value = self.from_result_value(value)
self.results[run_info].add(value)
self.results[run_info][value] += 1
def check_default(self, result):
return
@ -373,7 +382,11 @@ class PropertyUpdate(object):
unconditional_value = self.default_value
return unconditional_value
def update(self, stability=None, full_update=False):
def update(self,
full_update=False,
disable_intermittent=None,
update_intermittent=False,
remove_intermittent=False):
"""Update the underlying manifest AST for this test based on all the
added results.
@ -384,8 +397,16 @@ class PropertyUpdate(object):
Conditionals not matched by any added result are not changed.
When `stability` is not None, disable any test that shows multiple
When `disable_intermittent` is not None, disable any test that shows multiple
unexpected results for the same set of parameters.
When `update_intermittent` is True, intermittent statuses will be recorded
as `expected` in the test metadata.
When `remove_intermittent` is True, old intermittent statuses will be removed
if no longer intermittent. This is only relevant if `update_intermittent` is
also True, because if False, the metadata will simply update one `expected`
status.
"""
if not self.has_result:
return
@ -393,12 +414,15 @@ class PropertyUpdate(object):
property_tree = self.property_builder(self.node.root.run_info_properties,
self.results)
conditions, errors = self.update_conditions(property_tree, full_update)
conditions, errors = self.update_conditions(property_tree,
full_update,
update_intermittent,
remove_intermittent)
for e in errors:
if stability:
if disable_intermittent:
condition = e.cond.children[0] if e.cond else None
msg = stability if isinstance(stability, (str, unicode)) else "unstable"
msg = disable_intermittent if isinstance(disable_intermittent, (str, unicode)) else "unstable"
self.node.set("disabled", msg, condition)
self.node.new_disabled = True
else:
@ -432,7 +456,11 @@ class PropertyUpdate(object):
self.to_ini_value(value),
condition)
def update_conditions(self, property_tree, full_update):
def update_conditions(self,
property_tree,
full_update,
update_intermittent,
remove_intermittent):
# This is complicated because the expected behaviour is complex
# The complexity arises from the fact that there are two ways of running
# the tool, with a full set of runs (full_update=True) or with partial metadata
@ -461,6 +489,8 @@ class PropertyUpdate(object):
# * Otherwise add conditionals for the run_info that doesn't match any
# remaining conditions
prev_default = None
self.update_intermittent = update_intermittent
self.remove_intermittent = remove_intermittent
current_conditions = self.node.get_conditions(self.property_name)
@ -474,6 +504,8 @@ class PropertyUpdate(object):
# value for all run_info, proceed as for a full update
if not current_conditions:
return self._update_conditions_full(property_tree,
update_intermittent,
remove_intermittent,
prev_default=prev_default)
conditions = []
@ -505,6 +537,8 @@ class PropertyUpdate(object):
conditions.append((condition.condition_node,
self.from_ini_value(condition.value)))
new_conditions, errors = self._update_conditions_full(property_tree,
update_intermittent,
remove_intermittent,
prev_default=prev_default)
conditions.extend(new_conditions)
return conditions, errors
@ -527,7 +561,7 @@ class PropertyUpdate(object):
nodes = [node_by_run_info[run_info] for run_info in run_infos
if run_info in node_by_run_info]
# If all the values are the same, update the value
if nodes and all(node.result_values == nodes[0].result_values for node in nodes):
if nodes and all(set(node.result_values.keys()) == set(nodes[0].result_values.keys()) for node in nodes):
current_value = self.from_ini_value(condition.value)
try:
new_value = self.updated_value(current_value,
@ -545,6 +579,8 @@ class PropertyUpdate(object):
new_conditions, new_errors = self.build_tree_conditions(property_tree,
run_info_with_condition,
update_intermittent,
remove_intermittent,
prev_default)
if new_conditions:
self.node.modified = True
@ -554,16 +590,32 @@ class PropertyUpdate(object):
return conditions, errors
def _update_conditions_full(self, property_tree, prev_default=None):
def _update_conditions_full(self,
property_tree,
update_intermittent,
remove_intermittent,
prev_default=None):
self.node.modified = True
conditions, errors = self.build_tree_conditions(property_tree, set(), prev_default)
conditions, errors = self.build_tree_conditions(property_tree,
set(),
update_intermittent,
remove_intermittent,
prev_default)
return conditions, errors
def build_tree_conditions(self, property_tree, run_info_with_condition, prev_default=None):
def build_tree_conditions(self,
property_tree,
run_info_with_condition,
update_intermittent,
remove_intermittent,
prev_default=None):
conditions = []
errors = []
self.update_intermittent = update_intermittent
self.remove_intermittent = remove_intermittent
value_count = defaultdict(int)
def to_count_value(v):
@ -650,10 +702,39 @@ class ExpectedUpdate(PropertyUpdate):
def from_result_value(self, result):
return result.status
def to_ini_value(self, value):
if isinstance(value, list):
return [str(item) for item in value]
return str(value)
def updated_value(self, current, new):
if len(new) > 1:
if len(new) > 1 and not self.update_intermittent and not isinstance(current, list):
raise ConditionError
return list(new)[0]
if not (self.update_intermittent or isinstance(current, list)):
return list(new)[0]
statuses = ["OK", "PASS", "FAIL", "ERROR", "TIMEOUT", "CRASH"]
status_priority = {value: i for i, value in enumerate(statuses)}
sorted_new = sorted(new.iteritems(), key=lambda x:(-1 * x[1],
status_priority.get(x[0],
len(status_priority))))
expected = [status for status, _ in sorted_new]
if self.update_intermittent:
if not self.remove_intermittent:
# If we are not removing existing recorded intermittents that don't
# appear in new, manually add them back in to expected.
if isinstance(current, list):
expected.extend([status for status in current if status not in expected])
if len(expected) == 1:
return expected[0]
return expected
# If nothing has changed and not self.update_intermittent, preserve existing
# intermittent.
if set(expected).issubset(set(current)):
return current
return expected[0]
class MaxAssertsUpdate(PropertyUpdate):

Просмотреть файл

@ -54,7 +54,7 @@ class RunInfo(object):
def update_expected(test_paths, serve_root, log_file_names,
update_properties, rev_old=None, rev_new="HEAD",
full_update=False, sync_root=None, stability=None):
full_update=False, sync_root=None, disable_intermittent=None):
"""Update the metadata files for web-platform-tests based on
the results obtained in a previous run or runs
@ -66,12 +66,12 @@ def update_expected(test_paths, serve_root, log_file_names,
for metadata_path, updated_ini in update_from_logs(id_test_map,
update_properties,
stability,
disable_intermittent,
full_update,
*log_file_names):
write_new_expected(metadata_path, updated_ini)
if stability:
if disable_intermittent:
for test in updated_ini.iterchildren():
for subtest in test.iterchildren():
if subtest.new_disabled:
@ -217,7 +217,7 @@ def load_test_data(test_paths):
return id_test_map
def update_from_logs(id_test_map, update_properties, stability, full_update,
def update_from_logs(id_test_map, update_properties, disable_intermittent, full_update,
*log_filenames):
updater = ExpectedUpdater(id_test_map)
@ -227,11 +227,16 @@ def update_from_logs(id_test_map, update_properties, stability, full_update,
with open(log_filename) as f:
updater.update_from_log(f)
for item in update_results(id_test_map, update_properties, stability, full_update):
for item in update_results(id_test_map, update_properties, disable_intermittent, full_update):
yield item
def update_results(id_test_map, update_properties, stability, full_update):
def update_results(id_test_map,
update_properties,
full_update,
disable_intermittent,
update_intermittent=False,
remove_intermittent=False):
test_file_items = set(id_test_map.itervalues())
default_expected_by_type = {}
@ -242,8 +247,9 @@ def update_results(id_test_map, update_properties, stability, full_update):
default_expected_by_type[(test_type, True)] = test_cls.subtest_result_cls.default_expected
for test_file in test_file_items:
updated_expected = test_file.update(default_expected_by_type, update_properties, stability,
full_update)
updated_expected = test_file.update(default_expected_by_type, update_properties,
full_update, disable_intermittent,
update_intermittent, remove_intermittent)
if updated_expected is not None and updated_expected.modified:
yield test_file.metadata_path, updated_expected
@ -334,11 +340,11 @@ class ExpectedUpdater(object):
"subtest": subtest["name"],
"status": subtest["status"],
"expected": subtest.get("expected"),
"known_intermittent": subtest.get("known_intermittent")})
"known_intermittent": subtest.get("known_intermittent", [])})
action_map["test_end"]({"test": test["test"],
"status": test["status"],
"expected": test.get("expected"),
"known_intermittent": test.get("known_intermittent")})
"known_intermittent": test.get("known_intermittent", [])})
if "asserts" in test:
asserts = test["asserts"]
action_map["assertion_count"]({"test": test["test"],
@ -587,7 +593,8 @@ class TestFileData(object):
return rv
def update(self, default_expected_by_type, update_properties,
stability=None, full_update=False):
full_update=False, disable_intermittent=None, update_intermittent=False,
remove_intermittent=False):
# If we are doing a full update, we may need to prune missing nodes
# even if the expectations didn't change
if not self.requires_update and not full_update:
@ -644,11 +651,20 @@ class TestFileData(object):
elif prop == "asserts":
item_expected.set_asserts(run_info, value)
expected.update(stability=stability, full_update=full_update)
expected.update(full_update=full_update,
disable_intermittent=disable_intermittent,
update_intermittent=update_intermittent,
remove_intermittent=remove_intermittent)
for test in expected.iterchildren():
for subtest in test.iterchildren():
subtest.update(stability=stability, full_update=full_update)
test.update(stability=stability, full_update=full_update)
subtest.update(full_update=full_update,
disable_intermittent=disable_intermittent,
update_intermittent=update_intermittent,
remove_intermittent=remove_intermittent)
test.update(full_update=full_update,
disable_intermittent=disable_intermittent,
update_intermittent=update_intermittent,
remove_intermittent=remove_intermittent)
return expected

Просмотреть файл

@ -561,7 +561,8 @@ class TestRunnerManager(threading.Thread):
if test.disabled(result.name):
continue
expected = test.expected(result.name)
is_unexpected = expected != result.status
known_intermittent = test.known_intermittent(result.name)
is_unexpected = expected != result.status and result.status not in known_intermittent
if is_unexpected:
self.unexpected_count += 1
@ -572,6 +573,7 @@ class TestRunnerManager(threading.Thread):
result.status,
message=result.message,
expected=expected,
known_intermittent=known_intermittent,
stack=result.stack)
# We have a couple of status codes that are used internally, but not exposed to the
@ -583,13 +585,14 @@ class TestRunnerManager(threading.Thread):
status_subns = {"INTERNAL-ERROR": "ERROR",
"EXTERNAL-TIMEOUT": "TIMEOUT"}
expected = test.expected()
known_intermittent = test.known_intermittent()
status = status_subns.get(file_result.status, file_result.status)
if self.browser.check_crash(test.id) and status != "CRASH":
self.logger.info("Found a crash dump; should change status from %s to CRASH but this causes instability" % (status,))
self.test_count += 1
is_unexpected = expected != status
is_unexpected = expected != status and status not in known_intermittent
if is_unexpected:
self.unexpected_count += 1
self.logger.debug("Unexpected count in this thread %i" % self.unexpected_count)
@ -608,6 +611,7 @@ class TestRunnerManager(threading.Thread):
status,
message=file_result.message,
expected=expected,
known_intermittent=known_intermittent,
extra=file_result.extra,
stack=file_result.stack)

Просмотреть файл

@ -3,6 +3,8 @@ import sys
import pytest
from .. import expectedtree, metadata
from collections import defaultdict
def dump_tree(tree):
rv = []
@ -18,15 +20,22 @@ def dump_tree(tree):
rv.append("%s<%s>" % (prefix, data))
for child in sorted(node.children, key=lambda x:x.value):
dump_node(child, indent + 2)
dump_node(tree)
return "\n".join(rv)
def results_object(results):
    # Convert a list of (run_info dict, status) pairs into the nested
    # mapping shape build_tree() now expects: {RunInfo: {status: count}}.
    # Repeated (run_info, status) pairs accumulate their counts, which is
    # what the intermittent-status ordering in this patch relies on.
    results_obj = defaultdict(lambda: defaultdict(int))
    for run_info, status in results:
        run_info = metadata.RunInfo(run_info)
        results_obj[run_info][status] += 1
    return results_obj
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_build_tree_0():
# Pass iff debug
# Pass if debug
results = [({"os": "linux", "version": "18.04", "debug": True}, "FAIL"),
({"os": "linux", "version": "18.04", "debug": False}, "PASS"),
({"os": "linux", "version": "16.04", "debug": False}, "PASS"),
@ -34,8 +43,8 @@ def test_build_tree_0():
({"os": "mac", "version": "10.12", "debug": False}, "PASS"),
({"os": "win", "version": "7", "debug": False}, "PASS"),
({"os": "win", "version": "10", "debug": False}, "PASS")]
results = {metadata.RunInfo(run_info): set([status]) for run_info, status in results}
tree = expectedtree.build_tree(["os", "version", "debug"], {}, results)
results_obj = results_object(results)
tree = expectedtree.build_tree(["os", "version", "debug"], {}, results_obj)
expected = """<root>
<debug:False result_values:PASS>
@ -55,8 +64,8 @@ def test_build_tree_1():
({"os": "mac", "version": "10.12", "debug": False}, "FAIL"),
({"os": "win", "version": "7", "debug": False}, "FAIL"),
({"os": "win", "version": "10", "debug": False}, "PASS")]
results = {metadata.RunInfo(run_info): set([status]) for run_info, status in results}
tree = expectedtree.build_tree(["os", "debug"], {"os": ["version"]}, results)
results_obj = results_object(results)
tree = expectedtree.build_tree(["os", "debug"], {"os": ["version"]}, results_obj)
expected = """<root>
<os:linux result_values:PASS>
@ -80,8 +89,8 @@ def test_build_tree_2():
({"os": "mac", "version": "10.12", "debug": False}, "PASS"),
({"os": "win", "version": "7", "debug": False}, "PASS"),
({"os": "win", "version": "10", "debug": False}, "PASS")]
results = {metadata.RunInfo(run_info): set([status]) for run_info, status in results}
tree = expectedtree.build_tree(["os", "debug"], {"os": ["version"]}, results)
results_obj = results_object(results)
tree = expectedtree.build_tree(["os", "debug"], {"os": ["version"]}, results_obj)
expected = """<root>
<os:linux>
@ -101,9 +110,21 @@ def test_build_tree_3():
results = [({"os": "linux", "version": "18.04", "debug": True, "unused": False}, "PASS"),
({"os": "linux", "version": "18.04", "debug": True, "unused": True}, "FAIL")]
results = {metadata.RunInfo(run_info): set([status]) for run_info, status in results}
tree = expectedtree.build_tree(["os", "debug"], {"os": ["version"]}, results)
results_obj = results_object(results)
tree = expectedtree.build_tree(["os", "debug"], {"os": ["version"]}, results_obj)
expected = """<root result_values:FAIL,PASS>"""
assert dump_tree(tree) == expected
def test_build_tree_4():
    # Check counts for multiple statuses
    # Three runs with identical run_info (2x PASS, 1x FAIL): the tree's
    # result_values should accumulate per-status counts rather than a
    # de-duplicated set of statuses.
    results = [({"os": "linux", "version": "18.04", "debug": False}, "FAIL"),
               ({"os": "linux", "version": "18.04", "debug": False}, "PASS"),
               ({"os": "linux", "version": "18.04", "debug": False}, "PASS")]
    results_obj = results_object(results)
    tree = expectedtree.build_tree(["os", "version", "debug"], {}, results_obj)

    assert tree.result_values["PASS"] == 2
    assert tree.result_values["FAIL"] == 1

Просмотреть файл

@ -39,6 +39,8 @@ item_classes = {"testharness": manifest_item.TestharnessTest,
default_run_info = {"debug": False, "os": "linux", "version": "18.04", "processor": "x86_64", "bits": 64}
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
def reset_globals():
@ -55,7 +57,9 @@ def get_run_info(overrides):
def update(tests, *logs, **kwargs):
full_update = kwargs.pop("full_update", False)
stability = kwargs.pop("stability", False)
disable_intermittent = kwargs.pop("disable_intermittent", False)
update_intermittent = kwargs.pop("update_intermittent", False)
remove_intermittent = kwargs.pop("remove_intermittent", False)
assert not kwargs
id_test_map, updater = create_updater(tests)
@ -76,8 +80,10 @@ def update(tests, *logs, **kwargs):
return list(metadata.update_results(id_test_map,
update_properties,
stability,
full_update))
full_update,
disable_intermittent,
update_intermittent,
remove_intermittent))
def create_updater(tests, url_base="/", **kwargs):
@ -129,7 +135,7 @@ def create_test_manifest(tests, url_base="/"):
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_0():
tests = [("path/to/test.htm", ["/path/to/test.htm"], "testharness",
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: FAIL""")]
@ -151,7 +157,6 @@ def test_update_0():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_1():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
@ -172,10 +177,407 @@ def test_update_1():
assert new_manifest.get_test(test_id).children[0].get("expected", default_run_info) == "FAIL"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_known_intermittent_1():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: PASS""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
log_2 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == ["PASS", "FAIL"]
def test_update_known_intermittent_2():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: PASS""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == "FAIL"
def test_update_existing_known_intermittent():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: [PASS, FAIL]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "ERROR",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
log_2 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == ["PASS", "ERROR", "FAIL"]
def test_update_remove_previous_intermittent():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: [PASS, FAIL]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "ERROR",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
log_2 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests,
log_0,
log_1,
log_2,
update_intermittent=True,
remove_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == ["PASS", "ERROR"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_new_test_with_intermittent():
tests = [("path/to/test.htm", [test_id], "testharness", None)]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
log_2 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test("test.htm") is None
assert len(new_manifest.get_test(test_id).children) == 1
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == ["PASS", "FAIL"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_expected_tie_resolution():
tests = [("path/to/test.htm", [test_id], "testharness", None)]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS"}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0, log_1, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == ["PASS", "FAIL"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_reorder_expected():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: [PASS, FAIL]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
log_1 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "FAIL",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
log_2 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == ["FAIL", "PASS"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_and_preserve_unchanged_expected_intermittent():
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected:
if os == "android": [PASS, FAIL]
FAIL""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "FAIL",
"expected": "PASS",
"known_intermittent": ["FAIL"]})],
run_info={"os": "android"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]})],
run_info={"os": "android"})
log_2 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "PASS",
"expected": "FAIL"})])
updated = update(tests, log_0, log_1, log_2)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
run_info_1 = default_run_info.copy()
run_info_1.update({"os": "android"})
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get(
"expected", run_info_1) == ["PASS", "FAIL"]
assert new_manifest.get_test(test_id).get(
"expected", default_run_info) == "PASS"
def test_update_test_with_intermittent_to_one_expected_status():
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: [PASS, FAIL]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
"subtest": "test1",
"status": "ERROR",
"expected": "PASS",
"known_intermittent": ["FAIL"]}),
("test_end", {"test": test_id,
"status": "OK"})])
updated = update(tests, log_0)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).children[0].get(
"expected", default_run_info) == "ERROR"
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_intermittent_with_conditions():
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected:
if os == "android": [PASS, FAIL]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "TIMEOUT",
"expected": "PASS",
"known_intermittent": ["FAIL"]})],
run_info={"os": "android"})
log_1 = suite_log([("test_start", {"test": test_id}),
("test_end", {"test": test_id,
"status": "PASS",
"expected": "PASS",
"known_intermittent": ["FAIL"]})],
run_info={"os": "android"})
updated = update(tests, log_0, log_1, update_intermittent=True)
new_manifest = updated[0][1]
assert not new_manifest.is_empty
run_info_1 = default_run_info.copy()
run_info_1.update({"os": "android"})
assert not new_manifest.is_empty
assert new_manifest.get_test(test_id).get(
"expected", run_info_1) == ["PASS", "TIMEOUT", "FAIL"]
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_update_and_remove_intermittent_with_conditions():
    """With ``remove_intermittent=True`` an unobserved status is dropped.

    Metadata starts with ``[PASS, FAIL]`` under ``os == "android"``.  The
    two logged runs show TIMEOUT and PASS but never FAIL, so updating with
    both ``update_intermittent`` and ``remove_intermittent`` must produce
    ``[PASS, TIMEOUT]`` — FAIL is removed because it no longer occurs.
    """
    tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
  expected:
    if os == "android": [PASS, FAIL]""")]
    log_0 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "TIMEOUT",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})
    log_1 = suite_log([("test_start", {"test": test_id}),
                       ("test_end", {"test": test_id,
                                     "status": "PASS",
                                     "expected": "PASS",
                                     "known_intermittent": ["FAIL"]})],
                      run_info={"os": "android"})
    updated = update(tests, log_0, log_1, update_intermittent=True, remove_intermittent=True)
    new_manifest = updated[0][1]
    # The original asserted is_empty twice (copy-paste); once is enough.
    assert not new_manifest.is_empty
    run_info_1 = default_run_info.copy()
    run_info_1.update({"os": "android"})
    assert new_manifest.get_test(test_id).get(
        "expected", run_info_1) == ["PASS", "TIMEOUT"]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_skip_0():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
@ -196,7 +598,6 @@ def test_skip_0():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_new_subtest():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected: FAIL""")]
@ -222,7 +623,6 @@ def test_new_subtest():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_multiple_0():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected: FAIL""")]
@ -262,7 +662,6 @@ def test_update_multiple_0():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_multiple_1():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected: FAIL""")]
@ -307,7 +706,6 @@ def test_update_multiple_1():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_multiple_2():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected: FAIL""")]
@ -348,7 +746,6 @@ def test_update_multiple_2():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_multiple_3():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected:
@ -391,7 +788,6 @@ def test_update_multiple_3():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_ignore_existing():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected:
@ -434,8 +830,7 @@ def test_update_ignore_existing():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_new_test():
test_id = "/path/to/test.html"
tests = [("path/to/test.html", [test_id], "testharness", None)]
tests = [("path/to/test.htm", [test_id], "testharness", None)]
log_0 = suite_log([("test_start", {"test": test_id}),
("test_status", {"test": test_id,
@ -450,7 +845,7 @@ def test_update_new_test():
run_info_1 = default_run_info.copy()
assert not new_manifest.is_empty
assert new_manifest.get_test("test.html") is None
assert new_manifest.get_test("test.htm") is None
assert len(new_manifest.get_test(test_id).children) == 1
assert new_manifest.get_test(test_id).children[0].get(
"expected", run_info_1) == "FAIL"
@ -459,7 +854,6 @@ def test_update_new_test():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_duplicate():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected: ERROR""")]
@ -481,8 +875,7 @@ def test_update_duplicate():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_stability():
test_id = "/path/to/test.htm"
def test_update_disable_intermittent():
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected: ERROR""")]
@ -494,7 +887,7 @@ def test_update_stability():
("test_end", {"test": test_id,
"status": "FAIL"})])
updated = update(tests, log_0, log_1, stability="Some message")
updated = update(tests, log_0, log_1, disable_intermittent="Some message")
new_manifest = updated[0][1]
run_info_1 = default_run_info.copy()
@ -505,7 +898,6 @@ def test_update_stability():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_stability_conditional_instability():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected: ERROR""")]
@ -523,7 +915,7 @@ def test_update_stability_conditional_instability():
"status": "FAIL"})],
run_info={"os": "mac"})
updated = update(tests, log_0, log_1, log_2, stability="Some message")
updated = update(tests, log_0, log_1, log_2, disable_intermittent="Some message")
new_manifest = updated[0][1]
run_info_1 = default_run_info.copy()
run_info_1.update({"os": "linux"})
@ -542,7 +934,6 @@ def test_update_stability_conditional_instability():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_full():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected:
@ -595,7 +986,6 @@ def test_update_full():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_full_unknown():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected:
@ -640,7 +1030,6 @@ def test_update_full_unknown():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_default():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
[test1]
expected:
@ -674,7 +1063,6 @@ def test_update_default():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_default_1():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected:
@ -707,7 +1095,6 @@ def test_update_default_1():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_default_2():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """
[test.htm]
expected:
@ -740,7 +1127,6 @@ def test_update_default_2():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_0():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
max-asserts: 4
min-asserts: 2
@ -765,7 +1151,6 @@ def test_update_assertion_count_0():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_1():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
max-asserts: 4
min-asserts: 2
@ -790,7 +1175,6 @@ def test_update_assertion_count_1():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_2():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
max-asserts: 4
min-asserts: 2
@ -811,7 +1195,6 @@ def test_update_assertion_count_2():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_3():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
max-asserts: 4
min-asserts: 2
@ -846,7 +1229,6 @@ def test_update_assertion_count_3():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_assertion_count_4():
test_id = "/path/to/test.htm"
tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]""")]
log_0 = suite_log([("test_start", {"test": test_id}),
@ -878,8 +1260,6 @@ def test_update_assertion_count_4():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_lsan_0():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
@ -897,8 +1277,6 @@ def test_update_lsan_0():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_lsan_1():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, """
lsan-allowed: [foo]""")]
@ -919,8 +1297,6 @@ lsan-allowed: [foo]""")]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_lsan_2():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/__dir__", ["path/__dir__"], None, """
lsan-allowed: [foo]"""),
@ -943,8 +1319,6 @@ lsan-allowed: [foo]"""),
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_lsan_3():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
@ -967,7 +1341,7 @@ def test_update_lsan_3():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_wptreport_0():
tests = [("path/to/test.htm", ["/path/to/test.htm"], "testharness",
tests = [("path/to/test.htm", [test_id], "testharness",
"""[test.htm]
[test1]
expected: FAIL""")]
@ -990,8 +1364,8 @@ def test_update_wptreport_0():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_wptreport_1():
tests = [("path/to/test.htm", ["/path/to/test.htm"], "testharness", ""),
("path/to/__dir__", ["path/to/__dir__"], None, "")]
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
log = {"run_info": default_run_info.copy(),
"results": [],
@ -1007,8 +1381,6 @@ def test_update_wptreport_1():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_0():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
@ -1028,8 +1400,6 @@ def test_update_leak_total_0():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_1():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, "")]
@ -1046,8 +1416,6 @@ def test_update_leak_total_1():
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_2():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, """
leak-total: 110""")]
@ -1065,8 +1433,6 @@ leak-total: 110""")]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_3():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, """
leak-total: 100""")]
@ -1087,8 +1453,6 @@ leak-total: 100""")]
@pytest.mark.xfail(sys.version[0] == "3",
reason="metadata doesn't support py3")
def test_update_leak_total_4():
test_id = "/path/to/test.htm"
dir_id = "path/to/__dir__"
tests = [("path/to/test.htm", [test_id], "testharness", ""),
("path/to/__dir__", [dir_id], None, """
leak-total: 110""")]
@ -1109,135 +1473,8 @@ leak-total: 110""")]
assert new_manifest.has_key("leak-threshold") is False
def dump_tree(tree):
    """Render *tree* as an indented text outline for comparison in tests.

    Each node becomes ``<prop:value>`` (the root becomes ``<root>``),
    optionally suffixed with its sorted ``update_values``; children are
    emitted in ``value`` order, indented two spaces per depth level.
    """
    lines = []

    def render(node, depth=0):
        # The root carries no property name; everything else is "prop:value".
        label = "root" if not node.prop else "%s:%s" % (node.prop, node.value)
        if node.update_values:
            label += " update_values:%s" % (",".join(sorted(node.update_values)))
        lines.append("%s<%s>" % (" " * depth, label))
        for child in sorted(node.children, key=lambda c: c.value):
            render(child, depth + 2)

    render(tree)
    return "\n".join(lines)
# @pytest.mark.xfail(sys.version[0] == "3",
# reason="metadata doesn't support py3")
# def test_property_tree():
# run_info_values = [{"os": "linux", "version": "18.04", "debug": False},
# {"os": "linux", "version": "18.04", "debug": True},
# {"os": "linux", "version": "16.04", "debug": False},
# {"os": "mac", "version": "10.12", "debug": True},
# {"os": "mac", "version": "10.12", "debug": False},
# {"os": "win", "version": "7", "debug": False},
# {"os": "win", "version": "10", "debug": False}]
# run_info_values = [metadata.RunInfo(item) for item in run_info_values]
# tree = metadata.build_property_tree(["os", "version", "debug"],
# run_info_values)
# expected = """<root>
# <os:linux>
# <version:16.04>
# <version:18.04>
# <debug:False>
# <debug:True>
# <os:mac>
# <debug:False>
# <debug:True>
# <os:win>
# <version:10>
# <version:7>"""
# assert dump_tree(tree) == expected
# @pytest.mark.xfail(sys.version[0] == "3",
# reason="metadata doesn't support py3")
# def test_propogate_up():
# update_values = [({"os": "linux", "version": "18.04", "debug": False}, "FAIL"),
# ({"os": "linux", "version": "18.04", "debug": True}, "FAIL"),
# ({"os": "linux", "version": "16.04", "debug": False}, "FAIL"),
# ({"os": "mac", "version": "10.12", "debug": True}, "PASS"),
# ({"os": "mac", "version": "10.12", "debug": False}, "PASS"),
# ({"os": "win", "version": "7", "debug": False}, "PASS"),
# ({"os": "win", "version": "10", "debug": False}, "FAIL")]
# update_values = {metadata.RunInfo(item[0]): item[1] for item in update_values}
# tree = metadata.build_property_tree(["os", "version", "debug"],
# update_values.keys())
# for node in tree:
# for run_info in node.run_info:
# node.update_values.add(update_values[run_info])
# optimiser = manifestupdate.OptimiseConditionalTree()
# optimiser.propogate_up(tree)
# expected = """<root>
# <os:linux update_values:FAIL>
# <os:mac update_values:PASS>
# <os:win>
# <version:10 update_values:FAIL>
# <version:7 update_values:PASS>"""
# assert dump_tree(tree) == expected
# @pytest.mark.xfail(sys.version[0] == "3",
# reason="metadata doesn't support py3")
# def test_common_properties():
# update_values = [({"os": "linux", "version": "18.04", "debug": False}, "PASS"),
# ({"os": "linux", "version": "18.04", "debug": True}, "FAIL"),
# ({"os": "linux", "version": "16.04", "debug": False}, "PASS"),
# ({"os": "mac", "version": "10.12", "debug": True}, "FAIL"),
# ({"os": "mac", "version": "10.12", "debug": False}, "PASS"),
# ({"os": "win", "version": "7", "debug": False}, "PASS"),
# ({"os": "win", "version": "10", "debug": False}, "PASS")]
# update_values = {metadata.RunInfo(item[0]): item[1] for item in update_values}
# tree = metadata.build_property_tree(["os", "version", "debug"],
# update_values.keys())
# for node in tree:
# for run_info in node.run_info:
# node.update_values.add(update_values[run_info])
# optimiser = manifestupdate.OptimiseConditionalTree()
# optimiser.propogate_up(tree)
# expected = """<root>
# <os:linux>
# <version:16.04 update_values:PASS>
# <version:18.04>
# <debug:False update_values:PASS>
# <debug:True update_values:FAIL>
# <os:mac>
# <debug:False update_values:PASS>
# <debug:True update_values:FAIL>
# <os:win update_values:PASS>"""
# assert dump_tree(tree) == expected
# optimiser.common_properties(tree)
# expected = """<root>
# <os:linux>
# <debug:False update_values:PASS>
# <debug:True update_values:FAIL>
# <os:mac update_values:PASS>
# <debug:False update_values:PASS>
# <debug:True update_values:FAIL>
# <os:win update_values: PASS>"""
# assert dump_tree(tree) == expected
class TestStep(Step):
    """Minimal pipeline step for the state tests: stashes a manifest on state."""

    def create(self, state):
        # Build a one-test manifest and store it so later steps/assertions
        # can observe that state survives across steps.
        path = "path/to/test.htm"
        state.foo = create_test_manifest([(path, ["/" + path], "testharness", "")])

Просмотреть файл

@ -28,7 +28,7 @@ class UpdateExpected(Step):
rev_old=None,
full_update=state.full_update,
sync_root=sync_root,
stability=state.stability)
disable_intermittent=state.disable_intermittent)
class CreateMetadataPatch(Step):

Просмотреть файл

@ -87,7 +87,7 @@ class UpdateMetadata(Step):
kwargs = state.kwargs
with state.push(["local_tree", "sync_tree", "paths", "serve_root"]):
state.run_log = kwargs["run_log"]
state.stability = kwargs["stability"]
state.disable_intermittent = kwargs["disable_intermittent"]
state.patch = kwargs["patch"]
state.suite_name = kwargs["suite_name"]
state.product = kwargs["product"]

Просмотреть файл

@ -611,7 +611,7 @@ def create_parser_update(product_choices=None):
help="Sync the tests with the latest from upstream (implies --patch)")
parser.add_argument("--full", action="store_true", default=False,
help=("For all tests that are updated, remove any existing conditions and missing subtests"))
parser.add_argument("--stability", nargs="?", action="store", const="unstable", default=None,
parser.add_argument("--disable_intermittent", nargs="?", action="store", const="unstable", default=None,
help=("Reason for disabling tests. When updating test results, disable tests that have "
"inconsistent results across many runs with the given reason."))
parser.add_argument("--no-remove-obsolete", action="store_false", dest="remove_obsolete", default=True,