Bug 1808288 - ride along: lint: fix some windows CR r=linter-reviewers,marco

Depends on D165849

Differential Revision: https://phabricator.services.mozilla.com/D165850
Author: Sylvestre Ledru, 2023-01-03 14:54:40 +00:00
Parent: 9014e2eb4b
Commit: 8c1ca7d7bc
8 changed files: 727 additions and 727 deletions
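Every hunk below is the same mechanical change: Windows CRLF line endings rewritten as Unix LF, which is why each file shows equal additions and deletions. A minimal sketch of that normalization, assuming a text file small enough to read whole (the path is a placeholder, not part of this commit):

# Rewrite CRLF line endings as LF, the kind of fix this commit applies.
from pathlib import Path

path = Path("example.py")  # placeholder; the real lint walks the whole tree
data = path.read_bytes()
if b"\r\n" in data:
    path.write_bytes(data.replace(b"\r\n", b"\n"))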

View file

@@ -31,7 +31,7 @@ def WebIDLTest(parser, harness):
        interface QNameDerived : QNameBase {
          attribute long long foo;
          attribute byte bar;
        };
    """
    )

View file

@@ -1,22 +1,22 @@
# coding=utf8

# Any copyright is dedicated to the Public Domain.
# http://creativecommons.org/publicdomain/zero/1.0/

from fluent.migrate.helpers import transforms_from
from fluent.migrate import COPY_PATTERN


def migrate(ctx):
    """Bug 1577257 - Share logic behind panel headers across the UI, part {index}"""

    ctx.add_transforms(
        "browser/browser/appmenu.ftl",
        "browser/browser/appmenu.ftl",
        transforms_from(
            """
profiler-popup-header-text =
    { COPY_PATTERN(from_path, "profiler-popup-title.value") }
""",
            from_path="browser/browser/appmenu.ftl",
        ),
    )
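For context, COPY_PATTERN copies the pattern of an existing Fluent message into a new one; here the target and reference are the same file, browser/browser/appmenu.ftl, which is why the path appears twice. A minimal sketch of the effect, with hypothetical message bodies (only the message IDs appear in the recipe):

# Hypothetical excerpt of browser/browser/appmenu.ftl before the migration:
profiler-popup-title =
    .value = Profiler

# After the recipe runs, the title's pattern is copied into the new header message:
profiler-popup-header-text = Profiler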

View file

@@ -1,287 +1,287 @@
import math

import mozinfo


class Bisect(object):

    "Class for creating, bisecting and summarizing for --bisect-chunk option."

    def __init__(self, harness):
        super(Bisect, self).__init__()
        self.summary = []
        self.contents = {}
        self.repeat = 10
        self.failcount = 0
        self.max_failures = 3

    def setup(self, tests):
        """This method is used to initialize various variables that are required
        for test bisection"""
        status = 0
        self.contents.clear()
        # We need totalTests key in contents for sanity check
        self.contents["totalTests"] = tests
        self.contents["tests"] = tests
        self.contents["loop"] = 0
        return status

    def reset(self, expectedError, result):
        """This method is used to initialize self.expectedError and self.result
        for each loop in runtests."""
        self.expectedError = expectedError
        self.result = result

    def get_tests_for_bisection(self, options, tests):
        """Make a list of tests for bisection from a given list of tests"""
        bisectlist = []
        for test in tests:
            bisectlist.append(test)
            if test.endswith(options.bisectChunk):
                break

        return bisectlist

    def pre_test(self, options, tests, status):
        """This method is used to call other methods for setting up variables and
        getting the list of tests for bisection."""
        if options.bisectChunk == "default":
            return tests
        # The second condition in 'if' is required to verify that the failing
        # test is the last one.
        elif "loop" not in self.contents or not self.contents["tests"][-1].endswith(
            options.bisectChunk
        ):
            tests = self.get_tests_for_bisection(options, tests)
            status = self.setup(tests)

        return self.next_chunk_binary(options, status)

    def post_test(self, options, expectedError, result):
        """This method is used to call other methods to summarize results and check whether a
        sanity check is done or not."""
        self.reset(expectedError, result)
        status = self.summarize_chunk(options)
        # Check whether sanity check has to be done. Also it is necessary to check whether
        # options.bisectChunk is present in self.expectedError as we do not want to run
        # if it is "default".
        if status == -1 and options.bisectChunk in self.expectedError:
            # In case we have a debug build, we don't want to run a sanity
            # check, will take too much time.
            if mozinfo.info["debug"]:
                return status

            testBleedThrough = self.contents["testsToRun"][0]
            tests = self.contents["totalTests"]
            tests.remove(testBleedThrough)
            # To make sure that the failing test is dependent on some other
            # test.
            if options.bisectChunk in testBleedThrough:
                return status

            status = self.setup(tests)
            self.summary.append("Sanity Check:")

        return status

    def next_chunk_reverse(self, options, status):
        "This method is used to bisect the tests in a reverse search fashion."

        # Base Cases.
        if self.contents["loop"] <= 1:
            self.contents["testsToRun"] = self.contents["tests"]
            if self.contents["loop"] == 1:
                self.contents["testsToRun"] = [self.contents["tests"][-1]]
            self.contents["loop"] += 1
            return self.contents["testsToRun"]

        if "result" in self.contents:
            if self.contents["result"] == "PASS":
                chunkSize = self.contents["end"] - self.contents["start"]
                self.contents["end"] = self.contents["start"] - 1
                self.contents["start"] = self.contents["end"] - chunkSize

            # self.contents['result'] will be expected error only if it fails.
            elif self.contents["result"] == "FAIL":
                self.contents["tests"] = self.contents["testsToRun"]
                status = 1  # for initializing

        # initialize
        if status:
            totalTests = len(self.contents["tests"])
            chunkSize = int(math.ceil(totalTests / 10.0))
            self.contents["start"] = totalTests - chunkSize - 1
            self.contents["end"] = totalTests - 2

        start = self.contents["start"]
        end = self.contents["end"] + 1
        self.contents["testsToRun"] = self.contents["tests"][start:end]
        self.contents["testsToRun"].append(self.contents["tests"][-1])
        self.contents["loop"] += 1

        return self.contents["testsToRun"]

    def next_chunk_binary(self, options, status):
        "This method is used to bisect the tests in a binary search fashion."

        # Base cases.
        if self.contents["loop"] <= 1:
            self.contents["testsToRun"] = self.contents["tests"]
            if self.contents["loop"] == 1:
                self.contents["testsToRun"] = [self.contents["tests"][-1]]
            self.contents["loop"] += 1
            return self.contents["testsToRun"]

        # Initialize the contents dict.
        if status:
            totalTests = len(self.contents["tests"])
            self.contents["start"] = 0
            self.contents["end"] = totalTests - 2

        # Floor division keeps mid usable as a list index on Python 3;
        # plain / would produce a float (flagged by pylint --py3k W1619).
        mid = (self.contents["start"] + self.contents["end"]) // 2
        if "result" in self.contents:
            if self.contents["result"] == "PASS":
                self.contents["end"] = mid

            elif self.contents["result"] == "FAIL":
                self.contents["start"] = mid + 1

        mid = (self.contents["start"] + self.contents["end"]) // 2
        start = mid + 1
        end = self.contents["end"] + 1
        self.contents["testsToRun"] = self.contents["tests"][start:end]
        if not self.contents["testsToRun"]:
            self.contents["testsToRun"].append(self.contents["tests"][mid])
        self.contents["testsToRun"].append(self.contents["tests"][-1])
        self.contents["loop"] += 1

        return self.contents["testsToRun"]

    def summarize_chunk(self, options):
        "This method is used to summarize the results after the list of tests is run."
        if options.bisectChunk == "default":
            # if no expectedError that means all the tests have successfully
            # passed.
            if len(self.expectedError) == 0:
                return -1
            # dict.keys() is a view on Python 3, so materialize it before indexing.
            options.bisectChunk = list(self.expectedError.keys())[0]
            self.summary.append("\tFound Error in test: %s" % options.bisectChunk)
            return 0

        # If options.bisectChunk is not in self.result then we need to move to
        # the next run.
        if options.bisectChunk not in self.result:
            return -1

        self.summary.append("\tPass %d:" % self.contents["loop"])
        if len(self.contents["testsToRun"]) > 1:
            self.summary.append(
                "\t\t%d test files(start,end,failing). [%s, %s, %s]"
                % (
                    len(self.contents["testsToRun"]),
                    self.contents["testsToRun"][0],
                    self.contents["testsToRun"][-2],
                    self.contents["testsToRun"][-1],
                )
            )
        else:
            self.summary.append("\t\t1 test file [%s]" % self.contents["testsToRun"][0])
            return self.check_for_intermittent(options)

        if self.result[options.bisectChunk] == "PASS":
            self.summary.append("\t\tno failures found.")
            if self.contents["loop"] == 1:
                status = -1
            else:
                self.contents["result"] = "PASS"
                status = 0

        elif self.result[options.bisectChunk] == "FAIL":
            if "expectedError" not in self.contents:
                self.summary.append("\t\t%s failed." % self.contents["testsToRun"][-1])
                self.contents["expectedError"] = self.expectedError[options.bisectChunk]
                status = 0

            elif (
                self.expectedError[options.bisectChunk]
                == self.contents["expectedError"]
            ):
                self.summary.append(
                    "\t\t%s failed with expected error."
                    % self.contents["testsToRun"][-1]
                )
                self.contents["result"] = "FAIL"
                status = 0

                # This code checks for test-bleedthrough. Should work for any
                # algorithm.
                numberOfTests = len(self.contents["testsToRun"])
                if numberOfTests < 3:
                    # This means that only 2 tests are run. Since the last test
                    # is the failing test itself, the bleedthrough test is the
                    # first test.
                    self.summary.append(
                        "TEST-UNEXPECTED-FAIL | %s | Bleedthrough detected, this test is the "
                        "root cause for many of the above failures"
                        % self.contents["testsToRun"][0]
                    )
                    status = -1
            else:
                self.summary.append(
                    "\t\t%s failed with different error."
                    % self.contents["testsToRun"][-1]
                )
                status = -1

        return status

    def check_for_intermittent(self, options):
        "This method is used to check whether a test is an intermittent."
        if self.result[options.bisectChunk] == "PASS":
            self.summary.append(
                "\t\tThe test %s passed." % self.contents["testsToRun"][0]
            )
            if self.repeat > 0:
                # loop is set to 1 to again run the single test.
                self.contents["loop"] = 1
                self.repeat -= 1
                return 0
            else:
                if self.failcount > 0:
                    # -1 is being returned as the test is intermittent, so no need to bisect
                    # further.
                    return -1
                # If the test does not fail even once, then proceed to next chunk for bisection.
                # loop is set to 2 to proceed on bisection.
                self.contents["loop"] = 2
                return 1
        elif self.result[options.bisectChunk] == "FAIL":
            self.summary.append(
                "\t\tThe test %s failed." % self.contents["testsToRun"][0]
            )
            self.failcount += 1
            self.contents["loop"] = 1
            self.repeat -= 1
            # self.max_failures is the maximum number of times a test is allowed
            # to fail to be called an intermittent. If a test fails more than
            # the limit set, it is a perma-fail.
            if self.failcount < self.max_failures:
                if self.repeat == 0:
                    # -1 is being returned as the test is intermittent, so no need to bisect
                    # further.
                    return -1
                return 0
            else:
                self.summary.append(
                    "TEST-UNEXPECTED-FAIL | %s | Bleedthrough detected, this test is the "
                    "root cause for many of the above failures"
                    % self.contents["testsToRun"][0]
                )
                return -1

    def print_summary(self):
        "This method is used to print the recorded summary."
        print("Bisection summary:")
        for line in self.summary:
            print(line)
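Taken together, pre_test, the test run, and post_test form a narrowing loop. A minimal sketch of how a harness might drive it (options, all_tests, and run_tests are hypothetical stand-ins for the mochitest harness's own objects, not code from this file):

# Hypothetical driver loop: keep narrowing the chunk until summarize_chunk
# reports -1 (finished, intermittent, or bleedthrough detected).
bisect = Bisect(harness=None)
status = 0
while status >= 0:
    tests_to_run = bisect.pre_test(options, all_tests, status)
    expected_error, result = run_tests(tests_to_run)  # the two per-test dicts
    status = bisect.post_test(options, expected_error, result)
bisect.print_summary()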

View file

@@ -1,62 +1,62 @@
# encoding: utf-8

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import sys
from io import StringIO

import mozunit
import pytest

from mozterm import Terminal


@pytest.fixture
def terminal():
    blessed = pytest.importorskip("blessed")

    kind = "xterm-256color"
    try:
        term = Terminal(stream=StringIO(), force_styling=True, kind=kind)
    except blessed.curses.error:
        pytest.skip("terminal '{}' not found".format(kind))

    return term


EXPECTED_DICT = {
    "log_test_status_fail": "\x1b[31mlog_test_status_fail\x1b(B\x1b[m",
    "log_process_output": "\x1b[34mlog_process_output\x1b(B\x1b[m",
    "log_test_status_pass": "\x1b[32mlog_test_status_pass\x1b(B\x1b[m",
    "log_test_status_unexpected_fail": "\x1b[31mlog_test_status_unexpected_fail\x1b(B\x1b[m",
    "log_test_status_known_intermittent": "\x1b[33mlog_test_status_known_intermittent\x1b(B\x1b[m",
    "time": "\x1b[36mtime\x1b(B\x1b[m",
    "action": "\x1b[33maction\x1b(B\x1b[m",
    "pid": "\x1b[36mpid\x1b(B\x1b[m",
    "heading": "\x1b[1m\x1b[33mheading\x1b(B\x1b[m",
    "sub_heading": "\x1b[33msub_heading\x1b(B\x1b[m",
    "error": "\x1b[31merror\x1b(B\x1b[m",
    "warning": "\x1b[33mwarning\x1b(B\x1b[m",
    "bold": "\x1b[1mbold\x1b(B\x1b[m",
    "grey": "\x1b[38;2;190;190;190mgrey\x1b(B\x1b[m",
    "normal": "\x1b[90mnormal\x1b(B\x1b[m",
    "bright_black": "\x1b[90mbright_black\x1b(B\x1b[m",
}


@pytest.mark.skipif(
    not sys.platform.startswith("win"),
    reason="Only do ANSI Escape Sequence comparisons on Windows.",
)
def test_terminal_colors(terminal):
    from mozlog.formatters.machformatter import TerminalColors, color_dict

    actual_dict = TerminalColors(terminal, color_dict)
    for key in color_dict:
        assert getattr(actual_dict, key)(key) == EXPECTED_DICT[key]


if __name__ == "__main__":
    mozunit.main()
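For orientation, the assertion above resolves each color name as an attribute of the TerminalColors wrapper and calls it on a string. A minimal sketch of one such lookup, equivalent to a single iteration of the loop (the expected value is the "error" entry of EXPECTED_DICT on an xterm-256color):

# One iteration of test_terminal_colors, spelled out: the "error" style
# wraps its argument in red ANSI escape codes.
colors = TerminalColors(terminal, color_dict)
assert colors.error("error") == "\x1b[31merror\x1b(B\x1b[m"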

View file

@@ -1,30 +1,30 @@
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
import sys

config = {
    "options": [
        "--prefs-root=%(test_path)s/prefs",
        "--config=%(test_path)s/wptrunner.ini",
        "--ca-cert-path=%(test_path)s/tests/tools/certs/cacert.pem",
        "--host-key-path=%(test_path)s/tests/tools/certs/web-platform.test.key",
        "--host-cert-path=%(test_path)s/tests/tools/certs/web-platform.test.pem",
        "--certutil-binary=%(test_install_path)s/bin/certutil",
    ],
    "exes": {
        "python": sys.executable,
        "hg": "c:/mozilla-build/hg/hg",
    },
    "default_actions": [
        "clobber",
        "download-and-extract",
        "create-virtualenv",
        "pull",
        "install",
        "run-tests",
    ],
}
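The %(test_path)s and %(test_install_path)s placeholders are ordinary Python %-format fields that the harness fills in from its directory map at runtime. A minimal illustration with a hypothetical path value:

# How one option template expands once the harness supplies the real paths.
option = "--config=%(test_path)s/wptrunner.ini"
assert option % {"test_path": "c:/tasks/wpt"} == "--config=c:/tasks/wpt/wptrunner.ini"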

View file

@@ -1,198 +1,198 @@
import mozunit
import pytest

from talos.test import Test, TsBase, register_test, test_dict, ts_paint


class BasicTestA(Test):
    pass


class BasicTestB(Test):
    pass


class BasicTestC(Test):
    """basic description"""

    keys = ["nonnull_attrib", "null_attrib"]

    nonnull_attrib = "value"
    null_attrib = None


class NotATest(object):
    pass


class Test_register_test(object):
    def test_same_instance_returned(self):
        decorator = register_test()
        NewBasicTest = decorator(BasicTestA)

        assert BasicTestA is NewBasicTest

    def test_class_registered(self):
        _TESTS = test_dict()
        decorator = register_test()

        # class registered
        decorator(BasicTestB)
        assert "BasicTestB" in _TESTS
        assert BasicTestB in _TESTS.values()

        # cannot register the same class twice
        with pytest.raises(AssertionError):
            decorator(BasicTestB)

        # cannot register another class type
        with pytest.raises(AssertionError):
            decorator(NotATest)


class TestTest(object):
    def test_same_class_name(self):
        assert BasicTestA.name() == "BasicTestA"

    def test_class_doc(self):
        assert BasicTestA.description() is not None
        assert BasicTestC.description() == "basic description"

    def test_init(self):
        basic_test = BasicTestA(new_attrib_a="value_a", new_attrib_b="value_b")

        assert basic_test.new_attrib_a == "value_a"
        assert basic_test.new_attrib_b == "value_b"

    def test_update(self):
        basic_test = BasicTestA()
        basic_test.update(new_attrib_a="value_a", new_attrib_b="value_b")

        assert basic_test.new_attrib_a == "value_a"
        assert basic_test.new_attrib_b == "value_b"

        basic_test.update(new_attrib_c="value_c")
        assert basic_test.new_attrib_c == "value_c"

    def test_items(self):
        basic_test = BasicTestC()

        # returns an iterable
        try:
            iter(basic_test.items())
        except TypeError:
            pytest.fail("Test.items() did not return iterator")

        tuple_result = basic_test.items()[0]
        assert len(tuple_result) == 2

        # returns only non-None values
        assert ("nonnull_attrib", "value") in basic_test.items()
        assert ("null_attrib", None) not in basic_test.items()

        # non-overridden Test instance
        test_instance = Test()
        assert test_instance.items() == [("name", "Test")]

        # overridden Test instance
        test_instance = Test(unregistered_attr="value")
        assert ("unregistered_attr", "value") not in test_instance.items()

        test_instance = Test()
        test_instance.update(keys=["cycles", "desktop", "lower_is_better"])
        assert dict(test_instance.items()) == {
            "name": "Test",
            "desktop": True,
            "lower_is_better": True,
        }

        test_instance = Test()
        test_instance.update(new_attrib="some")
        assert ("new_attrib", "some") not in test_instance.items()

        test_instance = Test()
        test_instance.update(keys=["new_attrib"], new_attrib="value")
        assert dict(test_instance.items()) == {"name": "Test", "new_attrib": "value"}

        test_instance = Test(cycles=20, desktop=False)
        assert test_instance.cycles == 20
        assert test_instance.desktop is False

        test_instance = Test()
        test_instance.update(cycles=20, desktop=False)
        assert test_instance.cycles == 20
        assert test_instance.desktop is False


class TestTsBase(object):
    ts_base_registered_keys = {
        "url",
        "url_timestamp",
        "timeout",
        "cycles",
        "profile_path",
        "gecko_profile",
        "gecko_profile_interval",
        "gecko_profile_entries",
        "gecko_profile_startup",
        "preferences",
        "xperf_counters",
        "xperf_providers",
        "xperf_user_providers",
        "xperf_stackwalk",
        "tpmozafterpaint",
        "fnbpaint",
        "tphero",
        "profile",
        "firstpaint",
        "userready",
        "testeventmap",
        "base_vs_ref",
        "extensions",
        "filters",
        "setup",
        "cleanup",
        "webextensions",
        "reinstall",
    }

    def setup_method(self):
        self.test_instance = TsBase()

    def test_no_unknown_keys_are_somehow_added_alongside_registered_ones(self):
        assert set(self.test_instance.keys) == self.ts_base_registered_keys

        self.test_instance.update(attribute_one="value", attribute_two="value")
        assert set(self.test_instance.keys) == self.ts_base_registered_keys

    def test_nonnull_keys_show_up(self):
        assert dict(self.test_instance.items()) == {
            "name": "TsBase",
            "filters": self.test_instance.filters,
        }

        self.test_instance.update(timeout=500)
        assert dict(self.test_instance.items()) == {
            "name": "TsBase",
            "filters": self.test_instance.filters,
            "timeout": 500,
        }


class Test_ts_paint(object):
    def test_test_nonnull_keys_show_up(self):
        test_instance = ts_paint()
        keys = {key for key, _ in test_instance.items()}

        assert keys == {
            "name",
            "cycles",
            "timeout",
            "gecko_profile_startup",
            "gecko_profile_entries",
            "url",
            "xperf_counters",
            "filters",
            "tpmozafterpaint",
        }


if __name__ == "__main__":
    mozunit.main()
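The registration pattern these tests exercise is the decorator form of register_test. A minimal sketch of defining a new talos test with it (the class name and attribute values are hypothetical):

# register_test() records the class in the shared test dict keyed by its
# class name, which is what Test_register_test.test_class_registered verifies.
@register_test()
class ts_example(TsBase):
    """hypothetical startup test"""

    cycles = 20
    timeout = 150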

View file

@@ -1,32 +1,32 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from setuptools import find_packages, setup

VERSION = "1.0.0"

with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    author="Mozilla Telemetry Team",
    author_email="telemetry-client-dev@mozilla.com",
    url=(
        "https://firefox-source-docs.mozilla.org/"
        "toolkit/components/telemetry/telemetry/collection/index.html"
    ),
    name="mozparsers",
    description="Shared parsers for the Telemetry probe registries.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MPL 2.0",
    packages=find_packages(),
    version=VERSION,
    classifiers=[
        "Topic :: Software Development :: Build Tools",
        "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
        "Programming Language :: Python :: 2.7",
    ],
    keywords=["mozilla", "telemetry", "parsers"],
)

View file

@@ -1,95 +1,95 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import argparse
import logging
import os
import sys

from mach.decorators import Command
from mozbuild.base import BinaryNotFoundException
from mozbuild.base import MachCommandConditions as conditions


def create_parser_tests():
    from marionette_harness.runtests import MarionetteArguments
    from mozlog.structured import commandline

    parser = MarionetteArguments()
    commandline.add_logging_group(parser)
    return parser


def run_telemetry(tests, binary=None, topsrcdir=None, **kwargs):
    from marionette_harness.runtests import MarionetteHarness
    from mozlog.structured import commandline
    from telemetry_harness.runtests import TelemetryTestRunner

    parser = create_parser_tests()

    if not tests:
        tests = [
            os.path.join(
                topsrcdir,
                "toolkit/components/telemetry/tests/marionette/tests/manifest.ini",
            )
        ]

    args = argparse.Namespace(tests=tests)

    args.binary = binary
    args.logger = kwargs.pop("log", None)

    for k, v in kwargs.items():
        setattr(args, k, v)

    parser.verify_usage(args)

    os.environ["MOZ_IGNORE_NSS_SHUTDOWN_LEAKS"] = "1"

    # Causes Firefox to crash when using non-local connections.
    os.environ["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"

    if not args.logger:
        args.logger = commandline.setup_logging(
            "Telemetry Client Tests", args, {"mach": sys.stdout}
        )
    failed = MarionetteHarness(TelemetryTestRunner, args=vars(args)).run()
    if failed > 0:
        return 1
    return 0


@Command(
    "telemetry-tests-client",
    category="testing",
    description="Run tests specifically for the Telemetry client",
    conditions=[conditions.is_firefox_or_android],
    parser=create_parser_tests,
)
def telemetry_test(command_context, tests, **kwargs):
    if "test_objects" in kwargs:
        tests = []
        for obj in kwargs["test_objects"]:
            tests.append(obj["file_relpath"])
        del kwargs["test_objects"]

    if not kwargs.get("binary") and conditions.is_firefox(command_context):
        try:
            kwargs["binary"] = command_context.get_binary_path("app")
        except BinaryNotFoundException as e:
            command_context.log(
                logging.ERROR,
                "telemetry-tests-client",
                {"error": str(e)},
                "ERROR: {error}",
            )
            command_context.log(
                logging.INFO, "telemetry-tests-client", {"help": e.help()}, "{help}"
            )
            return 1
    if not kwargs.get("server_root"):
        kwargs[
            "server_root"
        ] = "toolkit/components/telemetry/tests/marionette/harness/www"
    return run_telemetry(tests, topsrcdir=command_context.topsrcdir, **kwargs)
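For reference, a minimal sketch of calling the runner directly rather than through the mach command (both paths are hypothetical placeholders; mach normally supplies them from the build environment):

# An empty test list falls back to the default manifest.ini shown above.
status = run_telemetry(
    tests=[],
    binary="/path/to/firefox",
    topsrcdir="/path/to/mozilla-central",
)
# run_telemetry returns 0 on success and 1 if any test failed.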